Adding support for Xenomai targets.
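
The new target builds the PLC as a shared object against Xenomai's native
skin: compiler and linker flags are obtained by running the xeno-config
script (new XenoConfig target attribute, replacing the xeno_config install
prefix) and "-shared -lnative" are appended. The runtime main runs the PLC
loop in a periodic real-time task and uses native mutexes and semaphores
for the debug and Python command hand-off; threads entering those sections
from plain Linux context are shadowed with rt_task_shadow() first.

Also guard the debug value dispatch in plugger.py against None values, and
replace the target_xenomai.py stub with a real Xenomai_target class in
targets/Xenomai/__init__.py.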
--- a/plugger.py Wed Mar 11 10:41:55 2009 +0100
+++ b/plugger.py Mon Mar 16 17:31:08 2009 +0100
@@ -1488,18 +1488,19 @@
if debug_vars is not None and \
len(debug_vars) == len(self.TracedIECPath):
for IECPath,value in zip(self.TracedIECPath, debug_vars):
- data_tuple = self.IECdebug_datas.get(IECPath, None)
- if data_tuple is not None:
- WeakCallableDict, data_log, status = data_tuple
- data_log.append((debug_tick, value))
- for weakcallable,(args,kwargs) in WeakCallableDict.iteritems():
- # delegate call to wx event loop
- #print weakcallable, value, args, kwargs
- if getattr(weakcallable, "SetValue", None) is not None:
- wx.CallAfter(weakcallable.SetValue, value, *args, **kwargs)
- elif getattr(weakcallable, "AddPoint", None) is not None:
- wx.CallAfter(weakcallable.AddPoint, debug_tick, value, *args, **kwargs)
- # This will block thread if more than one call is waiting
+ if value is not None:
+ data_tuple = self.IECdebug_datas.get(IECPath, None)
+ if data_tuple is not None:
+ WeakCallableDict, data_log, status = data_tuple
+ data_log.append((debug_tick, value))
+ for weakcallable,(args,kwargs) in WeakCallableDict.iteritems():
+ # delegate call to wx event loop
+ #print weakcallable, value, args, kwargs
+ if getattr(weakcallable, "SetValue", None) is not None:
+ wx.CallAfter(weakcallable.SetValue, value, *args, **kwargs)
+ elif getattr(weakcallable, "AddPoint", None) is not None:
+ wx.CallAfter(weakcallable.AddPoint, debug_tick, value, *args, **kwargs)
+ # This will block the thread if more than one call is waiting
elif debug_vars is not None:
wx.CallAfter(self.logger.write_warning,
"Debug data not coherent %d != %d\n"%(len(debug_vars), len(self.TracedIECPath)))
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/targets/Xenomai/.cvsignore Mon Mar 16 17:31:08 2009 +0100
@@ -0,0 +1,1 @@
+*.pyc
--- a/targets/Xenomai/XSD Wed Mar 11 10:41:55 2009 +0100
+++ b/targets/Xenomai/XSD Mon Mar 16 17:31:08 2009 +0100
@@ -1,6 +1,6 @@
- <xsd:element name="Xenomai">
+ <xsd:element name="Xenomai">
<xsd:complexType>
%(toolchain_gcc)s
- <xsd:attribute name="xeno_config" type="xsd:string" use="optional" default="/usr/xenomai/"/>
+ <xsd:attribute name="XenoConfig" type="xsd:string" use="optional" default="/usr/xenomai/bin/xeno-config"/>
</xsd:complexType>
</xsd:element>
\ No newline at end of file
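
Note: besides the rename, the attribute's meaning changes: xeno_config held
the Xenomai installation prefix, while XenoConfig points directly at the
xeno-config script that the build steps below execute.
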
--- a/targets/Xenomai/__init__.py Wed Mar 11 10:41:55 2009 +0100
+++ b/targets/Xenomai/__init__.py Mon Mar 16 17:31:08 2009 +0100
@@ -1,1 +1,31 @@
-from target_xenomai import *
\ No newline at end of file
+from .. import toolchain_gcc
+from wxPopen import ProcessLogger
+
+class Xenomai_target(toolchain_gcc):
+ extension = ".so"
+ def getXenoConfig(self):
+ """ Get xeno-config from target parameters """
+ return self.PuginsRootInstance.BeremizRoot.getTargetType().getcontent()["value"].getXenoConfig()
+
+ def getBuilderLDFLAGS(self):
+ # get xeno-config from target parameters
+ xeno_config = self.getXenoConfig()
+
+ status, result, err_result = ProcessLogger(self.logger, xeno_config + " --xeno-ldflags", no_stdout=True).spin()
+ if status:
+ self.logger.write_error("Unable to get Xenomai's LDFLAGS\n")
+ xeno_ldflags = result.strip()
+
+ return toolchain_gcc.getBuilderLDFLAGS(self) + [xeno_ldflags, "-shared", "-lnative"]
+
+ def getBuilderCFLAGS(self):
+ # get xeno-config from target parameters
+ xeno_config = self.getXenoConfig()
+
+ status, result, err_result = ProcessLogger(self.logger, xeno_config + " --xeno-cflags", no_stdout=True).spin()
+ if status:
+ self.logger.write_error("Unable to get Xenomai's CFLAGS\n")
+ xeno_cflags = result.strip()
+
+ return toolchain_gcc.getBuilderCFLAGS(self) + [xeno_cflags]
+
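
The two getBuilder* methods above shell out to xeno-config for the target's
compile and link flags. A standalone sketch of that step using subprocess
(Beremiz itself goes through ProcessLogger); the script path is the XSD
default and the two options come from the code above:

    import subprocess

    def query_xeno_flags(xeno_config="/usr/xenomai/bin/xeno-config"):
        """Return the CFLAGS/LDFLAGS strings reported by xeno-config."""
        flags = {}
        for kind, option in (("CFLAGS", "--xeno-cflags"),
                             ("LDFLAGS", "--xeno-ldflags")):
            proc = subprocess.Popen([xeno_config, option],
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            out, _ = proc.communicate()
            if proc.returncode != 0:
                raise RuntimeError("%s %s failed" % (xeno_config, option))
            flags[kind] = out.strip()
        return flags

The target then appends "-shared" and "-lnative" so the PLC links as a
shared object against the native skin, matching extension = ".so" above.
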
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/targets/Xenomai/plc_Xenomai_main.c Mon Mar 16 17:31:08 2009 +0100
@@ -0,0 +1,251 @@
+/**
+ * Xenomai specific code
+ **/
+
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+
+#include <native/task.h>
+#include <native/timer.h>
+#include <native/mutex.h>
+#include <native/sem.h>
+
+unsigned int PLC_state = 0;
+#define PLC_STATE_TASK_CREATED 1
+#define PLC_STATE_PYTHON_MUTEX_CREATED 2
+#define PLC_STATE_PYTHON_WAIT_SEM_CREATED 4
+#define PLC_STATE_DEBUG_MUTEX_CREATED 8
+#define PLC_STATE_DEBUG_WAIT_SEM_CREATED 16
+
+/* provided by POUS.C */
+extern int common_ticktime__;
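+/* Ttick, __CURRENT_TIME, __DEBUG and the __init/__run/__cleanup hooks are expected from the generated PLC code */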
+
+long AtomicCompareExchange(long* atomicvar,long compared, long exchange)
+{
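+ /* GCC builtin: if *atomicvar == compared, store exchange there; returns the value found beforehand */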
+ return __sync_val_compare_and_swap(atomicvar, compared, exchange);
+}
+
+void PLC_GetTime(IEC_TIME *CURRENT_TIME)
+{
+ RTIME current_time = rt_timer_read();
+ CURRENT_TIME->tv_sec = current_time / 1000000000;
+ CURRENT_TIME->tv_nsec = current_time % 1000000000;
+}
+
+RT_TASK PLC_task;
+RT_TASK WaitDebug_task;
+RT_TASK WaitPythonCommand_task;
+RT_TASK UnLockPython_task;
+RT_TASK LockPython_task;
+int PLC_shutdown = 0;
+
+void PLC_SetTimer(long long next, long long period)
+{
+ RTIME current_time = rt_timer_read();
+ rt_task_set_periodic(&PLC_task, current_time + next, rt_timer_ns2ticks(period));
+}
+
+void PLC_task_proc(void *arg)
+{
+ PLC_SetTimer(Ttick, Ttick);
+
+ while (1) {
+ PLC_GetTime(&__CURRENT_TIME);
+ __run();
+ if (PLC_shutdown) break;
+ rt_task_wait_period(NULL);
+ }
+}
+
+static int __debug_tick;
+
+RT_SEM python_wait_sem;
+RT_MUTEX python_mutex;
+RT_SEM debug_wait_sem;
+RT_MUTEX debug_mutex;
+
+void PLC_cleanup_all(void)
+{
+ if (PLC_state & PLC_STATE_TASK_CREATED) {
+ rt_task_delete(&PLC_task);
+ PLC_state &= ~PLC_STATE_TASK_CREATED;
+ }
+
+ if (PLC_state & PLC_STATE_PYTHON_WAIT_SEM_CREATED) {
+ rt_sem_delete(&python_wait_sem);
+ PLC_state &= ~ PLC_STATE_PYTHON_WAIT_SEM_CREATED;
+ }
+
+ if (PLC_state & PLC_STATE_PYTHON_MUTEX_CREATED) {
+ rt_mutex_delete(&python_mutex);
+ PLC_state &= ~ PLC_STATE_PYTHON_MUTEX_CREATED;
+ }
+
+ if (PLC_state & PLC_STATE_DEBUG_WAIT_SEM_CREATED) {
+ rt_sem_delete(&debug_wait_sem);
+ PLC_state &= ~ PLC_STATE_DEBUG_WAIT_SEM_CREATED;
+ }
+
+ if (PLC_state & PLC_STATE_DEBUG_MUTEX_CREATED) {
+ rt_mutex_delete(&debug_mutex);
+ PLC_state &= ~ PLC_STATE_DEBUG_MUTEX_CREATED;
+ }
+}
+
+int stopPLC()
+{
+ PLC_shutdown = 1;
+ /* Stop the PLC */
+ PLC_SetTimer(0, 0);
+ PLC_cleanup_all();
+ __cleanup();
+ __debug_tick = -1;
+ rt_sem_v(&debug_wait_sem);
+ rt_sem_v(&python_wait_sem);
+ return 0;
+}
+
+void catch_signal(int sig)
+{
+ stopPLC();
+// signal(SIGTERM, catch_signal);
+ signal(SIGINT, catch_signal);
+ printf("Got Signal %d\n",sig);
+ exit(0);
+}
+
+#define max_val(a,b) ((a>b)?a:b)
+int startPLC(int argc,char **argv)
+{
+ int ret = 0;
+
+ signal(SIGINT, catch_signal);
+
+ /* no memory-swapping for this program */
+ mlockall(MCL_CURRENT | MCL_FUTURE);
+
+ /* Translate PLC's milliseconds to Ttick nanoseconds */
+ Ttick = 1000000 * max_val(common_ticktime__,1);
+
+ /* create python_wait_sem */
+ ret = rt_sem_create(&python_wait_sem, "python_wait_sem", 0, S_FIFO);
+ if (ret) goto error;
+ PLC_state |= PLC_STATE_PYTHON_WAIT_SEM_CREATED;
+
+ /* create python_mutex */
+ ret = rt_mutex_create(&python_mutex, "python_mutex");
+ if (ret) goto error;
+ PLC_state |= PLC_STATE_PYTHON_MUTEX_CREATED;
+
+ /* create debug_wait_sem */
+ ret = rt_sem_create(&debug_wait_sem, "debug_wait_sem", 0, S_FIFO);
+ if (ret) goto error;
+ PLC_state |= PLC_STATE_DEBUG_WAIT_SEM_CREATED;
+
+ /* create debug_mutex */
+ ret = rt_mutex_create(&debug_mutex, "debug_mutex");
+ if (ret) goto error;
+ PLC_state |= PLC_STATE_DEBUG_MUTEX_CREATED;
+
+ /* create PLC_task */
+ ret = rt_task_create(&PLC_task, "PLC_task", 0, 50, 0);
+ if (ret) goto error;
+ PLC_state |= PLC_STATE_TASK_CREATED;
+
+ ret = __init(argc,argv);
+ if (ret) goto error;
+
+ /* start PLC_task */
+ ret = rt_task_start(&PLC_task, &PLC_task_proc, NULL);
+ if (ret) goto error;
+
+ return 0;
+
+error:
+ PLC_cleanup_all();
+ return 1;
+}
+
+int TryEnterDebugSection(void)
+{
+ return rt_mutex_acquire(&debug_mutex, TM_NONBLOCK) == 0;
+}
+
+void LeaveDebugSection(void)
+{
+ rt_mutex_release(&debug_mutex);
+}
+
+extern int __tick;
+/* from plc_debugger.c */
+int WaitDebugData()
+{
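+ /* rt_task_shadow() turns the calling Linux thread into a Xenomai task so it can block on the native semaphore */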
+ rt_task_shadow(&WaitDebug_task, "WaitDebug_task", 0, 0);
+ /* Wait signal from PLC thread */
+ rt_sem_p(&debug_wait_sem, TM_INFINITE);
+ return __debug_tick;
+}
+
+/* Called by PLC thread when debug_publish finished
+ * This is supposed to unlock debugger thread in WaitDebugData*/
+void InitiateDebugTransfer()
+{
+ /* remember tick */
+ __debug_tick = __tick;
+ /* signal debugger thread it can read data */
+ rt_sem_v(&debug_wait_sem);
+}
+
+void suspendDebug(void)
+{
+ __DEBUG = 0;
+ /* Prevent PLC to enter debug code */
+ rt_mutex_acquire(&debug_mutex, TM_INFINITE);
+}
+
+void resumeDebug(void)
+{
+ __DEBUG = 1;
+ /* Let PLC enter debug code */
+ rt_mutex_release(&debug_mutex);
+}
+
+/* from plc_python.c */
+int WaitPythonCommands(void)
+{
+ rt_task_shadow(&WaitPythonCommand_task, "WaitPythonCommand_task", 0, 0);
+ /* Wait signal from PLC thread */
+ rt_sem_p(&python_wait_sem, TM_INFINITE);
+ return 0;
+}
+
+/* Called by PLC thread on each new python command*/
+void UnBlockPythonCommands(void)
+{
+ /* signal debugger thread it can read data */
+ rt_sem_v(&python_wait_sem);
+}
+
+int TryLockPython(void)
+{
+ return rt_mutex_acquire(&python_mutex, TM_NONBLOCK) == 0;
+}
+
+void UnLockPython(void)
+{
+ rt_task_shadow(&UnLockPython_task, "UnLockPython_task", 0, 0);
+ rt_mutex_release(&python_mutex);
+}
+
+void LockPython(void)
+{
+ rt_task_shadow(&LockPython_task, "LockPython_task", 0, 0);
+ rt_mutex_acquire(&python_mutex, TM_INFINITE);
+}
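
The functions above are the contract the Beremiz runtime drives once the
shared object is built. A minimal ctypes sketch of that life cycle, with a
hypothetical ./plc.so path (not Beremiz's actual loader):

    import ctypes, time

    plc = ctypes.CDLL("./plc.so")  # hypothetical build product
    plc.startPLC.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_char_p)]
    plc.startPLC.restype = ctypes.c_int

    argv = (ctypes.c_char_p * 1)(b"plc")
    if plc.startPLC(1, argv) == 0:  # 0 means tasks and IPC objects are up
        time.sleep(5)               # PLC cycles in its periodic Xenomai task
        plc.stopPLC()               # deletes the task, mutexes and semaphores
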
--- a/targets/Xenomai/target_xenomai.py Wed Mar 11 10:41:55 2009 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,3 +0,0 @@
-class xenomai_target(targets.target_gcc):
- extensionexe = ""
- extensiondll = ""