--- a/Beremiz.py Mon Mar 12 14:10:19 2018 +0000
+++ b/Beremiz.py Tue May 22 17:28:18 2018 +0300
@@ -116,9 +116,13 @@
def ShowSplashScreen(self):
class Splash(AdvancedSplash):
+ Painted = False
+
def OnPaint(_self, event): # pylint: disable=no-self-argument
AdvancedSplash.OnPaint(_self, event)
- wx.CallAfter(self.AppStart)
+ if not _self.Painted: # trigger app start only once
+ _self.Painted = True
+ wx.CallAfter(self.AppStart)
bmp = wx.Image(self.splashPath).ConvertToBitmap()
self.splash = Splash(None,
bitmap=bmp,
--- a/Beremiz_service.py Mon Mar 12 14:10:19 2018 +0000
+++ b/Beremiz_service.py Tue May 22 17:28:18 2018 +0300
@@ -1,2 +1,3 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
+
@@ -30,12 +31,14 @@
import sys
import getopt
import threading
-from threading import Thread, currentThread, Semaphore
+from threading import Thread, currentThread, Semaphore, Lock
import traceback
import __builtin__
+import Pyro
import Pyro.core as pyro
-from runtime import PLCObject, ServicePublisher
+from runtime import PLCObject, ServicePublisher, MainWorker
+from runtime.xenomai import TryPreloadXenomai
import util.paths as paths
@@ -132,6 +135,8 @@
if __name__ == '__main__':
__builtin__.__dict__['_'] = lambda x: x
+ # TODO: add a cmdline parameter in case preloading Xenomai causes problems
+ TryPreloadXenomai()
def Bpath(*args):
@@ -401,7 +406,7 @@
class Server(object):
def __init__(self, servicename, ip_addr, port,
- workdir, argv, autostart=False,
+ workdir, argv,
statuschange=None, evaluator=default_evaluator,
pyruntimevars=None):
self.continueloop = True
@@ -411,22 +416,48 @@
self.port = port
self.workdir = workdir
self.argv = argv
- self.plcobj = None
self.servicepublisher = None
- self.autostart = autostart
self.statuschange = statuschange
self.evaluator = evaluator
self.pyruntimevars = pyruntimevars
-
- def Loop(self):
+ self.plcobj = PLCObject(self)
+
+ def _to_be_published(self):
+ return self.servicename is not None and \
+ self.ip_addr is not None and \
+ self.ip_addr != "localhost" and \
+ self.ip_addr != "127.0.0.1"
+
+ def PrintServerInfo(self):
+ print(_("Pyro port :"), self.port)
+
+ # Beremiz IDE detects that the LOCAL:// runtime is ready by looking
+ # for self.workdir in the daemon's stdout.
+ print(_("Current working directory :"), self.workdir)
+
+ if self._to_be_published():
+ print(_("Publishing service on local network"))
+
+ sys.stdout.flush()
+
+ def PyroLoop(self, when_ready):
while self.continueloop:
+ Pyro.config.PYRO_MULTITHREADED = 0
pyro.initServer()
self.daemon = pyro.Daemon(host=self.ip_addr, port=self.port)
+
# pyro never frees memory after connection close if no timeout set
# taking too small timeout value may cause
# unwanted diconnection when IDE is kept busy for long periods
self.daemon.setTimeout(60)
- self.Start()
+
+ self.daemon.connect(self.plcobj, "PLCObject")
+
+ if self._to_be_published():
+ self.servicepublisher = ServicePublisher.ServicePublisher()
+ self.servicepublisher.RegisterService(self.servicename, self.ip_addr, self.port)
+
+ when_ready()
self.daemon.requestLoop()
self.daemon.sock.close()
@@ -440,39 +471,6 @@
self.plcobj.UnLoadPLC()
self._stop()
- def Start(self):
- self.plcobj = PLCObject(self.workdir, self.daemon, self.argv,
- self.statuschange, self.evaluator,
- self.pyruntimevars)
-
- uri = self.daemon.connect(self.plcobj, "PLCObject")
-
- print(_("Pyro port :"), self.port)
- print(_("Pyro object's uri :"), uri)
-
- # Beremiz IDE detects daemon start by looking
- # for self.workdir in the daemon's stdout.
- # Therefore don't delete the following line
- print(_("Current working directory :"), self.workdir)
-
- # Configure and publish service
- # Not publish service if localhost in address params
- if self.servicename is not None and \
- self.ip_addr is not None and \
- self.ip_addr != "localhost" and \
- self.ip_addr != "127.0.0.1":
- print(_("Publishing service on local network"))
- self.servicepublisher = ServicePublisher.ServicePublisher()
- self.servicepublisher.RegisterService(self.servicename, self.ip_addr, self.port)
-
- self.plcobj.AutoLoad()
- if self.plcobj.GetPLCstatus()[0] != "Empty":
- if self.autostart:
- self.plcobj.StartPLC()
- self.plcobj.StatusChange()
-
- sys.stdout.flush()
-
def _stop(self):
if self.plcobj is not None:
self.plcobj.StopPLC()
@@ -481,6 +479,13 @@
self.servicepublisher = None
self.daemon.shutdown(True)
+ def AutoLoad(self):
+ self.plcobj.AutoLoad()
+ if self.plcobj.GetPLCstatus()[0] == "Stopped":
+ if autostart:
+ self.plcobj.StartPLC()
+ self.plcobj.StatusChange()
+
if enabletwisted:
import warnings
@@ -529,13 +534,13 @@
return o.res
pyroserver = Server(servicename, given_ip, port,
- WorkingDir, argv, autostart,
+ WorkingDir, argv,
statuschange, evaluator, pyruntimevars)
taskbar_instance = BeremizTaskBarIcon(pyroserver, enablewx)
else:
pyroserver = Server(servicename, given_ip, port,
- WorkingDir, argv, autostart,
+ WorkingDir, argv,
statuschange, pyruntimevars=pyruntimevars)
@@ -631,19 +636,47 @@
except Exception:
LogMessageAndException(_("WAMP client startup failed. "))
+pyro_thread_started = Lock()
+pyro_thread_started.acquire()
+pyro_thread = Thread(target=pyroserver.PyroLoop,
+ kwargs=dict(when_ready=pyro_thread_started.release))
+pyro_thread.start()
+
+# Wait for pyro thread to be effective
+pyro_thread_started.acquire()
+
+pyroserver.PrintServerInfo()
if havetwisted or havewx:
- pyro_thread = Thread(target=pyroserver.Loop)
- pyro_thread.start()
-
+ ui_thread_started = Lock()
+ ui_thread_started.acquire()
if havetwisted:
- reactor.run()
- elif havewx:
- app.MainLoop()
-else:
- try:
- pyroserver.Loop()
- except KeyboardInterrupt:
- pass
+ # reactor._installSignalHandlersAgain()
+ def ui_thread_target():
+ # FIXME: had to disable SignalHandlers install because
+ # signal not working in non-main thread
+ reactor.run(installSignalHandlers=False)
+ else:
+ ui_thread_target = app.MainLoop
+
+ ui_thread = Thread(target=ui_thread_target)
+ ui_thread.start()
+
+ # This orders the UI loop to unblock the main thread when ready.
+ if havetwisted:
+ reactor.callLater(0, ui_thread_started.release)
+ else:
+ wx.CallAfter(ui_thread_started.release)
+
+ # Wait for ui thread to be effective
+ ui_thread_started.acquire()
+ print("UI thread started successfully.")
+
+try:
+ MainWorker.runloop(pyroserver.AutoLoad)
+except KeyboardInterrupt:
+ pass
+
pyroserver.Quit()
sys.exit(0)
--- a/IDEFrame.py Mon Mar 12 14:10:19 2018 +0000
+++ b/IDEFrame.py Tue May 22 17:28:18 2018 +0300
@@ -1537,6 +1537,7 @@
self.ProjectTree.SetItemText(root, item_name)
self.ProjectTree.SetPyData(root, infos)
highlight_colours = self.Highlights.get(infos.get("tagname", None), (wx.Colour(255, 255, 255, 0), wx.BLACK))
+ self.ProjectTree.SetItemBackgroundColour(root, highlight_colours[0])
self.ProjectTree.SetItemTextColour(root, highlight_colours[1])
self.ProjectTree.SetItemExtraImage(root, None)
if infos["type"] == ITEM_POU:
--- a/ProjectController.py Mon Mar 12 14:10:19 2018 +0000
+++ b/ProjectController.py Tue May 22 17:28:18 2018 +0300
@@ -37,7 +37,7 @@
import re
import tempfile
from types import ListType
-from threading import Timer, Lock, Thread
+from threading import Timer
from datetime import datetime
from weakref import WeakKeyDictionary
from itertools import izip
@@ -242,7 +242,6 @@
# Setup debug information
self.IECdebug_datas = {}
- self.IECdebug_lock = Lock()
self.DebugTimer = None
self.ResetIECProgramsAndVariables()
@@ -258,7 +257,6 @@
# After __init__ root confnode is not valid
self.ProjectPath = None
self._setBuildPath(None)
- self.DebugThread = None
self.debug_break = False
self.previous_plcstate = None
# copy ConfNodeMethods so that it can be later customized
@@ -1420,9 +1418,32 @@
self.UpdateMethodsFromPLCStatus()
def SnapshotAndResetDebugValuesBuffers(self):
+ if self._connector is not None:
+ plc_status, Traces = self._connector.GetTraceVariables()
+ # print [dict.keys() for IECPath, (dict, log, status, fvalue) in self.IECdebug_datas.items()]
+ if plc_status == "Started":
+ if len(Traces) > 0:
+ for debug_tick, debug_buff in Traces:
+ debug_vars = UnpackDebugBuffer(debug_buff, self.TracedIECTypes)
+ if debug_vars is not None and len(debug_vars) == len(self.TracedIECPath):
+ for IECPath, values_buffer, value in izip(
+ self.TracedIECPath,
+ self.DebugValuesBuffers,
+ debug_vars):
+ IECdebug_data = self.IECdebug_datas.get(IECPath, None)
+ if IECdebug_data is not None and value is not None:
+ forced = IECdebug_data[2:4] == ["Forced", value]
+ if not IECdebug_data[4] and len(values_buffer) > 0:
+ values_buffer[-1] = (value, forced)
+ else:
+ values_buffer.append((value, forced))
+ self.DebugTicks.append(debug_tick)
+
buffers, self.DebugValuesBuffers = (self.DebugValuesBuffers,
[list() for dummy in xrange(len(self.TracedIECPath))])
+
ticks, self.DebugTicks = self.DebugTicks, []
+
return ticks, buffers
def RegisterDebugVarToConnector(self):
@@ -1431,7 +1452,6 @@
self.TracedIECPath = []
self.TracedIECTypes = []
if self._connector is not None:
- self.IECdebug_lock.acquire()
IECPathsToPop = []
for IECPath, data_tuple in self.IECdebug_datas.iteritems():
WeakCallableDict, _data_log, _status, fvalue, _buffer_list = data_tuple
@@ -1462,7 +1482,6 @@
self.TracedIECPath = []
self._connector.SetTraceVariablesList([])
self.SnapshotAndResetDebugValuesBuffers()
- self.IECdebug_lock.release()
def IsPLCStarted(self):
return self.previous_plcstate == "Started"
@@ -1494,7 +1513,6 @@
if IECPath != "__tick__" and IECPath not in self._IECPathToIdx:
return None
- self.IECdebug_lock.acquire()
# If no entry exist, create a new one with a fresh WeakKeyDictionary
IECdebug_data = self.IECdebug_datas.get(IECPath, None)
if IECdebug_data is None:
@@ -1510,14 +1528,11 @@
IECdebug_data[0][callableobj] = buffer_list
- self.IECdebug_lock.release()
-
self.ReArmDebugRegisterTimer()
return IECdebug_data[1]
def UnsubscribeDebugIECVariable(self, IECPath, callableobj):
- self.IECdebug_lock.acquire()
IECdebug_data = self.IECdebug_datas.get(IECPath, None)
if IECdebug_data is not None:
IECdebug_data[0].pop(callableobj, None)
@@ -1528,14 +1543,11 @@
lambda x, y: x | y,
IECdebug_data[0].itervalues(),
False)
- self.IECdebug_lock.release()
self.ReArmDebugRegisterTimer()
def UnsubscribeAllDebugIECVariable(self):
- self.IECdebug_lock.acquire()
self.IECdebug_datas = {}
- self.IECdebug_lock.release()
self.ReArmDebugRegisterTimer()
@@ -1543,30 +1555,22 @@
if IECPath not in self.IECdebug_datas:
return
- self.IECdebug_lock.acquire()
-
# If no entry exist, create a new one with a fresh WeakKeyDictionary
IECdebug_data = self.IECdebug_datas.get(IECPath, None)
IECdebug_data[2] = "Forced"
IECdebug_data[3] = fvalue
- self.IECdebug_lock.release()
-
self.ReArmDebugRegisterTimer()
def ReleaseDebugIECVariable(self, IECPath):
if IECPath not in self.IECdebug_datas:
return
- self.IECdebug_lock.acquire()
-
# If no entry exist, create a new one with a fresh WeakKeyDictionary
IECdebug_data = self.IECdebug_datas.get(IECPath, None)
IECdebug_data[2] = "Registered"
IECdebug_data[3] = None
- self.IECdebug_lock.release()
-
self.ReArmDebugRegisterTimer()
def CallWeakcallables(self, IECPath, function_name, *cargs):
@@ -1590,51 +1594,8 @@
return -1, "No runtime connected!"
return self._connector.RemoteExec(script, **kwargs)
- def DebugThreadProc(self):
- """
- This thread waid PLC debug data, and dispatch them to subscribers
- """
- self.debug_break = False
- debug_getvar_retry = 0
- while (not self.debug_break) and (self._connector is not None):
- plc_status, Traces = self._connector.GetTraceVariables()
- debug_getvar_retry += 1
- # print [dict.keys() for IECPath, (dict, log, status, fvalue) in self.IECdebug_datas.items()]
- if plc_status == "Started":
- if len(Traces) > 0:
- self.IECdebug_lock.acquire()
- for debug_tick, debug_buff in Traces:
- debug_vars = UnpackDebugBuffer(debug_buff, self.TracedIECTypes)
- if debug_vars is not None and len(debug_vars) == len(self.TracedIECPath):
- for IECPath, values_buffer, value in izip(
- self.TracedIECPath,
- self.DebugValuesBuffers,
- debug_vars):
- IECdebug_data = self.IECdebug_datas.get(IECPath, None) # FIXME get
- if IECdebug_data is not None and value is not None:
- forced = IECdebug_data[2:4] == ["Forced", value]
- if not IECdebug_data[4] and len(values_buffer) > 0:
- values_buffer[-1] = (value, forced)
- else:
- values_buffer.append((value, forced))
- self.DebugTicks.append(debug_tick)
- debug_getvar_retry = 0
- self.IECdebug_lock.release()
-
- if debug_getvar_retry != 0:
- # Be patient, tollerate PLC to come with fresh samples
- time.sleep(0.1)
- else:
- self.debug_break = True
- self.logger.write(_("Debugger disabled\n"))
- self.DebugThread = None
- if self.DispatchDebugValuesTimer is not None:
- self.DispatchDebugValuesTimer.Stop()
-
def DispatchDebugValuesProc(self, event):
- self.IECdebug_lock.acquire()
debug_ticks, buffers = self.SnapshotAndResetDebugValuesBuffers()
- self.IECdebug_lock.release()
start_time = time.time()
if len(self.TracedIECPath) == len(buffers):
for IECPath, values in izip(self.TracedIECPath, buffers):
@@ -1645,22 +1606,12 @@
delay = time.time() - start_time
next_refresh = max(REFRESH_PERIOD - delay, 0.2 * delay)
- if self.DispatchDebugValuesTimer is not None and self.DebugThread is not None:
+ if self.DispatchDebugValuesTimer is not None:
self.DispatchDebugValuesTimer.Start(
int(next_refresh * 1000), oneShot=True)
event.Skip()
def KillDebugThread(self):
- tmp_debugthread = self.DebugThread
- self.debug_break = True
- if tmp_debugthread is not None:
- self.logger.writeyield(_("Stopping debugger...\n"))
- tmp_debugthread.join(timeout=5)
- if tmp_debugthread.isAlive() and self.logger:
- self.logger.write_warning(_("Couldn't stop debugger.\n"))
- else:
- self.logger.write(_("Debugger stopped.\n"))
- self.DebugThread = None
if self.DispatchDebugValuesTimer is not None:
self.DispatchDebugValuesTimer.Stop()
@@ -1672,9 +1623,6 @@
if self.DispatchDebugValuesTimer is not None:
self.DispatchDebugValuesTimer.Start(
int(REFRESH_PERIOD * 1000), oneShot=True)
- if self.DebugThread is None:
- self.DebugThread = Thread(target=self.DebugThreadProc)
- self.DebugThread.start()
def _Run(self):
"""
--- a/canfestival/canfestival.py Mon Mar 12 14:10:19 2018 +0000
+++ b/canfestival/canfestival.py Tue May 22 17:28:18 2018 +0300
@@ -38,20 +38,16 @@
LOCATION_CONFNODE, \
LOCATION_VAR_MEMORY
-try:
- from nodelist import NodeList
-except ImportError:
- base_folder = paths.AbsParentDir(__file__, 2)
- CanFestivalPath = os.path.join(base_folder, "CanFestival-3")
- sys.path.append(os.path.join(CanFestivalPath, "objdictgen"))
-
- from nodelist import NodeList
-
+base_folder = paths.AbsParentDir(__file__, 2) # noqa
+CanFestivalPath = os.path.join(base_folder, "CanFestival-3") # noqa
+sys.path.append(os.path.join(CanFestivalPath, "objdictgen")) # noqa
+
+# pylint: disable=wrong-import-position
+from nodelist import NodeList
from nodemanager import NodeManager
import gen_cfile
import eds_utils
import canfestival_config as local_canfestival_config # pylint: disable=import-error
-
from commondialogs import CreateNodeDialog
from subindextable import IECTypeConversion, SizeConversion
from canfestival import config_utils
--- a/connectors/PYRO/__init__.py Mon Mar 12 14:10:19 2018 +0000
+++ b/connectors/PYRO/__init__.py Tue May 22 17:28:18 2018 +0300
@@ -139,67 +139,24 @@
confnodesroot.logger.write_error(_("Cannot get PLC status - connection failed.\n"))
return None
+ _special_return_funcs = {
+ "StartPLC": False,
+ "GetTraceVariables": ("Broken", None),
+ "GetPLCstatus": ("Broken", None),
+ "RemoteExec": (-1, "RemoteExec script failed!")
+ }
+
class PyroProxyProxy(object):
"""
A proxy proxy class to handle Beremiz Pyro interface specific behavior.
And to put Pyro exception catcher in between caller and Pyro proxy
"""
- def __init__(self):
- # for safe use in from debug thread, must create a copy
- self.RemotePLCObjectProxyCopy = None
-
- def GetPyroProxy(self):
- """
- This func returns the real Pyro Proxy.
- Use this if you musn't keep reference to it.
- """
- return RemotePLCObjectProxy
-
- def _PyroStartPLC(self, *args, **kwargs):
- """
- confnodesroot._connector.GetPyroProxy() is used
- rather than RemotePLCObjectProxy because
- object is recreated meanwhile,
- so we must not keep ref to it here
- """
- current_status, _log_count = confnodesroot._connector.GetPyroProxy().GetPLCstatus()
- if current_status == "Dirty":
- # Some bad libs with static symbols may polute PLC
- # ask runtime to suicide and come back again
-
- confnodesroot.logger.write(_("Force runtime reload\n"))
- confnodesroot._connector.GetPyroProxy().ForceReload()
- confnodesroot._Disconnect()
- # let remote PLC time to resurect.(freeze app)
- sleep(0.5)
- confnodesroot._Connect()
- self.RemotePLCObjectProxyCopy = copy.copy(confnodesroot._connector.GetPyroProxy())
- return confnodesroot._connector.GetPyroProxy().StartPLC(*args, **kwargs)
- StartPLC = PyroCatcher(_PyroStartPLC, False)
-
- def _PyroGetTraceVariables(self):
- """
- for safe use in from debug thread, must use the copy
- """
- if self.RemotePLCObjectProxyCopy is None:
- self.RemotePLCObjectProxyCopy = copy.copy(confnodesroot._connector.GetPyroProxy())
- return self.RemotePLCObjectProxyCopy.GetTraceVariables()
- GetTraceVariables = PyroCatcher(_PyroGetTraceVariables, ("Broken", None))
-
- def _PyroGetPLCstatus(self):
- return RemotePLCObjectProxy.GetPLCstatus()
- GetPLCstatus = PyroCatcher(_PyroGetPLCstatus, ("Broken", None))
-
- def _PyroRemoteExec(self, script, **kwargs):
- return RemotePLCObjectProxy.RemoteExec(script, **kwargs)
- RemoteExec = PyroCatcher(_PyroRemoteExec, (-1, "RemoteExec script failed!"))
-
def __getattr__(self, attrName):
member = self.__dict__.get(attrName, None)
if member is None:
def my_local_func(*args, **kwargs):
return RemotePLCObjectProxy.__getattr__(attrName)(*args, **kwargs)
- member = PyroCatcher(my_local_func, None)
+ member = PyroCatcher(my_local_func, _special_return_funcs.get(attrName, None))
self.__dict__[attrName] = member
return member
--- a/controls/LogViewer.py Mon Mar 12 14:10:19 2018 +0000
+++ b/controls/LogViewer.py Tue May 22 17:28:18 2018 +0300
@@ -396,7 +396,7 @@
self.HasNewData = False
def SetLogSource(self, log_source):
- self.LogSource = proxy(log_source) if log_source else None
+ self.LogSource = proxy(log_source) if log_source is not None else None
self.CleanButton.Enable(self.LogSource is not None)
if log_source is not None:
self.ResetLogMessages()
--- a/editors/ConfTreeNodeEditor.py Mon Mar 12 14:10:19 2018 +0000
+++ b/editors/ConfTreeNodeEditor.py Tue May 22 17:28:18 2018 +0300
@@ -489,6 +489,7 @@
textctrl.ChangeValue(str(element_infos["value"]))
callback = self.GetTextCtrlCallBackFunction(textctrl, element_path)
textctrl.Bind(wx.EVT_TEXT_ENTER, callback)
+ textctrl.Bind(wx.EVT_TEXT, callback)
textctrl.Bind(wx.EVT_KILL_FOCUS, callback)
first = False
sizer.Layout()
--- a/runtime/PLCObject.py Mon Mar 12 14:10:19 2018 +0000
+++ b/runtime/PLCObject.py Tue May 22 17:28:18 2018 +0300
@@ -23,7 +23,8 @@
from __future__ import absolute_import
-from threading import Timer, Thread, Lock, Semaphore, Event
+import thread
+from threading import Thread, Lock, Semaphore, Event, Condition
import ctypes
import os
import sys
@@ -60,31 +61,139 @@
sys.stdout.flush()
+class job(object):
+ """
+ job to be executed by a worker
+ """
+ def __init__(self, call, *args, **kwargs):
+ self.job = (call, args, kwargs)
+ self.result = None
+ self.success = False
+ self.exc_info = None
+
+ def do(self):
+ """
+ do the job by executing the call, and deal with exceptions
+ """
+ try:
+ call, args, kwargs = self.job
+ self.result = call(*args, **kwargs)
+ self.success = True
+ except Exception:
+ self.success = False
+ self.exc_info = sys.exc_info()
+
+
+class worker(object):
+ """
+ serialize main thread load/unload of PLC shared objects
+ """
+ def __init__(self):
+ # Only one job at a time
+ self._finish = False
+ self._threadID = None
+ self.mutex = Lock()
+ self.todo = Condition(self.mutex)
+ self.done = Condition(self.mutex)
+ self.free = Condition(self.mutex)
+ self.job = None
+
+ def runloop(self, *args, **kwargs):
+ """
+ meant to be called by worker thread (blocking)
+ """
+ self._threadID = thread.get_ident()
+ if args or kwargs:
+ job(*args, **kwargs).do()
+ # result is ignored
+ self.mutex.acquire()
+ while not self._finish:
+ self.todo.wait()
+ if self.job is not None:
+ self.job.do()
+ self.done.notify()
+ else:
+ self.free.notify()
+ self.mutex.release()
+
+ def call(self, *args, **kwargs):
+ """
+ creates a job, execute it in worker thread, and deliver result.
+ if job execution raise exception, re-raise same exception
+ meant to be called by non-worker threads, but this is accepted.
+ blocking until job done
+ """
+
+ _job = job(*args, **kwargs)
+
+ if self._threadID == thread.get_ident() or self._threadID is None:
+ # if caller is worker thread execute immediately
+ _job.do()
+ else:
+ # otherwise notify and wait for completion
+ self.mutex.acquire()
+
+ while self.job is not None:
+ self.free.wait()
+
+ self.job = _job
+ self.todo.notify()
+ self.done.wait()
+ _job = self.job
+ self.job = None
+ self.mutex.release()
+
+ if _job.success:
+ return _job.result
+ else:
+ raise _job.exc_info[0], _job.exc_info[1], _job.exc_info[2]
+
+ def quit(self):
+ """
+ unblocks main thread, and terminate execution of runloop()
+ """
+ # mark queue
+ self._finish = True
+ self.mutex.acquire()
+ self.job = None
+ self.todo.notify()
+ self.mutex.release()
+
+
+MainWorker = worker()
+
+
+def RunInMain(func):
+ def func_wrapper(*args, **kwargs):
+ return MainWorker.call(func, *args, **kwargs)
+ return func_wrapper
+
+
class PLCObject(pyro.ObjBase):
- def __init__(self, workingdir, daemon, argv, statuschange, evaluator, pyruntimevars):
+ def __init__(self, server):
pyro.ObjBase.__init__(self)
- self.evaluator = evaluator
- self.argv = [workingdir] + argv # force argv[0] to be "path" to exec...
- self.workingdir = workingdir
+ self.evaluator = server.evaluator
+ self.argv = [server.workdir] + server.argv # force argv[0] to be "path" to exec...
+ self.workingdir = server.workdir
self.PLCStatus = "Empty"
self.PLClibraryHandle = None
self.PLClibraryLock = Lock()
self.DummyIteratorLock = None
# Creates fake C funcs proxies
- self._FreePLC()
- self.daemon = daemon
- self.statuschange = statuschange
+ self._InitPLCStubCalls()
+ self.daemon = server.daemon
+ self.statuschange = server.statuschange
self.hmi_frame = None
- self.pyruntimevars = pyruntimevars
+ self.pyruntimevars = server.pyruntimevars
self._loading_error = None
self.python_runtime_vars = None
self.TraceThread = None
self.TraceLock = Lock()
- self.TraceWakeup = Event()
self.Traces = []
+ # First task of worker -> no @RunInMain
def AutoLoad(self):
- # Get the last transfered PLC if connector must be restart
+ # Get the last transferred PLC
try:
self.CurrentPLCFilename = open(
self._GetMD5FileName(),
@@ -100,6 +209,7 @@
for callee in self.statuschange:
callee(self.PLCStatus)
+ @RunInMain
def LogMessage(self, *args):
if len(args) == 2:
level, msg = args
@@ -111,16 +221,19 @@
return self._LogMessage(level, msg, len(msg))
return None
+ @RunInMain
def ResetLogCount(self):
if self._ResetLogCount is not None:
self._ResetLogCount()
+ # used internally
def GetLogCount(self, level):
if self._GetLogCount is not None:
return int(self._GetLogCount(level))
elif self._loading_error is not None and level == 0:
return 1
+ @RunInMain
def GetLogMessage(self, level, msgid):
tick = ctypes.c_uint32()
tv_sec = ctypes.c_uint32()
@@ -145,12 +258,13 @@
def _GetLibFileName(self):
return os.path.join(self.workingdir, self.CurrentPLCFilename)
- def LoadPLC(self):
+ def _LoadPLC(self):
"""
Load PLC library
Declare all functions, arguments and return values
"""
md5 = open(self._GetMD5FileName(), "r").read()
+ self.PLClibraryLock.acquire()
try:
self._PLClibraryHandle = dlopen(self._GetLibFileName())
self.PLClibraryHandle = ctypes.CDLL(self.CurrentPLCFilename, handle=self._PLClibraryHandle)
@@ -227,25 +341,34 @@
self._loading_error = None
- self.PythonRuntimeInit()
-
- return True
except Exception:
self._loading_error = traceback.format_exc()
PLCprint(self._loading_error)
return False
-
+ finally:
+ self.PLClibraryLock.release()
+
+ return True
+
+ @RunInMain
+ def LoadPLC(self):
+ res = self._LoadPLC()
+ if res:
+ self.PythonRuntimeInit()
+ else:
+ self._FreePLC()
+
+ return res
+
+ @RunInMain
def UnLoadPLC(self):
self.PythonRuntimeCleanup()
self._FreePLC()
- def _FreePLC(self):
- """
- Unload PLC library.
- This is also called by __init__ to create dummy C func proxies
- """
- self.PLClibraryLock.acquire()
- # Forget all refs to library
+ def _InitPLCStubCalls(self):
+ """
+ create dummy C func proxies
+ """
self._startPLC = lambda x, y: None
self._stopPLC = lambda: None
self._ResetDebugVariables = lambda: None
@@ -259,13 +382,26 @@
self._GetLogCount = None
self._LogMessage = None
self._GetLogMessage = None
+ self._PLClibraryHandle = None
self.PLClibraryHandle = None
- # Unload library explicitely
- if getattr(self, "_PLClibraryHandle", None) is not None:
- dlclose(self._PLClibraryHandle)
- self._PLClibraryHandle = None
-
- self.PLClibraryLock.release()
+
+ def _FreePLC(self):
+ """
+ Unload PLC library.
+ This is also called by __init__ to create dummy C func proxies
+ """
+ self.PLClibraryLock.acquire()
+ try:
+ # Unload library explicitely
+ if getattr(self, "_PLClibraryHandle", None) is not None:
+ dlclose(self._PLClibraryHandle)
+
+ # Forget all refs to library
+ self._InitPLCStubCalls()
+
+ finally:
+ self.PLClibraryLock.release()
+
return False
def PythonRuntimeCall(self, methodname):
@@ -278,6 +414,7 @@
if exp is not None:
self.LogMessage(0, '\n'.join(traceback.format_exception(*exp)))
+ # used internally
def PythonRuntimeInit(self):
MethodNames = ["init", "start", "stop", "cleanup"]
self.python_runtime_vars = globals().copy()
@@ -329,6 +466,7 @@
self.PythonRuntimeCall("init")
+ # used internally
def PythonRuntimeCleanup(self):
if self.python_runtime_vars is not None:
self.PythonRuntimeCall("cleanup")
@@ -340,10 +478,8 @@
res, cmd, blkid = "None", "None", ctypes.c_void_p()
compile_cache = {}
while True:
- # print "_PythonIterator(", res, ")",
cmd = self._PythonIterator(res, blkid)
FBID = blkid.value
- # print " -> ", cmd, blkid
if cmd is None:
break
try:
@@ -364,6 +500,7 @@
res = "#EXCEPTION : "+str(e)
self.LogMessage(1, ('PyEval@0x%x(Code="%s") Exception "%s"') % (FBID, cmd, str(e)))
+ @RunInMain
def StartPLC(self):
if self.CurrentPLCFilename is not None and self.PLCStatus == "Stopped":
c_argv = ctypes.c_char_p * len(self.argv)
@@ -382,6 +519,7 @@
self.PLCStatus = "Broken"
self.StatusChange()
+ @RunInMain
def StopPLC(self):
if self.PLCStatus == "Started":
self.LogMessage("PLC stopped")
@@ -391,40 +529,38 @@
self.StatusChange()
self.PythonRuntimeCall("stop")
if self.TraceThread is not None:
- self.TraceWakeup.set()
self.TraceThread.join()
self.TraceThread = None
return True
return False
- def _Reload(self):
- self.daemon.shutdown(True)
- self.daemon.sock.close()
- os.execv(sys.executable, [sys.executable]+sys.argv[:])
- # never reached
- return 0
-
- def ForceReload(self):
- # respawn python interpreter
- Timer(0.1, self._Reload).start()
- return True
-
+ @RunInMain
def GetPLCstatus(self):
return self.PLCStatus, map(self.GetLogCount, xrange(LogLevelsCount))
+ @RunInMain
def NewPLC(self, md5sum, data, extrafiles):
if self.PLCStatus in ["Stopped", "Empty", "Broken"]:
NewFileName = md5sum + lib_ext
extra_files_log = os.path.join(self.workingdir, "extra_files.txt")
- self.UnLoadPLC()
+ old_PLC_filename = os.path.join(self.workingdir, self.CurrentPLCFilename) \
+ if self.CurrentPLCFilename is not None \
+ else None
+ new_PLC_filename = os.path.join(self.workingdir, NewFileName)
+
+ # Some platform (i.e. Xenomai) don't like reloading same .so file
+ replace_PLC_shared_object = new_PLC_filename != old_PLC_filename
+
+ if replace_PLC_shared_object:
+ self.UnLoadPLC()
self.LogMessage("NewPLC (%s)" % md5sum)
self.PLCStatus = "Empty"
try:
- os.remove(os.path.join(self.workingdir,
- self.CurrentPLCFilename))
+ if replace_PLC_shared_object:
+ os.remove(old_PLC_filename)
for filename in file(extra_files_log, "r").readlines() + [extra_files_log]:
try:
os.remove(os.path.join(self.workingdir, filename.strip()))
@@ -435,8 +571,8 @@
try:
# Create new PLC file
- open(os.path.join(self.workingdir, NewFileName),
- 'wb').write(data)
+ if replace_PLC_shared_object:
+ open(new_PLC_filename, 'wb').write(data)
# Store new PLC filename based on md5 key
open(self._GetMD5FileName(), "w").write(md5sum)
@@ -456,11 +592,12 @@
PLCprint(traceback.format_exc())
return False
- if self.LoadPLC():
+ if not replace_PLC_shared_object:
+ self.PLCStatus = "Stopped"
+ elif self.LoadPLC():
self.PLCStatus = "Stopped"
else:
self.PLCStatus = "Broken"
- self._FreePLC()
self.StatusChange()
return self.PLCStatus == "Stopped"
@@ -496,14 +633,6 @@
else:
self._suspendDebug(True)
- def _TracesPush(self, trace):
- self.TraceLock.acquire()
- lT = len(self.Traces)
- if lT != 0 and lT * len(self.Traces[0]) > 1024 * 1024:
- self.Traces.pop(0)
- self.Traces.append(trace)
- self.TraceLock.release()
-
def _TracesSwap(self):
self.LastSwapTrace = time()
if self.TraceThread is None and self.PLCStatus == "Started":
@@ -513,26 +642,9 @@
Traces = self.Traces
self.Traces = []
self.TraceLock.release()
- self.TraceWakeup.set()
return Traces
- def _TracesAutoSuspend(self):
- # TraceProc stops here if Traces not polled for 3 seconds
- traces_age = time() - self.LastSwapTrace
- if traces_age > 3:
- self.TraceLock.acquire()
- self.Traces = []
- self.TraceLock.release()
- self._suspendDebug(True) # Disable debugger
- self.TraceWakeup.clear()
- self.TraceWakeup.wait()
- self._resumeDebug() # Re-enable debugger
-
- def _TracesFlush(self):
- self.TraceLock.acquire()
- self.Traces = []
- self.TraceLock.release()
-
+ @RunInMain
def GetTraceVariables(self):
return self.PLCStatus, self._TracesSwap()
@@ -540,23 +652,47 @@
"""
Return a list of traces, corresponding to the list of required idx
"""
+ self._resumeDebug() # Re-enable debugger
while self.PLCStatus == "Started":
tick = ctypes.c_uint32()
size = ctypes.c_uint32()
buff = ctypes.c_void_p()
TraceBuffer = None
- if self.PLClibraryLock.acquire(False):
- if self._GetDebugData(ctypes.byref(tick),
- ctypes.byref(size),
- ctypes.byref(buff)) == 0:
- if size.value:
- TraceBuffer = ctypes.string_at(buff.value, size.value)
- self._FreeDebugData()
- self.PLClibraryLock.release()
+
+ self.PLClibraryLock.acquire()
+
+ res = self._GetDebugData(ctypes.byref(tick),
+ ctypes.byref(size),
+ ctypes.byref(buff))
+ if res == 0:
+ if size.value:
+ TraceBuffer = ctypes.string_at(buff.value, size.value)
+ self._FreeDebugData()
+
+ self.PLClibraryLock.release()
+
+ # leave thread if GetDebugData isn't happy.
+ if res != 0:
+ break
+
if TraceBuffer is not None:
- self._TracesPush((tick.value, TraceBuffer))
- self._TracesAutoSuspend()
- self._TracesFlush()
+ self.TraceLock.acquire()
+ lT = len(self.Traces)
+ if lT != 0 and lT * len(self.Traces[0]) > 1024 * 1024:
+ self.Traces.pop(0)
+ self.Traces.append((tick.value, TraceBuffer))
+ self.TraceLock.release()
+
+ # TraceProc stops here if Traces not polled for 3 seconds
+ traces_age = time() - self.LastSwapTrace
+ if traces_age > 3:
+ self.TraceLock.acquire()
+ self.Traces = []
+ self.TraceLock.release()
+ self._suspendDebug(True) # Disable debugger
+ break
+
+ self.TraceThread = None
def RemoteExec(self, script, *kwargs):
try:
--- a/runtime/__init__.py Mon Mar 12 14:10:19 2018 +0000
+++ b/runtime/__init__.py Tue May 22 17:28:18 2018 +0300
@@ -24,5 +24,5 @@
from __future__ import absolute_import
import os
-from runtime.PLCObject import PLCObject, PLCprint
+from runtime.PLCObject import PLCObject, PLCprint, MainWorker
import runtime.ServicePublisher
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/runtime/xenomai.py Tue May 22 17:28:18 2018 +0300
@@ -0,0 +1,16 @@
+from ctypes import CDLL, RTLD_GLOBAL, pointer, c_int, POINTER, c_char, create_string_buffer
+def TryPreloadXenomai():
+ """
+ Xenomai 3 (at least up to version 3.0.6) does not properly handle dlclose
+ of shared objects whose dlopen triggered xenomai_init.
+ As a workaround, this pre-loads the Xenomai libraries that need to be
+ initialized and calls xenomai_init once and for all.
+
+ Xenomai auto init of libs MUST be disabled (see --auto-init-solib in xeno-config)
+ """
+ try:
+ for name in ["cobalt", "modechk", "copperplate", "alchemy"]:
+ globals()[name] = CDLL("lib"+name+".so", mode=RTLD_GLOBAL)
+ cobalt.xenomai_init(pointer(c_int(0)), pointer((POINTER(c_char)*2)(create_string_buffer("prog_name"), None)))
+ except:
+ pass
--- a/targets/Xenomai/__init__.py Mon Mar 12 14:10:19 2018 +0000
+++ b/targets/Xenomai/__init__.py Tue May 22 17:28:18 2018 +0300
@@ -37,7 +37,7 @@
if xeno_config:
from util.ProcessLogger import ProcessLogger
status, result, _err_result = ProcessLogger(self.CTRInstance.logger,
- xeno_config + " --skin=native --"+flagsname,
+ xeno_config + " --skin=posix --skin=alchemy --no-auto-init --"+flagsname,
no_stdout=True).spin()
if status:
self.CTRInstance.logger.write_error(_("Unable to get Xenomai's %s \n") % flagsname)
--- a/targets/Xenomai/plc_Xenomai_main.c Mon Mar 12 14:10:19 2018 +0000
+++ b/targets/Xenomai/plc_Xenomai_main.c Tue May 22 17:28:18 2018 +0300
@@ -11,10 +11,10 @@
#include <sys/mman.h>
#include <sys/fcntl.h>
-#include <native/task.h>
-#include <native/timer.h>
-#include <native/sem.h>
-#include <native/pipe.h>
+#include <alchemy/task.h>
+#include <alchemy/timer.h>
+#include <alchemy/sem.h>
+#include <alchemy/pipe.h>
unsigned int PLC_state = 0;
#define PLC_STATE_TASK_CREATED 1
@@ -37,6 +37,15 @@
#define PYTHON_PIPE_MINOR 3
#define PIPE_SIZE 1
+// rt-pipes commands
+
+#define PYTHON_PENDING_COMMAND 1
+#define PYTHON_FINISH 2
+
+#define DEBUG_FINISH 2
+
+#define DEBUG_PENDING_DATA 1
+#define DEBUG_UNLOCK 1
long AtomicCompareExchange(long* atomicvar,long compared, long exchange)
{
@@ -82,6 +91,18 @@
if (PLC_shutdown) break;
rt_task_wait_period(NULL);
}
+ /* since Xenomai 3 it is no longer enough to close()
+ the file descriptor to unblock read()... */
+ {
+ /* explicitly finish python thread */
+ char msg = PYTHON_FINISH;
+ rt_pipe_write(&WaitPython_pipe, &msg, sizeof(msg), P_NORMAL);
+ }
+ {
+ /* explicitly finish debug thread */
+ char msg = DEBUG_FINISH;
+ rt_pipe_write(&WaitDebug_pipe, &msg, sizeof(msg), P_NORMAL);
+ }
}
static unsigned long __debug_tick;
@@ -159,6 +180,15 @@
exit(0);
}
+#define _startPLCLog(text) \
+ {\
+ char mstr[] = text;\
+ LogMessage(LOG_CRITICAL, mstr, sizeof(mstr));\
+ goto error;\
+ }
+
+#define FO "Failed opening "
+
#define max_val(a,b) ((a>b)?a:b)
int startPLC(int argc,char **argv)
{
@@ -171,49 +201,55 @@
/*** RT Pipes creation and opening ***/
/* create Debug_pipe */
- if(rt_pipe_create(&Debug_pipe, "Debug_pipe", DEBUG_PIPE_MINOR, PIPE_SIZE))
- goto error;
+ if(rt_pipe_create(&Debug_pipe, "Debug_pipe", DEBUG_PIPE_MINOR, PIPE_SIZE) < 0)
+ _startPLCLog(FO "Debug_pipe real-time end");
PLC_state |= PLC_STATE_DEBUG_PIPE_CREATED;
/* open Debug_pipe*/
- if((Debug_pipe_fd = open(DEBUG_PIPE_DEVICE, O_RDWR)) == -1) goto error;
+ if((Debug_pipe_fd = open(DEBUG_PIPE_DEVICE, O_RDWR)) == -1)
+ _startPLCLog(FO DEBUG_PIPE_DEVICE);
PLC_state |= PLC_STATE_DEBUG_FILE_OPENED;
/* create Python_pipe */
- if(rt_pipe_create(&Python_pipe, "Python_pipe", PYTHON_PIPE_MINOR, PIPE_SIZE))
- goto error;
+ if(rt_pipe_create(&Python_pipe, "Python_pipe", PYTHON_PIPE_MINOR, PIPE_SIZE) < 0)
+ _startPLCLog(FO "Python_pipe real-time end");
PLC_state |= PLC_STATE_PYTHON_PIPE_CREATED;
/* open Python_pipe*/
- if((Python_pipe_fd = open(PYTHON_PIPE_DEVICE, O_RDWR)) == -1) goto error;
+ if((Python_pipe_fd = open(PYTHON_PIPE_DEVICE, O_RDWR)) == -1)
+ _startPLCLog(FO PYTHON_PIPE_DEVICE);
PLC_state |= PLC_STATE_PYTHON_FILE_OPENED;
/* create WaitDebug_pipe */
- if(rt_pipe_create(&WaitDebug_pipe, "WaitDebug_pipe", WAITDEBUG_PIPE_MINOR, PIPE_SIZE))
- goto error;
+ if(rt_pipe_create(&WaitDebug_pipe, "WaitDebug_pipe", WAITDEBUG_PIPE_MINOR, PIPE_SIZE) < 0)
+ _startPLCLog(FO "WaitDebug_pipe real-time end");
PLC_state |= PLC_STATE_WAITDEBUG_PIPE_CREATED;
/* open WaitDebug_pipe*/
- if((WaitDebug_pipe_fd = open(WAITDEBUG_PIPE_DEVICE, O_RDWR)) == -1) goto error;
+ if((WaitDebug_pipe_fd = open(WAITDEBUG_PIPE_DEVICE, O_RDWR)) == -1)
+ _startPLCLog(FO WAITDEBUG_PIPE_DEVICE);
PLC_state |= PLC_STATE_WAITDEBUG_FILE_OPENED;
/* create WaitPython_pipe */
- if(rt_pipe_create(&WaitPython_pipe, "WaitPython_pipe", WAITPYTHON_PIPE_MINOR, PIPE_SIZE))
- goto error;
+ if(rt_pipe_create(&WaitPython_pipe, "WaitPython_pipe", WAITPYTHON_PIPE_MINOR, PIPE_SIZE) < 0)
+ _startPLCLog(FO "WaitPython_pipe real-time end");
PLC_state |= PLC_STATE_WAITPYTHON_PIPE_CREATED;
/* open WaitPython_pipe*/
- if((WaitPython_pipe_fd = open(WAITPYTHON_PIPE_DEVICE, O_RDWR)) == -1) goto error;
+ if((WaitPython_pipe_fd = open(WAITPYTHON_PIPE_DEVICE, O_RDWR)) == -1)
+ _startPLCLog(FO WAITPYTHON_PIPE_DEVICE);
PLC_state |= PLC_STATE_WAITPYTHON_FILE_OPENED;
/*** create PLC task ***/
- if(rt_task_create(&PLC_task, "PLC_task", 0, 50, T_JOINABLE)) goto error;
+ if(rt_task_create(&PLC_task, "PLC_task", 0, 50, T_JOINABLE))
+ _startPLCLog("Failed creating PLC task");
PLC_state |= PLC_STATE_TASK_CREATED;
if(__init(argc,argv)) goto error;
/* start PLC task */
- if(rt_task_start(&PLC_task, &PLC_task_proc, NULL)) goto error;
+ if(rt_task_start(&PLC_task, &PLC_task_proc, NULL))
+ _startPLCLog("Failed starting PLC task");
return 0;
@@ -241,7 +277,6 @@
return 0;
}
-#define DEBUG_UNLOCK 1
void LeaveDebugSection(void)
{
if(AtomicCompareExchange( &debug_state,
@@ -254,7 +289,6 @@
extern unsigned long __tick;
-#define DEBUG_PENDING_DATA 1
int WaitDebugData(unsigned long *tick)
{
char cmd;
@@ -304,8 +338,6 @@
AtomicCompareExchange( &debug_state, DEBUG_BUSY, DEBUG_FREE);
}
-#define PYTHON_PENDING_COMMAND 1
-
#define PYTHON_FREE 0
#define PYTHON_BUSY 1
static long python_state = PYTHON_FREE;
--- a/targets/plc_main_head.c Mon Mar 12 14:10:19 2018 +0000
+++ b/targets/plc_main_head.c Tue May 22 17:28:18 2018 +0300
@@ -35,6 +35,9 @@
/* Help to quit cleanly when init fail at a certain level */
static int init_level = 0;
+/* Prototype for Logging to help spotting errors at init */
+int LogMessage(uint8_t level, char* buf, uint32_t size);
+
/*
* Prototypes of functions exported by plugins
**/