--- a/svghmi/svghmi.c Thu Sep 12 12:56:47 2019 +0200
+++ b/svghmi/svghmi.c Mon Sep 16 10:54:15 2019 +0200
@@ -118,9 +118,9 @@
if(dsc->wstate == buf_tosend){
// send
- // TODO call the python callback
-
- dsc->wstate = buf_free;
+ // TODO pack data in buffer
+
+ dsc->wstate = buf_free;
}
AtomicCompareExchange(&dsc->wlock, 1, 0);
@@ -138,18 +138,24 @@
memcpy(visible_value_p, src_p, __get_type_enum_size(dsc->type));
}
+static pthread_cond_t UART_WakeCond = PTHREAD_COND_INITIALIZER;
+static pthread_mutex_t UART_WakeCondLock = PTHREAD_MUTEX_INITIALIZER;
+
+/* set in __init_svghmi, cleared by __cleanup_svghmi to stop the send thread */
+static int continue_collect;
+
int __init_svghmi()
{
bzero(rbuf,sizeof(rbuf));
bzero(wbuf,sizeof(wbuf));
-
- // TODO - sending pthread condition variable
+ continue_collect = 1;
return 0;
}
void __cleanup_svghmi()
{
+ pthread_mutex_lock(&UART_WakeCondLock);
+ continue_collect = 0;
+ pthread_cond_signal(&UART_WakeCond);
+ pthread_mutex_unlock(&UART_WakeCondLock);
}
void __retrieve_svghmi()
@@ -162,21 +168,37 @@
global_write_dirty = 0;
traverse_hmi_tree(write_iterator);
if(global_write_dirty) {
- // TODO : set condition variable to wakeup sending collector
- }
-
-}
-
-void* collect_updates_to_send(void* args){
-
- // TODO : get callback from args
-
-
- // TODO : wait for
- // - condition variable
-
- // TODO add arg to traverse_hmi_tree to pass callback
-
- traverse_hmi_tree(send_iterator);
-
-}
+ pthread_cond_signal(&UART_WakeCond);
+ }
+}
+
+/* PYTHON CALLS */
+int svghmi_send_collect(uint32_t *size, void **ptr){
+
+    int do_collect;
+
+    pthread_mutex_lock(&UART_WakeCondLock);
+    do_collect = continue_collect;
+    if(do_collect)
+        pthread_cond_wait(&UART_WakeCond, &UART_WakeCondLock);
+    do_collect = continue_collect;
+    pthread_mutex_unlock(&UART_WakeCondLock);
+
+    if(do_collect) {
+        traverse_hmi_tree(send_iterator);
+        /* TODO: point *ptr at the packed send buffer and set *size */
+        return 0;
+    } else {
+        return EINTR;
+    }
+}
+
+int svghmi_recv_dispatch(uint32_t size, void* ptr){
+    /* TODO do something with ptr and size:
+       - subscribe
+       or
+       - spread values
+    */
+    return 0;
+}
+
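
Reviewer note: the C hunks above implement a condition-variable handshake: the publish path signals UART_WakeCond when writes are dirty, svghmi_send_collect sleeps on it, and __cleanup_svghmi clears continue_collect before signalling so the blocked call returns EINTR and the Python send thread can exit. A minimal Python model of the same handshake (illustration only; names mirror the C statics, not project code):

    import threading

    wake = threading.Condition()   # stands in for UART_WakeCond + UART_WakeCondLock
    continue_collect = True        # same role as the C global

    def publish_wakeup():
        # publish path: wake the collector when writes are dirty
        with wake:
            wake.notify()

    def cleanup():
        # __cleanup_svghmi path: request exit, then wake the sleeping collector
        global continue_collect
        with wake:
            continue_collect = False
            wake.notify()

    def send_collect():
        # svghmi_send_collect path: sleep until woken, then report whether
        # the caller should collect (True) or bail out like EINTR (False)
        with wake:
            if continue_collect:
                wake.wait()
            return continue_collect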
--- a/svghmi/svghmi.py Thu Sep 12 12:56:47 2019 +0200
+++ b/svghmi/svghmi.py Mon Sep 16 10:54:15 2019 +0200
@@ -170,6 +170,7 @@
variable_decl_array = []
extern_variables_declarations = []
buf_index = 0
+ item_count = 0
for node in hmi_tree_root.traverse():
if hasattr(node, "iectype"):
sz = DebugTypesSize.get(node.iectype, 0)
@@ -183,6 +184,7 @@
}[node.vartype] + ", " +
str(buf_index) + ", 0, }"]
buf_index += sz
+ item_count += 1
if len(node.path) == 1:
extern_variables_declarations += [
"extern __IEC_" + node.iectype + "_" +
@@ -208,6 +210,7 @@
"variable_decl_array": ",\n".join(variable_decl_array),
"extern_variables_declarations": "\n".join(extern_variables_declarations),
"buffer_size": buf_index,
+ "item_count": item_count,
"var_access_code": targets.GetCode("var_access.c"),
"PLC_ticktime": self.GetCTR().GetTicktime()
}
--- a/svghmi/svghmi_server.py Thu Sep 12 12:56:47 2019 +0200
+++ b/svghmi/svghmi_server.py Mon Sep 16 10:54:15 2019 +0200
@@ -31,7 +31,7 @@
svghmi_recv_dispatch = PLCBinary.svghmi_recv_dispatch
svghmi_recv_dispatch.restype = ctypes.c_int # error or 0
svghmi_recv_dispatch.argtypes = [
- ctypes.POINTER(ctypes.c_uint32), # size
+ ctypes.c_uint32, # size
-    ctypes.POINTER(ctypes.c_void_p)] # data ptr
+    ctypes.c_void_p]                 # data ptr, matching void* on the C side
# TODO multiclient : switch to arrays
@@ -39,16 +39,18 @@
assert(svghmi_session)
size = ctypes.c_uint32()
ptr = ctypes.c_void_p()
-    while res == 0:
-        res = svghmi_send_collect(ctypes.byref(size), ctypes.byref(ptr))
+    while svghmi_send_collect(ctypes.byref(size), ctypes.byref(ptr)) == 0 and \
+          svghmi_session is not None and \
+          svghmi_session.sendMessage(ctypes.string_at(ptr, size.value)) == 0:
+        pass
- # TODO multiclient : dispatch to sessions
- svghmi_session.sendMessage(ctypes.string_at(ptr,size))
+ # TODO multiclient : dispatch to sessions
class HMISession(object):
def __init__(self, protocol_instance):
global svghmi_session
-
+
# TODO: kill existing session for robustness
assert(svghmi_session is None)
@@ -59,7 +61,7 @@
# svghmi_sessions.append(self)
        # get a unique bit index among other svghmi_sessions,
# so that we can match flags passed by C->python callback
-
+
def __del__(self):
global svghmi_session
assert(svghmi_session)
@@ -76,7 +78,7 @@
# TODO multiclient : pass client index as well
pass
-
+
def sendMessage(self, msg):
-        self.sendMessage(msg, True)
+        # delegate to the protocol instance; return 0 so the send loop continues
+        self.protocol_instance.sendMessage(msg, True)
+        return 0
@@ -99,31 +101,40 @@
self._hmi_session.onMessage(msg)
print msg
#self.sendMessage(msg, binary)
-
+
svghmi_root = None
svghmi_listener = None
+svghmi_send_thread = None
+
# Called by PLCObject at start
def _runtime_svghmi0_start():
- global svghmi_listener, svghmi_root
+ global svghmi_listener, svghmi_root, svghmi_send_thread
svghmi_root = Resource()
wsfactory = WebSocketServerFactory()
wsfactory.protocol = HMIProtocol
- # svghmi_root.putChild("",File(".svg"))
- svghmi_root.putChild("ws",WebSocketResource(wsfactory))
+ svghmi_root.putChild("ws", WebSocketResource(wsfactory))
sitefactory = Site(svghmi_root)
svghmi_listener = reactor.listenTCP(8008, sitefactory)
- # TODO
# start a thread that call the C part of SVGHMI
+ svghmi_send_thread = Thread(target=SendThreadProc, name="SVGHMI Send")
+ svghmi_send_thread.start()
# Called by PLCObject at stop
def _runtime_svghmi0_stop():
- global svghmi_listener
+ global svghmi_listener, svghmi_root, svghmi_send_thread
+ svghmi_root.delEntity("ws")
+ svghmi_root = None
svghmi_listener.stopListening()
+ svghmi_listener = None
+ # plc cleanup calls svghmi_(locstring)_cleanup and unlocks send thread
+ svghmi_send_thread.join()
+ svghmi_send_thread = None
+
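
Reviewer note: SendThreadProc drives C out-parameters through ctypes.byref, and ctypes.string_at wants a plain integer length, hence size.value in the fixed loop above. A self-contained sketch of the same calling convention, with a CFUNCTYPE-wrapped Python function standing in for PLCBinary.svghmi_send_collect (fake_collect and payload are invented names):

    import ctypes

    COLLECT_T = ctypes.CFUNCTYPE(ctypes.c_int,
                                 ctypes.POINTER(ctypes.c_uint32),
                                 ctypes.POINTER(ctypes.c_void_p))

    payload = ctypes.create_string_buffer(b"hello")  # pretend collected frame

    def fake_collect(size_p, ptr_p):
        # The real C function would point these at its internal send buffer.
        size_p[0] = 5
        ptr_p[0] = ctypes.addressof(payload)
        return 0

    collect = COLLECT_T(fake_collect)

    size = ctypes.c_uint32()
    ptr = ctypes.c_void_p()
    if collect(ctypes.byref(size), ctypes.byref(ptr)) == 0:
        print(ctypes.string_at(ptr, size.value))  # -> hello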