SVGHMI: Work in progress. C side mostly implemented, neither built nor tested.
--- a/svghmi/gen_index_xhtml.xslt Fri Sep 27 06:54:35 2019 +0200
+++ b/svghmi/gen_index_xhtml.xslt Mon Sep 30 13:26:11 2019 +0200
@@ -46,22 +46,52 @@
</xsl:text>
<xsl:text>}
</xsl:text>
+ <xsl:text>// svghmi.js
+</xsl:text>
+ <xsl:text>
+</xsl:text>
<xsl:text>(function(){
</xsl:text>
- <xsl:text> var relative_URI = window.location.href.replace(/^http(s?:\/\/[^\/]*)\/.*$/, 'ws$1/ws');
+ <xsl:text> // Open WebSocket to relative "/ws" address
</xsl:text>
- <xsl:text> var ws = new WebSocket(relative_URI);
+ <xsl:text> var ws = new WebSocket(window.location.href.replace(/^http(s?:\/\/[^\/]*)\/.*$/, 'ws$1/ws'));
+</xsl:text>
+ <xsl:text>
+</xsl:text>
+ <xsl:text> // Register message reception handler
</xsl:text>
<xsl:text> ws.onmessage = function (evt) {
</xsl:text>
+ <xsl:text> // TODO : dispatch and cache hmi tree updates
+</xsl:text>
+ <xsl:text>
+</xsl:text>
<xsl:text> var received_msg = evt.data;
</xsl:text>
+ <xsl:text> // TODO : check for hmitree hash header
+</xsl:text>
+ <xsl:text> // if not matching, reload page
+</xsl:text>
<xsl:text> alert("Message is received..."+received_msg);
</xsl:text>
<xsl:text> };
</xsl:text>
+ <xsl:text>
+</xsl:text>
+ <xsl:text> // Once connection established
+</xsl:text>
<xsl:text> ws.onopen = function (evt) {
</xsl:text>
+ <xsl:text> // TODO : enable the HMI (was previously offline, or just starts)
+</xsl:text>
+ <xsl:text> // show main page
+</xsl:text>
+ <xsl:text>
+</xsl:text>
+ <xsl:text>
+</xsl:text>
+ <xsl:text> // TODO : prefix with hmitree hash header
+</xsl:text>
<xsl:text> ws.send("test");
</xsl:text>
<xsl:text> };
--- a/svghmi/gen_index_xhtml.ysl2 Fri Sep 27 06:54:35 2019 +0200
+++ b/svghmi/gen_index_xhtml.ysl2 Mon Sep 30 13:26:11 2019 +0200
@@ -50,6 +50,8 @@
apply "@* | node()";
}
script{
+ /* TODO : paste hmitree hash stored in hmi tree root node */
+
||
function evaluate_js_from_descriptions() {
var Page;
--- a/svghmi/svghmi.c Fri Sep 27 06:54:35 2019 +0200
+++ b/svghmi/svghmi.c Mon Sep 30 13:26:11 2019 +0200
@@ -8,6 +8,8 @@
#define DEFAULT_REFRESH_PERIOD_MS 100
#define HMI_BUFFER_SIZE %(buffer_size)d
#define HMI_ITEM_COUNT %(item_count)d
+#define HMI_HASH_SIZE 8
+static uint8_t hmi_hash[HMI_HASH_SIZE] = {%(hmi_hash_ints)s};
/* PLC reads from that buffer */
static char rbuf[HMI_BUFFER_SIZE];
@@ -15,10 +17,15 @@
/* PLC writes to that buffer */
static char wbuf[HMI_BUFFER_SIZE];
+/* TODO change that in case of multiclient... */
+/* worst biggest send buffer. FIXME : use dynamic alloc ? */
+static char sbuf[HMI_HASH_SIZE + HMI_BUFFER_SIZE + (HMI_ITEM_COUNT * sizeof(uint32_t))];
+static unsigned int sbufidx;
+
%(extern_variables_declarations)s
#define ticktime_ns %(PLC_ticktime)d
-uint16_t ticktime_ms = (ticktime_ns>1000000)?
+static uint16_t ticktime_ms = (ticktime_ns>1000000)?
ticktime_ns/1000000:
1;
@@ -28,7 +35,7 @@
buf_tosend
} buf_state_t;
-int global_write_dirty = 0;
+static int global_write_dirty = 0;
typedef struct {
void *ptr;
@@ -37,12 +44,12 @@
/* publish/write/send */
long wlock;
+ buf_state_t wstate;
+
/* zero means not subscribed */
uint16_t refresh_period_ms;
uint16_t age_ms;
- buf_state_t wstate;
-
/* retrieve/read/recv */
long rlock;
buf_state_t rstate;
@@ -53,16 +60,15 @@
%(variable_decl_array)s
};
-static char sendbuf[HMI_BUFFER_SIZE];
-
-typedef void(*hmi_tree_iterator)(hmi_tree_item_t*);
-void traverse_hmi_tree(hmi_tree_iterator fp)
+typedef int(*hmi_tree_iterator)(uint32_t, hmi_tree_item_t*);
+static int traverse_hmi_tree(hmi_tree_iterator fp) /* FIXME: falls off end of non-void function — add "return 0;" after the loop */
{
unsigned int i;
for(i = 0; i < sizeof(hmi_tree_item)/sizeof(hmi_tree_item_t); i++){
+ int res;
hmi_tree_item_t *dsc = &hmi_tree_item[i];
- if(dsc->type != UNKNOWN_ENUM)
- (*fp)(dsc);
+        if((res = (*fp)(i, dsc)) != 0)
+            return res;
}
}
@@ -70,76 +76,101 @@
%(var_access_code)s
-void write_iterator(hmi_tree_item_t *dsc)
-{
- void *dest_p = &wbuf[dsc->buf_index];
- void *real_value_p = NULL;
- char flags = 0;
-
- void *visible_value_p = UnpackVar(dsc, &real_value_p, &flags);
-
- /* Try take lock */
- long was_locked = AtomicCompareExchange(&dsc->wlock, 0, 1);
-
- if(was_locked) {
- /* was locked. give up*/
- return;
- }
-
- if(dsc->wstate == buf_set){
- /* if being subscribed */
- if(dsc->refresh_period_ms){
- if(dsc->age_ms + ticktime_ms < dsc->refresh_period_ms){
- dsc->age_ms += ticktime_ms;
- }else{
- dsc->wstate = buf_tosend;
- }
- }
- }
-
- /* if new value differs from previous one */
- if(memcmp(dest_p, visible_value_p, __get_type_enum_size(dsc->type)) != 0){
- /* copy and flag as set */
- memcpy(dest_p, visible_value_p, __get_type_enum_size(dsc->type));
- if(dsc->wstate == buf_free) {
- dsc->wstate = buf_set;
- dsc->age_ms = 0;
- }
- global_write_dirty = 1;
- }
-
- /* unlock - use AtomicComparExchange to have memory barrier */
+static inline int write_iterator(uint32_t index, hmi_tree_item_t *dsc)
+{
+ if(AtomicCompareExchange(&dsc->wlock, 0, 1) == 0)
+ {
+ if(dsc->wstate == buf_set){
+ /* if being subscribed */
+ if(dsc->refresh_period_ms){
+ if(dsc->age_ms + ticktime_ms < dsc->refresh_period_ms){
+ dsc->age_ms += ticktime_ms;
+ }else{
+ dsc->wstate = buf_tosend;
+ }
+ }
+ }
+
+ void *dest_p = &wbuf[dsc->buf_index];
+ void *real_value_p = NULL;
+ char flags = 0;
+ void *visible_value_p = UnpackVar(dsc, &real_value_p, &flags);
+
+ /* if new value differs from previous one */
+ USINT sz = __get_type_enum_size(dsc->type);
+ if(memcmp(dest_p, visible_value_p, sz) != 0){
+ /* copy and flag as set */
+ memcpy(dest_p, visible_value_p, sz);
+ if(dsc->wstate == buf_free) {
+ dsc->wstate = buf_set;
+ dsc->age_ms = 0;
+ }
+ global_write_dirty = 1;
+ }
+
+ AtomicCompareExchange(&dsc->wlock, 1, 0);
+ }
+ // else ... : PLC can't wait, variable will be updated next turn
+ return 0;
+}
+
+static inline int send_iterator(uint32_t index, hmi_tree_item_t *dsc)
+{
+ int res = 0;
+ while(AtomicCompareExchange(&dsc->wlock, 0, 1)) sched_yield();
+
+ if(dsc->wstate == buf_tosend)
+ {
+ uint32_t sz = __get_type_enum_size(dsc->type);
+        if(sbufidx + sizeof(uint32_t) + sz <= sizeof(sbuf))
+ {
+ void *src_p = &wbuf[dsc->buf_index];
+            char *dst_p = &sbuf[sbufidx]; /* char* : avoid void* arithmetic (GNU extension) below */
+ memcpy(dst_p, &index, sizeof(uint32_t));
+ memcpy(dst_p + sizeof(uint32_t), src_p, sz);
+ dsc->wstate = buf_free;
+ sbufidx += sizeof(uint32_t) /* index */ + sz;
+ }
+ else
+ {
+ res = EOVERFLOW;
+ }
+ }
+
AtomicCompareExchange(&dsc->wlock, 1, 0);
-}
-
-struct timespec sending_now;
-struct timespec next_sending;
-void send_iterator(hmi_tree_item_t *dsc)
+ return res;
+}
+
+static inline int read_iterator(uint32_t index, hmi_tree_item_t *dsc)
+{
+ if(AtomicCompareExchange(&dsc->rlock, 0, 1) == 0)
+ {
+ if(dsc->rstate == buf_set)
+ {
+ void *src_p = &rbuf[dsc->buf_index];
+ void *real_value_p = NULL;
+ char flags = 0;
+ void *visible_value_p = UnpackVar(dsc, &real_value_p, &flags);
+ memcpy(real_value_p, src_p, __get_type_enum_size(dsc->type));
+ dsc->rstate = buf_free;
+ }
+ AtomicCompareExchange(&dsc->rlock, 1, 0);
+ }
+ // else ... : PLC can't wait, variable will be updated next turn
+ return 0;
+}
+
+static inline void update_refresh_period(hmi_tree_item_t *dsc, uint16_t refresh_period_ms)
{
while(AtomicCompareExchange(&dsc->wlock, 0, 1)) sched_yield();
-
- // check for variable being modified
- if(dsc->wstate == buf_tosend){
- // send
-
- // TODO pack data in buffer
-
- dsc->wstate = buf_free;
- }
-
+ dsc->refresh_period_ms = refresh_period_ms;
AtomicCompareExchange(&dsc->wlock, 1, 0);
}
-void read_iterator(hmi_tree_item_t *dsc)
-{
- void *src_p = &rbuf[dsc->buf_index];
- void *real_value_p = NULL;
- char flags = 0;
-
- void *visible_value_p = UnpackVar(dsc, &real_value_p, &flags);
-
-
- memcpy(visible_value_p, src_p, __get_type_enum_size(dsc->type));
+static inline int reset_iterator(uint32_t index, hmi_tree_item_t *dsc)
+{
+    update_refresh_period(dsc, 0);
+ return 0;
}
static pthread_cond_t svghmi_send_WakeCond = PTHREAD_COND_INITIALIZER;
@@ -184,17 +215,23 @@
int do_collect;
pthread_mutex_lock(&svghmi_send_WakeCondLock);
do_collect = continue_collect;
- if(do_collect){
+ if(do_collect)
+ {
pthread_cond_wait(&svghmi_send_WakeCond, &svghmi_send_WakeCondLock);
do_collect = continue_collect;
}
pthread_mutex_unlock(&svghmi_send_WakeCondLock);
-
if(do_collect) {
- traverse_hmi_tree(send_iterator);
- /* TODO set ptr and size to something */
- return 0;
+ int res;
+ memcpy(&sbuf[0], &hmi_hash[0], HMI_HASH_SIZE);
+ sbufidx = HMI_HASH_SIZE;
+ if((res = traverse_hmi_tree(send_iterator)) == 0)
+ {
+ *ptr = &sbuf[0];
+ *size = sbufidx;
+ }
+ return res;
}
else
{
@@ -202,12 +239,102 @@
}
}
-int svghmi_recv_dispatch(uint32_t size, char *ptr){
- printf("%%*s",size,ptr);
- /* TODO something with ptr and size
- - subscribe
- or
- - spread values
- */
-}
-
+typedef enum {
+ setval = 0,
+ reset = 1,
+ subscribe = 2,
+ unsubscribe = 3
+} cmd_from_JS;
+
+int svghmi_recv_dispatch(uint32_t size, const uint8_t *ptr){
+ const uint8_t* cursor = ptr + HMI_HASH_SIZE;
+ const uint8_t* end = ptr + size;
+
+    printf("svghmi_recv_dispatch %%u\n",size); /* %% : this file is a Python %-template, see "%%*s" before */
+
+ /* match hmitree fingerprint */
+    if(size <= HMI_HASH_SIZE || memcmp(ptr, hmi_hash, HMI_HASH_SIZE) != 0)
+ {
+ printf("svghmi_recv_dispatch MISMATCH !!\n");
+ return EINVAL;
+ }
+
+ while(cursor < end)
+ {
+ uint32_t progress;
+ cmd_from_JS cmd = *(cursor++);
+ switch(cmd)
+ {
+ case setval:
+ {
+                uint32_t index = *(uint32_t*)(cursor); /* FIXME: no check cursor+4 <= end; unaligned read — confirm target tolerates */
+ uint8_t *valptr = cursor + sizeof(uint32_t);
+
+ if(index < HMI_ITEM_COUNT)
+ {
+ hmi_tree_item_t *dsc = &hmi_tree_item[index];
+ void *real_value_p = NULL;
+ char flags = 0;
+ void *visible_value_p = UnpackVar(dsc, &real_value_p, &flags);
+ void *dst_p = &rbuf[dsc->buf_index];
+ uint32_t sz = __get_type_enum_size(dsc->type);
+
+                    if(valptr + sz <= end)
+ {
+ // rescheduling spinlock until free
+ while(AtomicCompareExchange(&dsc->rlock, 0, 1)) sched_yield();
+
+ memcpy(dst_p, valptr, sz);
+ dsc->rstate = buf_set;
+
+ AtomicCompareExchange(&dsc->rlock, 1, 0);
+ progress = sz + sizeof(uint32_t) /* index */;
+ }
+ else return -EINVAL;
+ }
+ else return -EINVAL;
+ }
+ break;
+
+ case reset:
+ {
+ progress = 0;
+ traverse_hmi_tree(reset_iterator);
+ }
+ break;
+
+ case subscribe:
+ {
+ uint32_t index = *(uint32_t*)(cursor);
+                uint16_t refresh_period_ms = *(uint16_t*)(cursor + sizeof(uint32_t));
+
+ if(index < HMI_ITEM_COUNT)
+ {
+ hmi_tree_item_t *dsc = &hmi_tree_item[index];
+                update_refresh_period(dsc, refresh_period_ms);
+ }
+ else return -EINVAL;
+
+ progress = sizeof(uint32_t) /* index */ +
+ sizeof(uint16_t) /* refresh period */;
+ }
+ break;
+
+ case unsubscribe:
+        {
+            uint32_t index = *(uint32_t*)(cursor);
+            if(index < HMI_ITEM_COUNT)
+            {
+                reset_iterator(index, &hmi_tree_item[index]);
+ }
+ else return -EINVAL;
+
+ progress = sizeof(uint32_t) /* index */;
+ }
+ break;
+            default: return -EINVAL; } /* unknown command byte: 'progress' would be uninitialized */
+ cursor += progress;
+ }
+ return 0;
+}
+
--- a/svghmi/svghmi.js Fri Sep 27 06:54:35 2019 +0200
+++ b/svghmi/svghmi.js Mon Sep 30 13:26:11 2019 +0200
@@ -9,6 +9,8 @@
// TODO : dispatch and cache hmi tree updates
var received_msg = evt.data;
+ // TODO : check for hmitree hash header
+ // if not matching, reload page
alert("Message is received..."+received_msg);
};
@@ -17,6 +19,8 @@
// TODO : enable the HMI (was previously offline, or just starts)
// show main page
+
+ // TODO : prefix with hmitree hash header
ws.send("test");
};
})();
--- a/svghmi/svghmi.py Fri Sep 27 06:54:35 2019 +0200
+++ b/svghmi/svghmi.py Mon Sep 30 13:26:11 2019 +0200
@@ -77,11 +77,14 @@
else:
self.children.append(node)
- def etree(self):
+ def etree(self, add_hash=False):
attribs = dict(name=self.name)
if self.path is not None:
- attribs["path"]=".".join(self.path)
+ attribs["path"] = ".".join(self.path)
+
+ if add_hash:
+ attribs["hash"] = ",".join(map(str,self.hash()))
res = etree.Element(self.nodetype, **attribs)
@@ -98,6 +101,20 @@
for yoodl in c.traverse():
yield yoodl
+
+ def hash(self):
+ """ Produce a hash, any change in HMI tree structure change that hash """
+ s = hashlib.new('md5')
+ self._hash(s)
+ # limit size to HMI_HASH_SIZE as in svghmi.c
+ return map(ord,s.digest())[:8]
+
+ def _hash(self, s):
+ s.update(str((self.name,self.nodetype)))
+ if hasattr(self, "children"):
+ for c in self.children:
+ c._hash(s)
+
# module scope for HMITree root
# so that CTN can use HMITree deduced in Library
# note: this only works because library's Generate_C is
@@ -110,7 +127,7 @@
return paths.AbsNeighbourFile(__file__, "pous.xml")
def Generate_C(self, buildpath, varlist, IECCFLAGS):
- global hmi_tree_root
+ global hmi_tree_root, hmi_tree_unique_id
"""
PLC Instance Tree:
@@ -170,7 +187,8 @@
buf_index = 0
item_count = 0
for node in hmi_tree_root.traverse():
- if hasattr(node, "iectype"):
+ if hasattr(node, "iectype") and \
+ node.nodetype not in ["HMI_CLASS", "HMI_LABEL"]:
sz = DebugTypesSize.get(node.iectype, 0)
variable_decl_array += [
"{&(" + ".".join(node.path) + "), " + node.iectype + {
@@ -210,7 +228,8 @@
"buffer_size": buf_index,
"item_count": item_count,
"var_access_code": targets.GetCode("var_access.c"),
- "PLC_ticktime": self.GetCTR().GetTicktime()
+ "PLC_ticktime": self.GetCTR().GetTicktime(),
+            "hmi_hash_ints": ",".join(map(str,hmi_tree_root.hash()))
}
gen_svghmi_c_path = os.path.join(buildpath, "svghmi.c")
@@ -243,6 +262,7 @@
</xsd:element>
</xsd:schema>
"""
+ # TODO : add comma separated supported language list
ConfNodeMethods = [
{
@@ -258,6 +278,10 @@
"method": "_StartInkscape"
},
+ # TODO : Launch POEdit button
+ # PO -> SVG layers button
+ # SVG layers -> PO
+
# TODO : HMITree button
# - can drag'n'drop variabes to Inkscape
@@ -297,7 +321,7 @@
def GetHMITree(self):
global hmi_tree_root
- res = [hmi_tree_root.etree()]
+ res = [hmi_tree_root.etree(add_hash=True)]
return res
def CTNGenerate_C(self, buildpath, locations):
--- a/svghmi/svghmi_server.py Fri Sep 27 06:54:35 2019 +0200
+++ b/svghmi/svghmi_server.py Mon Sep 30 13:26:11 2019 +0200
@@ -31,7 +31,7 @@
svghmi_recv_dispatch = PLCBinary.svghmi_recv_dispatch
svghmi_recv_dispatch.restype = ctypes.c_int # error or 0
svghmi_recv_dispatch.argtypes = [
- ctypes.c_uint32, # size
+ ctypes.c_uint32, # size
ctypes.c_char_p] # data ptr
# TODO multiclient : switch to arrays
@@ -60,8 +60,6 @@
def onMessage(self, msg):
# pass message to the C side recieve_message()
- c_string = ctypes.c_char_p(msg)
- c_string_pointer = ctypes.c_void_p(ctypes.addressof(c_string))
svghmi_recv_dispatch(len(msg), msg)
# TODO multiclient : pass client index as well