    int period_nsec = (client_nodes[client_node_id].comm_period %%1000)*1000000; /* comm_period is in ms */

    // Enable thread cancelation. Enabled is default, but set it anyway to be safe.
    pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);

    // configure the timer for periodic activation
    {
        struct itimerspec timerspec;
        timerspec.it_interval.tv_sec  = period_sec;
        timerspec.it_interval.tv_nsec = period_nsec;
        timerspec.it_value            = timerspec.it_interval;
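        // With it_value set equal to it_interval, the first expiry occurs one full
        // comm_period after the timer is armed below, and the timer then re-arms
        // itself automatically every comm_period thereafter.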

        if (timer_settime(client_nodes[client_node_id].timer_id, 0 /* flags */, &timerspec, NULL) < 0)
            fprintf(stderr, "Modbus plugin: Error configuring periodic activation timer for Modbus client %%s.\n", client_nodes[client_node_id].location);
    }

    /* loop the communication with the client
     *
     * When the client thread has difficulty communicating with the remote client and/or server (network issues, for example),
     * then the communications get delayed and we will fall behind in the period.
     *
     * This is OK. Note that if the condition variable were to be signaled multiple times while the client thread is inside the same
     * Modbus transaction, then all those signals would be ignored.
     * However, since we keep the mutex locked during the communication cycle, it is not possible to signal the condition variable
     * during that time (it is only possible while the thread is blocked in the call to pthread_cond_wait()).
     *
     * This means that when network issues eventually get resolved, we will NOT have a bunch of delayed activations to handle
     * in quick succession (which would gobble up CPU time).
     *
     * Notice that the above property holds whether the communication cycle is run with the mutex locked or unlocked.
     * Since it is easier to implement the correct semantics for the other activation methods if the communication cycle
     * is run with the mutex locked, that is what we do.
     *
     * Note that we keep the mutex locked during the whole communication cycle
     * (i.e. the mutex used together with the condition variable that will activate a new communication cycle).
     *
     * Note that we never explicitly unlock this mutex. It will only be unlocked by the pthread_cond_wait()
     * call at the end of the cycle.
     */
    pthread_mutex_lock(&(client_nodes[client_node_id].mutex));

    while (1) {
        /*
        struct timespec cur_time;
        clock_gettime(CLOCK_MONOTONIC, &cur_time);
        fprintf(stderr, "Modbus client thread - new cycle (%%ld:%%ld)!\n", cur_time.tv_sec, cur_time.tv_nsec);
        */
        int req;
        for (req=0; req < NUMBER_OF_CLIENT_REQTS; req++) {
            /* just do the requests belonging to the client */
            if (client_requests[req].client_node_id != client_node_id)
                continue;

            /* only do the request if:
             *   - this request was explicitly asked to be executed by the client program
             *     OR
             *   - the client thread was activated periodically
             *     (in which case we execute all the requests belonging to the client node)
             */
            if ((client_requests[req].flag_exec_req == 0) && (client_nodes[client_requests[req].client_node_id].periodic_act == 0))
                continue;

            //fprintf(stderr, "Modbus plugin: RUNNING<###> of Modbus request %%d (periodic = %%d flag_exec_req = %%d)\n",
            //        req, client_nodes[client_requests[req].client_node_id].periodic_act, client_requests[req].flag_exec_req );

            int res_tmp = __execute_mb_request(req);
            switch (res_tmp) {
            case PORT_FAILURE: {
                if (res_tmp != client_nodes[client_node_id].prev_error)
                    fprintf(stderr, "Modbus plugin: Error connecting Modbus client %%s to remote server.\n", client_nodes[client_node_id].location);
                /* ... */
                client_nodes[client_node_id].prev_error = 0;
                client_requests[req].prev_error         = 0;
                break;
            }
            }

            /* We have just finished executing a client transaction request.
             * If the current cycle was activated by user request we reset the flag used to ask for it to be run.
             */
            if (0 != client_requests[req].flag_exec_req) {
                client_requests[req].flag_exec_req     = 0;
                client_requests[req].flag_exec_started = 0;
            }

            //fprintf(stderr, "Modbus plugin: RUNNING<---> of Modbus request %%d (periodic = %%d flag_exec_req = %%d)\n",
            //        req, client_nodes[client_requests[req].client_node_id].periodic_act, client_requests[req].flag_exec_req );
        }

        // Wait for a signal (from the timer, or an explicit request from the user program) before starting the next cycle
        {
            // No need to lock the mutex. It is already locked just before the while(1) loop.
            // Read the comment there to understand why.
            // pthread_mutex_lock(&(client_nodes[client_node_id].mutex));

            /* The client thread has just finished a cycle, so all the flags used to signal an activation
             * and specify the activation source (periodic, user request, ...)
             * get reset here, before waiting for a new activation.
             */
            client_nodes[client_node_id].periodic_act = 0;
            client_nodes[client_node_id].execute_req  = 0;

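            // pthread_cond_wait() may return spuriously, so loop on the predicate and only
            // proceed once execute_req has actually been set by one of the activation sources.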
            while (client_nodes[client_node_id].execute_req == 0)
                pthread_cond_wait(&(client_nodes[client_node_id].condv),
                                  &(client_nodes[client_node_id].mutex));

            // We run the communication cycle with the mutex locked.
            // Read the comment just above the while(1) to understand why.
            // pthread_mutex_unlock(&(client_nodes[client_node_id].mutex));
        }
    }

    // humour the compiler.
    return NULL;
}



/* Function to activate a client node's thread */
/* returns -1 if it could not send the signal */
static int __signal_client_thread(int client_node_id) {
    /* We TRY to signal the client thread.
     * We do this because this function can be called at the end of the PLC scan cycle,
     * and we don't want it to block at that time.
     */
    if (pthread_mutex_trylock(&(client_nodes[client_node_id].mutex)) != 0)
        return -1;
    client_nodes[client_node_id].execute_req = 1; // tell the thread to execute
    pthread_cond_signal (&(client_nodes[client_node_id].condv));
    pthread_mutex_unlock(&(client_nodes[client_node_id].mutex));
    return 0;
}



/* Function that will be called whenever a client node's periodic timer expires. */
/* The client node's thread will be waiting on a condition variable, so this function simply signals that
 * condition variable.
 *
 * The same callback function is called by the timers of all client nodes. The id of the client node
 * in question is passed as a parameter to the callback function.
 */
void __client_node_timer_callback_function(union sigval sigev_value) {
    /* signal the client node's condition variable on which the client node's thread should be waiting... */
    /* Since the communication cycle is run with the mutex locked, we use trylock() instead of lock() */
    //pthread_mutex_lock (&(client_nodes[sigev_value.sival_int].mutex));
    if (pthread_mutex_trylock (&(client_nodes[sigev_value.sival_int].mutex)) != 0)
        /* We never get to signal the thread for activation. But that is OK.
         * If it is still in the communication cycle (during which the mutex is kept locked),
         * then the thread is falling behind in the periodic communication cycle,
         * and we therefore need to skip a period.
         */
        return;
    client_nodes[sigev_value.sival_int].execute_req  = 1; // tell the thread to execute
    client_nodes[sigev_value.sival_int].periodic_act = 1; // tell the thread the activation was done by the periodic timer
    pthread_cond_signal (&(client_nodes[sigev_value.sival_int].condv));
    pthread_mutex_unlock(&(client_nodes[sigev_value.sival_int].mutex));
}



int __cleanup_%(locstr)s ();
int __init_%(locstr)s (int argc, char **argv){
    int index;

    for (index=0; index < NUMBER_OF_CLIENT_NODES; index++) {
        client_nodes[index].mb_nd = -1;
        /* see comment in mb_runtime.h to understand why we need to initialize these entries */
        switch (client_nodes[index].node_address.naf) {
        case naf_tcp:
            client_nodes[index].node_address.addr.tcp.host    = client_nodes[index].str1;
            client_nodes[index].node_address.addr.tcp.service = client_nodes[index].str2;
            break;
        case naf_rtu:
            client_nodes[index].node_address.addr.rtu.device  = client_nodes[index].str1;
            break;
        }
    }

    for (index=0; index < NUMBER_OF_SERVER_NODES; index++) {
        // mb_nd with negative numbers indicates how far it has been initialised (or not)
        //   -2 --> no modbus node created; no thread created
        //   -1 --> modbus node created!;   no thread created
        //  >=0 --> modbus node created!;   thread created!
        server_nodes[index].mb_nd = -2;
        /* see comment in mb_runtime.h to understand why we need to initialize these entries */
        switch (server_nodes[index].node_address.naf) {
        case naf_tcp:
            server_nodes[index].node_address.addr.tcp.host    = server_nodes[index].str1;
            server_nodes[index].node_address.addr.tcp.service = server_nodes[index].str2;
            break;
        case naf_rtu:
            server_nodes[index].node_address.addr.rtu.device  = server_nodes[index].str1;
            break;
        }
    }

    /* modbus library init */
    /* Note that TOTAL_xxxNODE_COUNT are the nodes required by _ALL_ the instances of the modbus
     * extension currently in the user's project. This file (MB_xx.c) is handling only one instance,
     * but must initialize the library for all instances. Only the first call to mb_slave_and_master_init()
     * ...
     */

            fprintf(stderr, "Modbus plugin: Error creating modbus client node %%s\n", client_nodes[index].location);
            goto error_exit;
        }
        client_nodes[index].init_state = 1; // we have created the node

        /* initialize the mutex variable that will be used by the thread handling the client node */
        if (pthread_mutex_init(&(client_nodes[index].mutex), NULL) < 0) {
            fprintf(stderr, "Modbus plugin: Error creating mutex for modbus client node %%s\n", client_nodes[index].location);
            goto error_exit;
        }
        client_nodes[index].init_state = 2; // we have created the mutex

        /* initialize the condition variable that will be used by the thread handling the client node */
        if (pthread_cond_init(&(client_nodes[index].condv), NULL) < 0) {
            fprintf(stderr, "Modbus plugin: Error creating condition variable for modbus client node %%s\n", client_nodes[index].location);
            goto error_exit;
        }
        client_nodes[index].execute_req = 0; // variable associated with the condition variable
        client_nodes[index].init_state  = 3; // we have created the condition variable

        /* initialize the timer that will be used to periodically activate the client node */
        {
            // start off by resetting the flag that will be set whenever the timer expires
            client_nodes[index].periodic_act = 0;

            struct sigevent evp;
            evp.sigev_notify            = SIGEV_THREAD; /* Notification method - call a function in a new thread context */
            evp.sigev_value.sival_int   = index;        /* Data passed to function upon notification - used to identify which client node to activate */
            evp.sigev_notify_function   = __client_node_timer_callback_function; /* function to call upon timer expiration */
            evp.sigev_notify_attributes = NULL;         /* attributes for the new thread in which sigev_notify_function will be called/executed */

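            // CLOCK_MONOTONIC is used so that the periodic activation is not affected by
            // adjustments to the system (wall clock) time.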
            if (timer_create(CLOCK_MONOTONIC, &evp, &(client_nodes[index].timer_id)) < 0) {
                fprintf(stderr, "Modbus plugin: Error creating timer for modbus client node %%s\n", client_nodes[index].location);
                goto error_exit;
            }
        }
        client_nodes[index].init_state = 4; // we have created the timer

        /* launch a thread to handle this client node */
        {
            int res = 0;
            pthread_attr_t attr;
            res |= pthread_attr_init(&attr);
            res |= pthread_create(&(client_nodes[index].thread_id), &attr, &__mb_client_thread, (void *)((char *)NULL + index));
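            // The client node index is passed to the thread encoded in the void* argument
            // ((char *)NULL + index), so that __mb_client_thread() can recover the integer
            // index from the pointer value.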
            if (res != 0) {
                fprintf(stderr, "Modbus plugin: Error starting thread for modbus client node %%s\n", client_nodes[index].location);
                goto error_exit;
            }
        }
        client_nodes[index].init_state = 5; // we have created the thread
    }

    /* init each local server */
    /* NOTE: All server_nodes[].init_state are initialised to 0 in the code
     * generated by the modbus plugin
     * ...
     */

void __publish_%(locstr)s (){
    int index;

    for (index=0; index < NUMBER_OF_CLIENT_REQTS; index++){
        /* synchronize the PLC and MB buffers only for the output requests */
        if (client_requests[index].req_type == req_output){

            // lock the mutex before copying the data
            if (pthread_mutex_trylock(&(client_requests[index].coms_buf_mutex)) == 0){
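                // (If the trylock above fails, another thread - normally the Modbus client
                //  thread - currently holds the buffer; the copy is simply skipped for this
                //  scan cycle and retried on the next one.)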

                // Check if the user configured this MB request to be activated whenever the data to be written changes
                if (client_requests[index].write_on_change) {
                    // Let's check if the data did change...
                    // compare the data in plcv_buffer to coms_buffer
                    int res;
                    res = memcmp((void *)client_requests[index].coms_buffer /* buf 1 */,
                                 (void *)client_requests[index].plcv_buffer /* buf 2 */,
                                 REQ_BUF_SIZE * sizeof(u16) /* size in bytes */);

                    // if the data changed, activate an execution request
                    if (0 != res)
                        client_requests[index].flag_exec_req = 1;
                }

                // copy from plcv_buffer to coms_buffer
                memcpy((void *)client_requests[index].coms_buffer /* destination */,
                       (void *)client_requests[index].plcv_buffer /* source */,
                       REQ_BUF_SIZE * sizeof(u16) /* size in bytes */);
                pthread_mutex_unlock(&(client_requests[index].coms_buf_mutex));
            }
        }
        /* If the user program set the execution request flag, then activate the thread
         * that handles this Modbus client transaction so it gets a chance to be executed
         * (but don't activate the thread if it has already been activated!)
         *
         * NOTE that we do this, for both the IN and OUT mapped locations, under this
         * __publish_() function. The scan cycle of the PLC works as follows:
         *   - call __retrieve_()
         *   - execute user programs
         *   - call __publish_()
         *   - insert <delay> until it is time to start the next periodic/cyclic scan cycle
         *
         * In an attempt to be able to run the MB transactions during the <delay>
         * interval, in which not much is going on, we handle the user program
         * requests to execute a specific MB transaction in this __publish_()
         * function.
         */
        if ((client_requests[index].flag_exec_req != 0) && (0 == client_requests[index].flag_exec_started)) {
            int client_node_id = client_requests[index].client_node_id;
            if (__signal_client_thread(client_node_id) >= 0) {
                /* - upon success, set flag_exec_started
                 * - both flags (flag_exec_req and flag_exec_started) will be reset
                 *   once the transaction has completed.
                 */
                client_requests[index].flag_exec_started = 1;
            }
        }
    }
}



    int res = 0;

    /* kill thread and close connections of each modbus client node */
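    /* Resources are released in the reverse order of their creation in __init_();
     * init_state records how far initialisation got, so only the resources that were
     * actually created get destroyed here.
     */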
    for (index=0; index < NUMBER_OF_CLIENT_NODES; index++) {
        close = 0;
        if (client_nodes[index].init_state >= 5) {
            // thread was launched, so we try to cancel it!
            close  = pthread_cancel(client_nodes[index].thread_id);
            close |= pthread_join  (client_nodes[index].thread_id, NULL);
            if (close < 0)
                fprintf(stderr, "Modbus plugin: Error closing thread for modbus client node %%s\n", client_nodes[index].location);
        }
        res |= close;

        close = 0;
        if (client_nodes[index].init_state >= 4) {
            // timer was created, so we try to destroy it!
            close = timer_delete(client_nodes[index].timer_id);
            if (close < 0)
                fprintf(stderr, "Modbus plugin: Error destroying timer for modbus client node %%s\n", client_nodes[index].location);
        }
        res |= close;

        close = 0;
        if (client_nodes[index].init_state >= 3) {
            // condition variable was created, so we try to destroy it!
            close = pthread_cond_destroy(&(client_nodes[index].condv));
            if (close < 0)
                fprintf(stderr, "Modbus plugin: Error destroying condition variable for modbus client node %%s\n", client_nodes[index].location);
        }
        res |= close;

        close = 0;
        if (client_nodes[index].init_state >= 2) {
            // mutex was created, so we try to destroy it!
            close = pthread_mutex_destroy(&(client_nodes[index].mutex));
            if (close < 0)
                fprintf(stderr, "Modbus plugin: Error destroying mutex for modbus client node %%s\n", client_nodes[index].location);
        }
        res |= close;

        close = 0;
        if (client_nodes[index].init_state >= 1) {
            /* ... */
    }

    return res;
}




/**********************************************/
/**  Functions for Beremiz web interface.   **/
/**********************************************/

/*
 * Beremiz has a program to run on the PLC (Beremiz_service.py)
 * to handle downloading of compiled programs, start/stop of the PLC, etc.
 * (see runtime/PLCObject.py for start/stop, loading, ...)
 *
 * This service also includes a web server to access the PLC state (start/stop)
 * and to change some basic configuration parameters.
 * (see runtime/NevowServer.py for the web server)
 *
 * The web server allows for extensions, where additional configuration
 * parameters may be changed on the running/downloaded PLC.
 * The Modbus plugin also comes with an extension to the web server, through
 * which the basic Modbus plugin configuration parameters may be changed.
 *
 * These parameters are changed _after_ the code (.so file) is loaded into
 * memory. These changes may be applied before (or after) the code starts
 * running (i.e. before or after __init_() gets called)!
 *
 * The following functions are never called from other C code. They are
 * called instead from the python code in runtime/Modbus_config.py, which
 * implements the web server extension for configuring Modbus parameters.
 */


/* The number of Client nodes (i.e. the number of entries in the client_nodes array)
 * The number of Server nodes (i.e. the number of entries in the server_nodes array)
 *
 * These variables are also used by the Modbus web config code to determine
 * whether the currently loaded PLC includes the Modbus plugin
 * (so it should make the Modbus parameter web interface visible to the user).
 */
const int __modbus_plugin_client_node_count = NUMBER_OF_CLIENT_NODES;
const int __modbus_plugin_server_node_count = NUMBER_OF_SERVER_NODES;
const int __modbus_plugin_param_string_size = MODBUS_PARAM_STRING_SIZE;



/* NOTE: We could have the python code in runtime/Modbus_config.py
 * directly access the server_node_t and client_node_t structures,
 * however this would create a tight coupling between these two
 * disjoint pieces of code.
 * Any change to the server_node_t or client_node_t structures would
 * require the python code to be changed accordingly. I have therefore
 * opted to create get/set functions, one for each parameter.
 *
 * We also convert the enumerated constants naf_ascii, etc...
 * (from node_addr_family_t in modbus/mb_addr.h)
 * into strings so as to decouple the python code that will be calling
 * these functions from the Modbus library code definitions.
 */
const char *addr_type_str[] = {
    [naf_ascii] = "ascii",
    [naf_rtu  ] = "rtu",
    [naf_tcp  ] = "tcp"
};


#define __safe_strcnpy(str_dest, str_orig, max_size) {  \
    strncpy(str_dest, str_orig, max_size);              \
    str_dest[max_size - 1] = '\0';                      \
}
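/* Unlike a plain strncpy(), this macro guarantees that str_dest is always
 * NUL-terminated, even when str_orig is longer than max_size - 1 characters. */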


/* NOTE: The host, port and device parameters are strings that may be changed
 * (by calling the following functions) after loading the compiled code
 * (.so file) into memory, but before the code starts running
 * (i.e. before __init_() gets called).
 * This means that the host, port and device parameters may be changed
 * _before_ they get mapped onto the str1 and str2 variables by __init_(),
 * which is why the following functions must access the str1 and str2
 * parameters directly.
 */
const char * __modbus_get_ClientNode_config_name(int nodeid) {return client_nodes[nodeid].config_name;                    }
const char * __modbus_get_ClientNode_host       (int nodeid) {return client_nodes[nodeid].str1;                           }
const char * __modbus_get_ClientNode_port       (int nodeid) {return client_nodes[nodeid].str2;                           }
const char * __modbus_get_ClientNode_device     (int nodeid) {return client_nodes[nodeid].str1;                           }
int          __modbus_get_ClientNode_baud       (int nodeid) {return client_nodes[nodeid].node_address.addr.rtu.baud;     }
int          __modbus_get_ClientNode_parity     (int nodeid) {return client_nodes[nodeid].node_address.addr.rtu.parity;   }
int          __modbus_get_ClientNode_stop_bits  (int nodeid) {return client_nodes[nodeid].node_address.addr.rtu.stop_bits;}
u64          __modbus_get_ClientNode_comm_period(int nodeid) {return client_nodes[nodeid].comm_period;                    }
const char * __modbus_get_ClientNode_addr_type  (int nodeid) {return addr_type_str[client_nodes[nodeid].node_address.naf];}

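/* An empty host string on a server node means the server accepts connections on any
 * local interface; the web interface displays this as "#ANY#" (see the get/set below). */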
const char * __modbus_get_ServerNode_config_name(int nodeid) {return server_nodes[nodeid].config_name;                    }
const char * __modbus_get_ServerNode_host       (int nodeid) {char*x=server_nodes[nodeid].str1; return (x[0]=='\0'?"#ANY#":x);}
const char * __modbus_get_ServerNode_port       (int nodeid) {return server_nodes[nodeid].str2;                           }
const char * __modbus_get_ServerNode_device     (int nodeid) {return server_nodes[nodeid].str1;                           }
int          __modbus_get_ServerNode_baud       (int nodeid) {return server_nodes[nodeid].node_address.addr.rtu.baud;     }
int          __modbus_get_ServerNode_parity     (int nodeid) {return server_nodes[nodeid].node_address.addr.rtu.parity;   }
int          __modbus_get_ServerNode_stop_bits  (int nodeid) {return server_nodes[nodeid].node_address.addr.rtu.stop_bits;}
u8           __modbus_get_ServerNode_slave_id   (int nodeid) {return server_nodes[nodeid].slave_id;                       }
const char * __modbus_get_ServerNode_addr_type  (int nodeid) {return addr_type_str[server_nodes[nodeid].node_address.naf];}


void __modbus_set_ClientNode_host       (int nodeid, const char * value) {__safe_strcnpy(client_nodes[nodeid].str1, value, MODBUS_PARAM_STRING_SIZE);}
void __modbus_set_ClientNode_port       (int nodeid, const char * value) {__safe_strcnpy(client_nodes[nodeid].str2, value, MODBUS_PARAM_STRING_SIZE);}
void __modbus_set_ClientNode_device     (int nodeid, const char * value) {__safe_strcnpy(client_nodes[nodeid].str1, value, MODBUS_PARAM_STRING_SIZE);}
void __modbus_set_ClientNode_baud       (int nodeid, int value)          {client_nodes[nodeid].node_address.addr.rtu.baud      = value;}
void __modbus_set_ClientNode_parity     (int nodeid, int value)          {client_nodes[nodeid].node_address.addr.rtu.parity    = value;}
void __modbus_set_ClientNode_stop_bits  (int nodeid, int value)          {client_nodes[nodeid].node_address.addr.rtu.stop_bits = value;}
void __modbus_set_ClientNode_comm_period(int nodeid, u64 value)          {client_nodes[nodeid].comm_period = value;}


void __modbus_set_ServerNode_host       (int nodeid, const char * value) {if (strcmp(value,"#ANY#")==0) value = "";
                                                                           __safe_strcnpy(server_nodes[nodeid].str1, value, MODBUS_PARAM_STRING_SIZE);}
void __modbus_set_ServerNode_port       (int nodeid, const char * value) {__safe_strcnpy(server_nodes[nodeid].str2, value, MODBUS_PARAM_STRING_SIZE);}
void __modbus_set_ServerNode_device     (int nodeid, const char * value) {__safe_strcnpy(server_nodes[nodeid].str1, value, MODBUS_PARAM_STRING_SIZE);}
void __modbus_set_ServerNode_baud       (int nodeid, int value)          {server_nodes[nodeid].node_address.addr.rtu.baud      = value;}
void __modbus_set_ServerNode_parity     (int nodeid, int value)          {server_nodes[nodeid].node_address.addr.rtu.parity    = value;}
void __modbus_set_ServerNode_stop_bits  (int nodeid, int value)          {server_nodes[nodeid].node_address.addr.rtu.stop_bits = value;}
void __modbus_set_ServerNode_slave_id   (int nodeid, u8 value)           {server_nodes[nodeid].slave_id = value;}