/*
* matiec - a compiler for the programming languages defined in IEC 61131-3
*
* Copyright (C) 2003-2011 Mario de Sousa (msousa@fe.up.pt)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* This code is made available on the understanding that it will not be
* used in safety-critical situations without a full and competent review.
*/
/*
* An IEC 61131-3 compiler.
*
* Based on the
* FINAL DRAFT - IEC 61131-3, 2nd Ed. (2001-12-10)
*
*/
/*
* Stage 1
* =======
*
* This file contains the lexical tokens definitions, from which
* the flex utility will generate a lexical parser function.
*/
/*****************************/
/* Lexical Parser Options... */
/*****************************/
/* The lexical analyser will never work in interactive mode,
* i.e., it will only process programs saved to files, and never
* programs being written interactively by the user.
* This option saves the resulting parser from calling the
* isatty() function, which seems to generate compile
* errors under some (older?) versions of flex.
*/
%option never-interactive
/* Have the lexical analyser use a 'char *yytext' instead of an
* array of char 'char yytext[??]' to store the lexical token.
*/
%pointer
/* Have the lexical analyser ignore the case of letters.
* This will occur for all the tokens and keywords, but
* the resulting text handed up to the syntax parser
* will not be changed, and will keep the original case
* of the letters in the input file.
*/
%option case-insensitive
/* Have the generated lexical analyser keep track of the
* line number it is currently analysing.
* This is used to pass up to the syntax parser
* the number of the line on which the current
* token was found. It will enable the syntax parser
* to generate more informative error messages...
*/
%option yylineno
/* required for the use of the yy_pop_state() and
* yy_push_state() functions
*/
%option stack
/* The '%option stack' also requests the inclusion of
* the yy_top_state() function; however, this function is not
* currently being used. This means that the compiler
* complains about the existence of this function.
* The following option removes the yy_top_state()
* function from the resulting c code, so the compiler
* no longer complains.
*/
%option noyy_top_state
/* We will be using unput() in our flex code, so we cannot set the following option!... */
/*
%option nounput
*/
/**************************************************/
/* External Variable and Function declarations... */
/**************************************************/
%{
/* Define TEST_MAIN to include a main() function.
* Useful for testing the parser generated by flex.
*/
/*
#define TEST_MAIN
*/
/* If the lexical parser is compiled by itself, we need to define the following
* constant to some string. Under normal circumstances LIBDIRECTORY is set
* in the syntax parser header file...
*/
#ifdef TEST_MAIN
#define DEFAULT_LIBDIR "just_testing"
#endif
/* Required for strdup() */
#include <string.h>
/* Required only for the declaration of abstract syntax classes
* (class symbol_c; class token_c; class list_c;)
* These will not be used in flex, but the token type union defined
* in iec_bison.h contains pointers to these classes, so we must include
* it here.
*/
#include "../absyntax/absyntax.hh"
/* iec_bison.h is generated by bison.
* Contains the definition of the token constants, and the
* token value type YYSTYPE (in our case, a 'const char *')
*/
#include "iec_bison.h"
#include "stage1_2_priv.hh"
/* Variable defined by the bison parser,
* where the value of the tokens will be stored
*/
extern YYSTYPE yylval;
/* The name of the file currently being parsed...
* This variable is declared and read from the code generated by bison!
* Note that flex accesses and updates this global variable
* appropriately whenever it comes across an (*#include <filename> *)
* directive...
*/
/*
NOTE: already defined in iec_bison.h
extern const char *current_filename;
*/
/* We will not be using unput() in our flex code... */
/* NOTE: it seems that this #define is no longer needed; it has been
* replaced by %option nounput.
* Should we simply delete it?
* For now leave it in, in case someone is using an old version of flex.
* In any case, the most harm that can result is a warning message
* when compiling iec.flex.c:
* warning: ‘void yyunput(int, char*)’ defined but not used
*/
#define YY_NO_UNPUT
/* Variable defined by the bison parser.
* It must be initialised with the location
* of the token being parsed.
* This is only needed if we want to keep
* track of the locations, in order to give
* more meaningful error messages!
*/
/*
*extern YYLTYPE yylloc;
*/
#define YY_INPUT(buf,result,max_size) {\
result = GetNextChar(buf, max_size);\
if ( result <= 0 )\
result = YY_NULL;\
}
/* A counter to track the order by which each token is processed.
* NOTE: This counter is not exactly linear (i.e., it does not get incremented by 1 for each token).
* i.e., it may get incremented by more than one between two consecutive tokens.
* This is due to the fact that the counter gets incremented on every 'user action' in flex,
* however not every user action will result in a token being passed to bison.
* Nevertheless this is still OK, as we are only interested in the relative
* ordering of tokens...
*/
static long int current_order = 0;
/* Macro that is executed for every action.
* We use it to pass the location of the token
* back to the bison parser...
*/
#define YY_USER_ACTION {\
yylloc.first_line = current_tracking->lineNumber; \
yylloc.first_column = current_tracking->currentTokenStart; \
yylloc.first_file = current_filename; \
yylloc.first_order = current_order; \
yylloc.last_line = current_tracking->lineNumber; \
yylloc.last_column = current_tracking->currentChar - 1; \
yylloc.last_file = current_filename; \
yylloc.last_order = current_order; \
current_tracking->currentTokenStart = current_tracking->currentChar; \
current_order++; \
}
/* Since the lexical parser we define here only works on ASCII based
* systems, we might as well make sure it is being compiled on
* one...
* Let's check a few random characters...
*/
#if (('a' != 0x61) || ('A' != 0x41) || ('z' != 0x7A) || ('Z' != 0x5A) || \
('0' != 0x30) || ('9' != 0x39) || ('(' != 0x28) || ('[' != 0x5B))
#error This lexical analyser is not portable to a non ASCII based system.
#endif
/* Function only called from within flex, but defined
* in iec.y!
* We declare it here...
*
* Search for a symbol in either of the two symbol tables
* and return the token id of the first symbol found.
* Searches first in the variables, and only if not found
* does it continue searching in the library elements
*/
//token_id_t get_identifier_token(const char *identifier_str);
int get_identifier_token(const char *identifier_str);
%}
/***************************************************/
/* Forward Declaration of functions defined later. */
/***************************************************/
%{
/* return all the text in the current token back to the input stream. */
void unput_text(unsigned int n);
/* return all the text in the current token back to the input stream,
* but first return to the stream an additional character to mark the end of the token.
*/
void unput_and_mark(const char c);
%}
/****************************/
/* Lexical Parser States... */
/****************************/
/* NOTE: Our parser can parse st or il code, intermixed
* within the same file.
* With IL we come across the issue of the EOL (end of line) token.
* ST, and the declaration parts of IL do not use this token!
* If the lexical analyser were to issue this token during ST
* language parsing, or during the declaration of data types,
* function headers, etc. in IL, the syntax parser would crash.
*
* We can solve this issue using one of three methods:
* (1) Augment all the syntax that does not accept the EOL
* token to simply ignore it. This makes the syntax
* definition (in iec.y) very cluttered!
* (2) Let the lexical parser figure out which language
* it is parsing, and decide whether or not to issue
* the EOL token. This requires the lexical parser
* to have knowledge of the syntax, making for a poor
* overall organisation of the code. It would also make it
* very difficult to understand the lexical parser as it
* would use several states, and a state machine to transition
* between the states. The state transitions would be
* intermingled with the lexical parser definition!
* (3) Use a mixture of (1) and (2). The lexical analyser
* merely distinguishes between function headers and function
* bodies, but no longer makes a distinction between il and
* st language bodies. When parsing a body, it will return
* the EOL token. In other states '\n' will be ignored as
* whitespace.
* The ST language syntax has been augmented in the syntax
* parser configuration to ignore any EOL tokens that it may
* come across!
* This option has both drawbacks of option (1) and (2), but
* much less intensely.
* The syntax that gets cluttered is limited to the ST statements
* (which is rather limited, compared to the function headers and
* data type declarations, etc...), while the state machine in
* the lexical parser becomes very simple. All state transitions
* can be handled within the lexical parser by itself, and can be
* easily identified. Thus knowledge of the syntax required by
* the lexical parser is very limited!
*
* Amazingly enough, I (Mario) got to implement option (3)
* at first, requiring two basic states, decl and body.
* The lexical parser will enter the body state when
* it is parsing the body of a function/program/function block. The
* state transition is done when we find a VAR_END that is not followed
* by a VAR! This is the syntax knowledge that gets included in the
* lexical analyser with this option!
* Unfortunately, getting the st syntax parser to ignore EOL tokens anywhere
* where they might appear leads to conflicts. This is due to the fact
* that the syntax parser uses the single look-ahead token to remove
* possible conflicts. When we insert a possible EOL, the single
* look ahead token becomes the EOL, which means the potential conflicts
* could no longer be resolved.
* Removing these conflicts would make the st syntax parser very convoluted,
* and adding the extraneous EOL would make it very cluttered.
* This option was therefore dropped in favour of another!
*
* I ended up implementing (2). Unfortunately the lexical analyser can
* not easily distinguish between il and st code, since function
* calls in il are very similar to function block calls in st.
* We therefore use an extra 'body' state. When the lexical parser
* finds that last END_VAR, it enters the body state. This state
* must figure out what language is being parsed from the first few
* tokens, and switch to the correct state (st, il or sfc) according to the
* language. This means that we insert quite a bit of knowledge of the
* syntax of the languages into the lexical parser. This is ugly, but it
* works, and at least it is possible to keep all the state changes together
* to make it easier to remove them later on if need be.
* Once the language being parsed has been identified,
* the body state returns any matched text back to the buffer with unput(),
* to be later matched correctly by the appropriate language parser (st, il or sfc).
*
* Additionally, in sfc state it may further recursively enter the body state
* once again. This is because an sfc body may contain ACTIONS, which are then
* written in one of the three languages (ST, IL or SFC), so once again we need
* to figure out which language the ACTION in the SFC was written in. We already
* have all that done in the body state, so we recursively transition to the body
* state once again.
* Note that in this case, when coming out of the st/il state (whichever language
* the action was written in) the sfc state will become active again. This is done by
* pushing and popping the previously active state!
*
* The sfc_qualifier_state is required because when parsing actions within an
* sfc, we will be expecting action qualifiers (N, P, R, S, DS, SD, ...). In order
* for bison to work correctly, these qualifiers must be returned as tokens. However,
* these tokens are not reserved keywords, which means it should be possible to
* define variables/functions/FBs with any of these names (including
* S and R which are special because they are also IL operators). So, when we are not
* expecting any action qualifiers, flex does not return these tokens, and is free
* to interpret them as previously defined variables/functions/... as the case may be.
*
* The time_literal_state is required because TIME# literals are decomposed into
* portions, and we want to send these portions one by one to bison. Each portion will
* represent the value in days/hours/minutes/seconds/ms.
* Unfortunately, some of these portions may also be lexically analysed as an identifier. So,
* we need to disable lexical identification of identifiers while parsing TIME# literals!
* e.g.: TIME#55d_4h_56m
* We would like to return to bison the tokens 'TIME' '#' '55d' '_' '4h' '_' '56m'
* Unfortunately, flex will join '_' and '4h' to create a legal {identifier} '_4h',
* and return that identifier instead! So, we added this state!
*
* The state machine has 7 main states (INITIAL, config, decl, body, st, il, sfc)
* Possible state changes are:
* INITIAL -> goto(decl_state)
* (when a FUNCTION, FUNCTION_BLOCK, or PROGRAM is found,
* and followed by a VAR declaration)
* INITIAL -> goto(body_state)
* (when a FUNCTION, FUNCTION_BLOCK, or PROGRAM is found,
* and _not_ followed by a VAR declaration)
* (This transition is actually commented out, since the syntax
* does not allow the declaration of functions, FBs, or programs
* without any VAR declaration!)
* INITIAL -> goto(config_state)
* (when a CONFIGURATION is found)
* decl_state -> push(decl_state); goto(body_state)
* (when the last END_VAR is found, i.e. the function body starts)
* decl_state -> push(decl_state); goto(sfc_state)
* (when it figures out it is parsing sfc language)
* body_state -> goto(st_state)
* (when it figures out it is parsing st language)
* body_state -> goto(il_state)
* (when it figures out it is parsing il language)
* st_state -> pop()
* (when a END_FUNCTION, END_FUNCTION_BLOCK, END_PROGRAM,
* END_ACTION or END_TRANSITION is found)
* il_state -> pop()
* (when a END_FUNCTION, END_FUNCTION_BLOCK, END_PROGRAM,
* END_ACTION or END_TRANSITION is found)
* decl_state -> goto(INITIAL)
* (when a END_FUNCTION, END_FUNCTION_BLOCK, or END_PROGRAM is found)
* sfc_state -> goto(INITIAL)
* (when a END_FUNCTION, END_FUNCTION_BLOCK, or END_PROGRAM is found)
* config_state -> goto(INITIAL)
* (when a END_CONFIGURATION is found)
* sfc_state -> push(sfc_state); goto(body_state)
* (when parsing an action. This transition is requested by bison)
* sfc_state -> push(sfc_state); goto(sfc_qualifier_state)
* (when expecting an action qualifier. This transition is requested by bison)
* sfc_qualifier_state -> pop()
* (when no longer expecting an action qualifier. This transition is requested by bison)
* config_state -> push(config_state); goto(task_init_state)
* (when parsing a task initialisation. This transition is requested by bison)
* task_init_state -> pop()
* (when no longer parsing task initialisation parameters. This transition is requested by bison)
*
*/
/* we are parsing a configuration. */
%s config_state
/* Inside a configuration, we are parsing task initialisation parameters. */
/* This means that PRIORITY, SINGLE and INTERVAL must be handled as
* tokens, and not as possible identifiers. Note that the above words
* are not keywords.
*/
%s task_init_state
/* we are parsing a function, program or function block declaration */
%s decl_state
/* we will be parsing a function body. Whether it is il or st remains unknown. */
%x body_state
/* we are parsing il code -> flex must return the EOL tokens! */
%s il_state
/* we are parsing st code -> flex must not return the EOL tokens! */
%s st_state
/* we are parsing sfc code -> flex must not return the EOL tokens! */
%s sfc_state
/* we are parsing sfc code, and expecting an action qualifier. */
%s sfc_qualifier_state
/* we are parsing sfc code, and expecting the priority token. */
%s sfc_priority_state
/* we are parsing a TIME# literal. We must not return any {identifier} tokens. */
%x time_literal_state
/*******************/
/* File #include's */
/*******************/
/* We extend the IEC 61131-3 standard syntax to allow inclusion
* of other files, using the IEC 61131-3 pragma directive...
* The accepted syntax is:
* {#include "<filename>"}
*/
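/* For example (the file name here is purely illustrative), a source file
 * may contain a line such as:
 *   {#include "standard_functions.txt"}
 * The lexer will then read the named file in place of the pragma, searching
 * the directories listed in INCLUDE_DIRECTORIES below, and resume the
 * original file once the included file has been fully processed.
 */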
/* the "include" states are used for picking up the name of an include file */
%x include_beg
%x include_filename
%x include_end
file_include_pragma_filename [^\"]*
file_include_pragma_beg "{#include"{st_whitespace_only}\"
file_include_pragma_end \"{st_whitespace_only}"}"
file_include_pragma {file_include_pragma_beg}{file_include_pragma_filename}{file_include_pragma_end}
%{
#define MAX_INCLUDE_DEPTH 16
typedef struct {
YY_BUFFER_STATE buffer_state;
tracking_t* env;
const char *filename;
} include_stack_t;
tracking_t* current_tracking;
include_stack_t include_stack[MAX_INCLUDE_DEPTH];
int include_stack_ptr = 0;
const char *INCLUDE_DIRECTORIES[] = {
DEFAULT_LIBDIR,
".",
"/lib",
"/usr/lib",
"/usr/lib/iec",
NULL /* must end with NULL!! */
};
%}
/*****************************/
/* Preliminary constructs... */
/*****************************/
/* In order to allow the declaration of POU prototypes (Function, FB, Program, ...),
* especially the prototypes of Functions and FBs defined in the standard
* (i.e. standard functions and FBs), we extend the IEC 61131-3 standard syntax
* with two pragmas to indicate that the code is to be parsed (going through the
* lexical, syntactical, and semantic analysers), but no code is to be generated.
*
* The accepted syntax is:
* {disable code generation}
* ... prototypes ...
* {enable code generation}
*
* When parsing these prototypes the abstract syntax tree will be populated as usual,
* allowing the semantic analyser to correctly analyse the semantics of calls to these
* functions/FBs. However, stage4 will simply ignore all IEC61131-3 code
* between the above two pragmas.
*/
disable_code_generation_pragma "{disable code generation}"
enable_code_generation_pragma "{enable code generation}"
/* Any other pragma... */
pragma "{"[^}]*"}"
/* NOTE: this seemingly unnecessarily complex definition is required
* to be able to eat up comments such as:
* '(* Testing... ! ***** ******)'
* without using the trailing context command in flex (/{context})
* since {comment} itself will later be used with
* trailing context ({comment}/{context})
*/
not_asterisk [^*]
not_close_parenthesis_nor_asterisk [^*)]
asterisk "*"
comment_text {not_asterisk}|(({asterisk}+){not_close_parenthesis_nor_asterisk})
comment "(*"({comment_text}*)({asterisk}+)")"
/*
3.1 Whitespace
(NOTE: Whitespace IS clearly defined, to include newline!!! See section 2.1.4!!!)
No definition of whitespace is given; in other words, the characters that may be used to separate language tokens are not precisely defined. One may nevertheless make an intelligent guess of using the space (' '), and other characters also commonly considered whitespace in other programming languages (horizontal tab, vertical tab, form feed, etc.).
The main question is whether the newline character should be considered whitespace. IL language statements use an EOL token (End Of Line) to distinguish between some language constructs. The EOL token itself is openly defined as "normally consist[ing] of the 'paragraph separator' ", leaving the final choice open to each implementation. If we choose the newline character to represent the EOL token, it may then not be considered whitespace.
On the other hand, some examples that come in a non-normative annex of the specification allow function declarations to span multiple lines, which means that the newline character is being considered as whitespace.
Our implementation works around this issue by including the new line character in the whitespace while parsing function declarations and the ST language, and parsing it as the EOL token only while parsing IL language statements. This requires the use of a state machine in the lexical parser that needs at least some knowledge of the syntax itself.
*/
/* NOTE: Our definition of whitespace will only work in ASCII!
*
* Since the IL language needs to know the location of newline
* (token EOL -> '\n' ), we need one definition of whitespace
* for each language...
*/
/*
* NOTE: we cannot use
* st_whitespace [:space:]*
* since we use {st_whitespace} as trailing context. In our case
* this would not constitute "dangerous trailing context", but the
* lexical generator (i.e. flex) does not know this (since it does
* not know which characters belong to the set [:space:]), and will
* generate a "dangerous trailing context" warning!
* We use this alternative just to stop the flex utility from
* generating the invalid (in this case) warning...
*/
st_whitespace_only [ \f\n\r\t\v]*
il_whitespace_only [ \f\r\t\v]*
st_whitespace_text {st_whitespace_only}|{comment}|{pragma}
il_whitespace_text {il_whitespace_only}|{comment}|{pragma}
st_whitespace {st_whitespace_text}*
il_whitespace {il_whitespace_text}*
st_whitespace_text_no_pragma {st_whitespace_only}|{comment}
il_whitespace_text_no_pragma {il_whitespace_only}|{comment}
st_whitespace_no_pragma {st_whitespace_text_no_pragma}*
il_whitespace_no_pragma {il_whitespace_text_no_pragma}*
qualified_identifier {identifier}(\.{identifier})*
/*****************************************/
/* B.1.1 Letters, digits and identifiers */
/*****************************************/
/* NOTE: The following definitions only work if the host computer
* is using the ASCII mapping. E.g., with EBCDIC, [A-Z]
* contains non-alphabetic characters!
* The correct way of doing it would be to use
* the [:upper:] etc... definitions.
*
* Unfortunately, further on we need all printable
* characters (i.e. [:print:]), but excluding '$'.
* Flex does not allow sets to be composed by excluding
* elements. Sets may only be constructed by adding new
* elements, which means that we have to revert to
* [\x20\x21\x23\x25\x26\x28-\x7E] for the definition
* of the printable characters with the required exceptions.
* The above also implies the use of ASCII, but now we have
* no way to work around it!
*
* The conclusion is that our parser is limited to ASCII
* based host computers!!
*/
letter [A-Za-z]
digit [0-9]
octal_digit [0-7]
hex_digit {digit}|[A-F]
identifier ({letter}|(_({letter}|{digit})))((_?({letter}|{digit}))*)
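/* A few illustrative consequences of the above definition (examples added
 * here for clarity, not taken from the standard's text): 'MyVar', 'limit_1'
 * and '_4h' all match {identifier}, whereas names containing two consecutive
 * underscores ('My__Var') or ending in an underscore ('Var_') do not, since
 * every '_' must be followed by a letter or digit.
 */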
/*******************/
/* B.1.2 Constants */
/*******************/
/******************************/
/* B.1.2.1 Numeric literals */
/******************************/
integer {digit}((_?{digit})*)
/* Some helper symbols for parsing TIME literals... */
integer_0_59 (0(_?))*([0-5](_?))?{digit}
integer_0_19 (0(_?))*([0-1](_?))?{digit}
integer_20_23 (0(_?))*2(_?)[0-3]
integer_0_23 {integer_0_19}|{integer_20_23}
integer_0_999 {digit}((_?{digit})?)((_?{digit})?)
binary_integer 2#{bit}((_?{bit})*)
bit [0-1]
octal_integer 8#{octal_digit}((_?{octal_digit})*)
hex_integer 16#{hex_digit}((_?{hex_digit})*)
exponent [Ee]([+-]?){integer}
/* The correct definition for real would be:
* real {integer}\.{integer}({exponent}?)
*
* Unfortunately, the spec also defines fixed_point (B 1.2.3.1) as:
* fixed_point {integer}\.{integer}
*
* This means that {integer}\.{integer} could be interpreted
* as either a fixed_point or a real.
* I have opted to interpret {integer}\.{integer} as a fixed_point.
* In order to do this, the definition of real has been changed to:
* real {integer}\.{integer}{exponent}
*
* This means that the syntax parser now needs to define a real to be
* either a real_token or a fixed_point_token!
*/
real {integer}\.{integer}{exponent}
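/* Illustrative examples (added for clarity): '0.55' and '4_096.5' only match
 * the {fixed_point} definition further below (B 1.2.3.1), and are handed up
 * to bison as fixed_point tokens, while '1.5E2' and '1.5e-2' match {real}
 * thanks to the mandatory exponent.
 */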
/*******************************/
/* B.1.2.2 Character Strings */
/*******************************/
/*
common_character_representation :=
<any printable character except '$', '"' or "'">
|'$$'
|'$L'|'$N'|'$P'|'$R'|'$T'
|'$l'|'$n'|'$p'|'$r'|'$t'
NOTE: $ = 0x24
" = 0x22
' = 0x27
printable chars in ASCII: 0x20-0x7E
*/
esc_char_u $L|$N|$P|$R|$T
esc_char_l $l|$n|$p|$r|$t
esc_char $$|{esc_char_u}|{esc_char_l}
double_byte_char (${hex_digit}{hex_digit}{hex_digit}{hex_digit})
single_byte_char (${hex_digit}{hex_digit})
/* WARNING:
* This definition is only valid in ASCII...
*
* Flex includes the function print_char() that defines
* all printable characters portably (i.e. whatever character
* encoding is currently being used, ASCII, EBCDIC, etc...)
* Unfortunately, we cannot generate the definition of
* common_character_representation portably, since flex
* does not allow definition of sets by subtracting
* elements in one set from another set.
* This means we must build up the definition of
* common_character_representation using only set addition,
* which leaves us with the only choice of defining the
* characters non-portably...
*/
common_character_representation [\x20\x21\x23\x25\x26\x28-\x7E]|{esc_char}
double_byte_character_representation $\"|'|{double_byte_char}|{common_character_representation}
single_byte_character_representation $'|\"|{single_byte_char}|{common_character_representation}
double_byte_character_string \"({double_byte_character_representation}*)\"
single_byte_character_string '({single_byte_character_representation}*)'
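/* Illustrative examples (loosely based on the standard's own examples):
 * 'A', '$'' and '$R$L' match {single_byte_character_string}, while
 * "A", "$"" and "$0041" match {double_byte_character_string}.
 */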
/************************/
/* B 1.2.3.1 - Duration */
/************************/
fixed_point {integer}\.{integer}
/* NOTE: The IEC 61131-3 v2 standard has an incorrect formal syntax definition of duration,
* as its definition does not match the standard's text.
* IEC 61131-3 v3 (committee draft) seems to have this fixed, so we use that
* definition instead!
*
* duration::= ('T' | 'TIME') '#' ['+'|'-'] interval
* interval::= days | hours | minutes | seconds | milliseconds
* fixed_point ::= integer [ '.' integer]
* days ::= fixed_point 'd' | integer 'd' ['_'] [ hours ]
* hours ::= fixed_point 'h' | integer 'h' ['_'] [ minutes ]
* minutes ::= fixed_point 'm' | integer 'm' ['_'] [ seconds ]
* seconds ::= fixed_point 's' | integer 's' ['_'] [ milliseconds ]
* milliseconds ::= fixed_point 'ms'
*
*
* The original IEC 61131-3 v2 definition is:
* duration ::= ('T' | 'TIME') '#' ['-'] interval
* interval ::= days | hours | minutes | seconds | milliseconds
* fixed_point ::= integer [ '.' integer]
* days ::= fixed_point 'd' | integer 'd' ['_'] hours
* hours ::= fixed_point 'h' | integer 'h' ['_'] minutes
* minutes ::= fixed_point 'm' | integer 'm' ['_'] seconds
* seconds ::= fixed_point 's' | integer 's' ['_'] milliseconds
* milliseconds ::= fixed_point 'ms'
*/
interval_ms_X ({integer_0_999}(\.{integer})?)ms
interval_s_X {integer_0_59}s(_?{interval_ms_X})?
interval_m_X {integer_0_59}m(_?{interval_s_X})?
interval_h_X {integer_0_23}h(_?{interval_m_X})?
interval_ms {integer}ms|({fixed_point}ms)
interval_s {integer}s(_?{interval_ms_X})?|({fixed_point}s)
interval_m {integer}m(_?{interval_s_X})?|({fixed_point}m)
interval_h {integer}h(_?{interval_m_X})?|({fixed_point}h)
interval_d {integer}d(_?{interval_h_X})?|({fixed_point}d)
interval {interval_ms}|{interval_s}|{interval_m}|{interval_h}|{interval_d}
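/* For instance (illustration only): '12ms' is matched by {interval_ms},
 * '25h_15m' by {interval_h} (with the minutes handled by {interval_m_X}),
 * and '2.5h' by the {fixed_point}h alternative of {interval_h}.
 */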
/* to help provide nice error messages, we also parse an incorrect but plausible interval... */
/* NOTE that this erroneous interval will be parsed outside the time_literal_state, so must not
* be able to parse any other legal lexical construct (besides a legal interval, but that
* is OK as this rule will appear _after_ the rule to parse legal intervals!).
*/
fixed_point_or_integer {fixed_point}|{integer}
erroneous_interval ({fixed_point_or_integer}d_?)?({fixed_point_or_integer}h_?)?({fixed_point_or_integer}m_?)?({fixed_point_or_integer}s_?)?({fixed_point_or_integer}ms)?
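/* For example (illustration only), the illegal literal T#25h_70m ends up
 * here: '70' is not a valid minutes field, so {interval} can only match
 * '25h', while {erroneous_interval} matches the longer '25h_70m'. The rule
 * below then returns erroneous_interval_token, allowing a specific error
 * message instead of a generic syntax error.
 */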
/********************************************/
/* B.1.4.1 Directly Represented Variables */
/********************************************/
/* The correct definition, if the standard were to be followed... */
location_prefix [IQM]
size_prefix [XBWDL]
direct_variable_standard %{location_prefix}({size_prefix}?){integer}((\.{integer})*)
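/* Typical matches (illustrative only): %IX0.0, %QW10 and %MD48, as well as
 * size-less forms such as %I3 (see the anonymous variable example below).
 */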
/* For the MatPLC, we will accept %<identifier>
* as a direct variable, this being mapped onto the MatPLC point
* named <identifier>
*/
/* TODO: we should not restrict it to only the accepted syntax
* of <identifier> as specified by the standard. MatPLC point names
* have a more permissive syntax.
*
* e.g. "P__234"
* Is a valid MatPLC point name, but not a valid <identifier> !!
* The same happens with names such as "333", "349+23", etc...
* How can we handle these more expressive names in our case?
* Remember that some direct variables may remain anonymous, with
* declarations such as:
* VAR
* AT %I3 : BYTE := 255;
* END_VAR
* in which case we are currently using "%I3" as the variable
* name.
*/
/* direct_variable_matplc %{identifier} */
/* direct_variable {direct_variable_standard}|{direct_variable_matplc} */
direct_variable {direct_variable_standard}
/******************************************/
/* B 1.4.3 - Declaration & Initialisation */
/******************************************/
incompl_location %[IQM]\*
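/* Incomplete locations such as %I*, %Q* or %M* match the above definition.
 * These are used in declarations where the physical location is left to be
 * assigned later (e.g. via VAR_CONFIG). (Examples added for illustration only.)
 */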
%%
/* fprintf(stderr, "flex: state %d\n", YY_START); */
/*****************************************************/
/*****************************************************/
/*****************************************************/
/***** *****/
/***** *****/
/***** F I R S T T H I N G S F I R S T *****/
/***** *****/
/***** *****/
/*****************************************************/
/*****************************************************/
/*****************************************************/
/***********************************************************/
/* Handle requests sent by bison for flex to change state. */
/***********************************************************/
if (get_goto_body_state()) {
yy_push_state(body_state);
rst_goto_body_state();
}
if (get_goto_sfc_qualifier_state()) {
yy_push_state(sfc_qualifier_state);
rst_goto_sfc_qualifier_state();
}
if (get_goto_sfc_priority_state()) {
yy_push_state(sfc_priority_state);
rst_goto_sfc_priority_state();
}
if (get_goto_task_init_state()) {
yy_push_state(task_init_state);
rst_goto_task_init_state();
}
if (get_pop_state()) {
yy_pop_state();
rst_pop_state();
}
/***************************/
/* Handle the pragmas! */
/***************************/
/* We start off by searching for the pragmas we handle in the lexical parser. */
<INITIAL>{file_include_pragma} unput_text(0); yy_push_state(include_beg);
/* Pragmas sent to syntax analyser (bison) */
{disable_code_generation_pragma} return disable_code_generation_pragma_token;
{enable_code_generation_pragma} return enable_code_generation_pragma_token;
<body_state>{disable_code_generation_pragma} return disable_code_generation_pragma_token;
<body_state>{enable_code_generation_pragma} return enable_code_generation_pragma_token;
/* Any other pragma we find, we just pass it up to the syntax parser... */
/* Note that the <body_state> state is exclusive, so we have to include it here too. */
{pragma} {/* return the pragma without the enclosing '{' and '}' */
yytext[strlen(yytext)-1] = '\0';
yylval.ID=strdup(yytext+1);
return pragma_token;
}
<body_state>{pragma} {/* return the pragma without the enclosing '{' and '}' */
yytext[strlen(yytext)-1] = '\0';
yylval.ID=strdup(yytext+1);
return pragma_token;
}
/*********************************/
/* Handle the file includes! */
/*********************************/
<include_beg>{file_include_pragma_beg} BEGIN(include_filename);
<include_filename>{file_include_pragma_filename} {
/* got the include file name */
int i;
if (include_stack_ptr >= MAX_INCLUDE_DEPTH) {
fprintf(stderr, "Includes nested too deeply\n");
exit( 1 );
}
include_stack[include_stack_ptr].buffer_state = YY_CURRENT_BUFFER;
include_stack[include_stack_ptr].env = current_tracking;
include_stack[include_stack_ptr].filename = current_filename;
for (i = 0, yyin = NULL; (INCLUDE_DIRECTORIES[i] != NULL) && (yyin == NULL); i++) {
char *full_name = strdup3(INCLUDE_DIRECTORIES[i], "/", yytext);
if (full_name == NULL) {
fprintf(stderr, "Out of memory!\n");
exit( 1 );
}
yyin = fopen(full_name, "r");
free(full_name);
}
if (!yyin) {
fprintf(stderr, "Error opening included file %s\n", yytext);
exit( 1 );
}
current_filename = strdup(yytext);
current_tracking = GetNewTracking(yyin);
include_stack_ptr++;
/* switch input buffer to new file... */
yy_switch_to_buffer(yy_create_buffer(yyin, YY_BUF_SIZE));
/* switch to whatever state was active before the include file */
yy_pop_state();
/* now process the new file... */
}
<<EOF>> { /* NOTE: We must not change the value of include_stack_ptr
* just yet. We must only decrement it if we are NOT
* at the end of the main file.
* If we have finished parsing the main file, then we
* must leave include_stack_ptr at 0, in case the
* parser is called once again with a new file.
* (In fact, we currently do just that!)
*/
free(current_tracking);
if (include_stack_ptr == 0) {
/* yyterminate() terminates the scanner and returns a 0 to the
* scanner's caller, indicating "all done".
*
* Our syntax parser (written with bison) has the token
* END_OF_INPUT associated to the value 0, so even though
* we don't explicitly return the token END_OF_INPUT
* calling yyterminate() is equivalent to doing that.
*/
yyterminate();
}
else {
--include_stack_ptr;
yy_delete_buffer(YY_CURRENT_BUFFER);
yy_switch_to_buffer((include_stack[include_stack_ptr]).buffer_state);
current_tracking = include_stack[include_stack_ptr].env;
/* removing constness of char *. This is safe actually,
* since the only real const char * that is stored on the stack is
* the first one (i.e. the one that gets stored in include_stack[0],
* which is never free'd)!
*/
/* NOTE: We do __NOT__ free the malloc()'d memory since
* pointers to this filename will be kept by many objects
* in the abstract syntax tree.
* This will later be used to provide correct error
* messages during semantic analysis (stage 3)
*/
/* free((char *)current_filename); */
current_filename = include_stack[include_stack_ptr].filename;
yy_push_state(include_end);
}
}
<include_end>{file_include_pragma_end} yy_pop_state();
/*********************************/
/* Handle all the state changes! */
/*********************************/
/* INITIAL -> decl_state */
<INITIAL>{
/* NOTE: how about functions that do not declare variables, and go directly to the body_state???
* - According to Section 2.5.1.3 (Function Declaration), item 2 in the list, a FUNCTION
* must have at least one input argument, so a correct declaration will have at least
* one VAR_INPUT ... VAR_END construct!
* - According to Section 2.5.2.2 (Function Block Declaration), a FUNCTION_BLOCK
* must have at least one input argument, so a correct declaration will have at least
* one VAR_INPUT ... VAR_END construct!
* - According to Section 2.5.3 (Programs), a PROGRAM must have at least one input
* argument, so a correct declaration will have at least one VAR_INPUT ... VAR_END
* construct!
*
* All the above means that we needn't worry about PROGRAMs, FUNCTIONs or
* FUNCTION_BLOCKs that do not have at least one VAR_END before the body_state.
* If the code has an error, and no VAR_END before the body, we will simply
* continue in the <decl_state> state, until the end of the FUNCTION, FUNCTION_BLOCK
* or PROGRAM.
*/
FUNCTION BEGIN(decl_state); return FUNCTION;
FUNCTION_BLOCK BEGIN(decl_state); return FUNCTION_BLOCK;
PROGRAM BEGIN(decl_state); return PROGRAM;
CONFIGURATION BEGIN(config_state); return CONFIGURATION;
}
/* INITIAL -> body_state */
/* required if the function, program, etc.. has no VAR block! */
/* We comment it out since the standard does not allow this. */
/* NOTE: Even if we were to include the following code, it */
/* would have no effect whatsoever since the above */
/* rules will take precedence! */
/*
<INITIAL>{
FUNCTION BEGIN(body_state); return FUNCTION;
FUNCTION_BLOCK BEGIN(body_state); return FUNCTION_BLOCK;
PROGRAM BEGIN(body_state); return PROGRAM;
}
*/
/* decl_state -> (body_state | sfc_state) */
<decl_state>{
END_VAR{st_whitespace}VAR {unput_text(strlen("END_VAR"));
return END_VAR;
}
END_VAR{st_whitespace}INITIAL_STEP {unput_text(strlen("END_VAR"));
yy_push_state(sfc_state);
return END_VAR;
}
END_VAR{st_whitespace} {unput_text(strlen("END_VAR"));
cmd_goto_body_state();
return END_VAR;
}
}
/* body_state -> (il_state | st_state) */
<body_state>{
{st_whitespace_no_pragma} /* Eat any whitespace */
{qualified_identifier}{st_whitespace}":=" unput_text(0); BEGIN(st_state);
{direct_variable_standard}{st_whitespace}":=" unput_text(0); BEGIN(st_state);
{qualified_identifier}"[" unput_text(0); BEGIN(st_state);
RETURN unput_text(0); BEGIN(st_state);
IF unput_text(0); BEGIN(st_state);
CASE unput_text(0); BEGIN(st_state);
FOR unput_text(0); BEGIN(st_state);
WHILE unput_text(0); BEGIN(st_state);
REPEAT unput_text(0); BEGIN(st_state);
EXIT unput_text(0); BEGIN(st_state);
/* ':=' occurs only in transitions, and not Function or FB bodies! */
:= unput_text(0); BEGIN(st_state);
/* Hopefully, the above rules (along with the last one),
* used to distinguish ST from IL, are
* enough to handle all occurrences. However, if
* there is some situation where the compiler is getting confused,
* we add the following rule to detect 'label:' in IL code. This will
* allow the user to insert a label right at the beginning (which
* will probably not be used further by his code) simply as a way
* to force the compiler to interpret his code as IL code.
*/
{identifier}{st_whitespace}":"{st_whitespace} unput_text(0); BEGIN(il_state);
{identifier} {int token = get_identifier_token(yytext);
if (token == prev_declared_fb_name_token) {
/* the code has a call to a function block */
/* NOTE: if we ever decide to allow the user to use IL operator tokens
* (LD, ST, ...) as identifiers for variable names (including
* function block instances), then the above inference/conclusion
* may be incorrect, and this condition may have to be changed!
*/
BEGIN(st_state);
} else {
BEGIN(il_state);
}
unput_text(0);
}
. unput_text(0); BEGIN(il_state);
} /* end of body_state lexical parser */
/* (il_state | st_state) -> $previous_state (decl_state or sfc_state) */
<il_state,st_state>{
END_FUNCTION yy_pop_state(); unput_text(0);
END_FUNCTION_BLOCK yy_pop_state(); unput_text(0);
END_PROGRAM yy_pop_state(); unput_text(0);
END_TRANSITION yy_pop_state(); unput_text(0);
END_ACTION yy_pop_state(); unput_text(0);
}
/* sfc_state -> INITIAL */
<sfc_state>{
END_FUNCTION yy_pop_state(); unput_text(0);
END_FUNCTION_BLOCK yy_pop_state(); unput_text(0);
END_PROGRAM yy_pop_state(); unput_text(0);
}
/* decl_state -> INITIAL */
<decl_state>{
END_FUNCTION BEGIN(INITIAL); return END_FUNCTION;
END_FUNCTION_BLOCK BEGIN(INITIAL); return END_FUNCTION_BLOCK;
END_PROGRAM BEGIN(INITIAL); return END_PROGRAM;
}
/* config -> INITIAL */
END_CONFIGURATION BEGIN(INITIAL); return END_CONFIGURATION;
/***************************************/
/* Next is to remove all whitespace */
/***************************************/
/* NOTE: pragmas are handled right at the beginning... */
<INITIAL,config_state,decl_state,st_state,sfc_state,task_init_state,sfc_qualifier_state>{st_whitespace_no_pragma} /* Eat any whitespace */
<il_state>{il_whitespace_no_pragma} /* Eat any whitespace */
/*****************************************/
/* B.1.1 Letters, digits and identifiers */
/*****************************************/
/* NOTE: 'R1', 'IN', etc... are IL operators, and therefore tokens
* On the other hand, the spec does not define them as keywords,
* which means they may be re-used for variable names, etc...!
* The syntax parser already caters for the possibility of these
* tokens being used for variable names in their declarations.
* When they are declared, they will be added to the variable symbol table!
* Further appearances of these tokens must no longer be parsed
* as R1_tokens etc..., but rather as variable_name_tokens!
*
* That is why the first thing we do with identifiers, even before
* checking whether they may be a 'keyword', is to check whether
* they have been previously declared as a variable name,
*
* However, we have a dilemma! Should we here also check for
* prev_declared_derived_function_name_token?
* If we do, then the 'MOD' default library function (defined in
* the standard) will always be returned as a function name, and
* it will therefore not be possible to use it as an operator as
* in the following ST expression 'X := Y MOD Z;' !
* If we don't, then it will not even be possible to use 'MOD'
* as a function, as in 'X := MOD(Y, Z);'
* We solve this by NOT testing for function names here, and
* handling this function and keyword clash in bison!
*/
/*
{identifier} {int token = get_identifier_token(yytext);
// fprintf(stderr, "flex: analysing identifier '%s'...", yytext);
if ((token == prev_declared_variable_name_token) ||
// (token == prev_declared_derived_function_name_token) || // DO NOT add this condition!
(token == prev_declared_fb_name_token)) {
// if (token != identifier_token)
// * NOTE: if we replace the above uncommented conditions with
* the simple test of (token != identifier_token), then
* 'MOD' et al must be removed from the
* library_symbol_table as a default function name!
* //
yylval.ID=strdup(yytext);
// fprintf(stderr, "returning token %d\n", token);
return token;
}
// otherwise, leave it for the other lexical parser rules...
// fprintf(stderr, "rejecting\n");
REJECT;
}
*/
/******************************************************/
/******************************************************/
/******************************************************/
/***** *****/
/***** *****/
/***** N O W D O T H E K E Y W O R D S *****/
/***** *****/
/***** *****/
/******************************************************/
/******************************************************/
/******************************************************/
EN return EN; /* Keyword */
ENO return ENO; /* Keyword */
/******************************/
/* B 1.2.1 - Numeric Literals */
/******************************/
TRUE return TRUE; /* Keyword */
BOOL#1 return boolean_true_literal_token;
BOOL#TRUE return boolean_true_literal_token;
SAFEBOOL#1 {if (get_opt_safe_extensions()) {return safeboolean_true_literal_token;} else{REJECT;}} /* Keyword (Data Type) */
SAFEBOOL#TRUE {if (get_opt_safe_extensions()) {return safeboolean_true_literal_token;} else{REJECT;}} /* Keyword (Data Type) */
FALSE return FALSE; /* Keyword */
BOOL#0 return boolean_false_literal_token;
BOOL#FALSE return boolean_false_literal_token;
SAFEBOOL#0 {if (get_opt_safe_extensions()) {return safeboolean_false_literal_token;} else{REJECT;}} /* Keyword (Data Type) */
SAFEBOOL#FALSE {if (get_opt_safe_extensions()) {return safeboolean_false_literal_token;} else{REJECT;}} /* Keyword (Data Type) */
/************************/
/* B 1.2.3.1 - Duration */
/************************/
t# return T_SHARP; /* Delimiter */
T# return T_SHARP; /* Delimiter */
TIME return TIME; /* Keyword (Data Type) */
/************************************/
/* B 1.2.3.2 - Time of day and Date */
/************************************/
TIME_OF_DAY return TIME_OF_DAY; /* Keyword (Data Type) */
TOD return TIME_OF_DAY; /* Keyword (Data Type) */
DATE return DATE; /* Keyword (Data Type) */
d# return D_SHARP; /* Delimiter */
D# return D_SHARP; /* Delimiter */
DATE_AND_TIME return DATE_AND_TIME; /* Keyword (Data Type) */
DT return DATE_AND_TIME; /* Keyword (Data Type) */
/***********************************/
/* B 1.3.1 - Elementary Data Types */
/***********************************/
BOOL return BOOL; /* Keyword (Data Type) */
BYTE return BYTE; /* Keyword (Data Type) */
WORD return WORD; /* Keyword (Data Type) */
DWORD return DWORD; /* Keyword (Data Type) */
LWORD return LWORD; /* Keyword (Data Type) */
SINT return SINT; /* Keyword (Data Type) */
INT return INT; /* Keyword (Data Type) */
DINT return DINT; /* Keyword (Data Type) */
LINT return LINT; /* Keyword (Data Type) */
USINT return USINT; /* Keyword (Data Type) */
UINT return UINT; /* Keyword (Data Type) */
UDINT return UDINT; /* Keyword (Data Type) */
ULINT return ULINT; /* Keyword (Data Type) */
REAL return REAL; /* Keyword (Data Type) */
LREAL return LREAL; /* Keyword (Data Type) */
WSTRING return WSTRING; /* Keyword (Data Type) */
STRING return STRING; /* Keyword (Data Type) */
TIME return TIME; /* Keyword (Data Type) */
DATE return DATE; /* Keyword (Data Type) */
DT return DT; /* Keyword (Data Type) */
TOD return TOD; /* Keyword (Data Type) */
DATE_AND_TIME return DATE_AND_TIME; /* Keyword (Data Type) */
TIME_OF_DAY return TIME_OF_DAY; /* Keyword (Data Type) */
/*****************************************************************/
/* Keywords defined in "Safety Software Technical Specification" */
/*****************************************************************/
/*
* NOTE: The following keywords are defined in
* "Safety Software Technical Specification,
* Part 1: Concepts and Function Blocks,
* Version 1.0 – Official Release"
* written by PLCopen - Technical Committee 5
*
* We only support these extensions and keywords
* if the appropriate command line option is given.
*/
SAFEBOOL {if (get_opt_safe_extensions()) {return SAFEBOOL;} else {REJECT;}}
SAFEBYTE {if (get_opt_safe_extensions()) {return SAFEBYTE;} else {REJECT;}}
SAFEWORD {if (get_opt_safe_extensions()) {return SAFEWORD;} else {REJECT;}}
SAFEDWORD {if (get_opt_safe_extensions()) {return SAFEDWORD;} else{REJECT;}}
SAFELWORD {if (get_opt_safe_extensions()) {return SAFELWORD;} else{REJECT;}}
SAFEREAL {if (get_opt_safe_extensions()) {return SAFEREAL;} else{REJECT;}}
SAFELREAL {if (get_opt_safe_extensions()) {return SAFELREAL;} else{REJECT;}}
SAFESINT {if (get_opt_safe_extensions()) {return SAFESINT;} else{REJECT;}}
SAFEINT {if (get_opt_safe_extensions()) {return SAFEINT;} else{REJECT;}}
SAFEDINT {if (get_opt_safe_extensions()) {return SAFEDINT;} else{REJECT;}}
SAFELINT {if (get_opt_safe_extensions()) {return SAFELINT;} else{REJECT;}}
SAFEUSINT {if (get_opt_safe_extensions()) {return SAFEUSINT;} else{REJECT;}}
SAFEUINT {if (get_opt_safe_extensions()) {return SAFEUINT;} else{REJECT;}}
SAFEUDINT {if (get_opt_safe_extensions()) {return SAFEUDINT;} else{REJECT;}}
SAFEULINT {if (get_opt_safe_extensions()) {return SAFEULINT;} else{REJECT;}}
/* SAFESTRING and SAFEWSTRING are not yet supported, i.e. checked correctly, in the semantic analyser (stage 3) */
/* so it is best not to support them at all... */
/*
SAFEWSTRING {if (get_opt_safe_extensions()) {return SAFEWSTRING;} else{REJECT;}}
SAFESTRING {if (get_opt_safe_extensions()) {return SAFESTRING;} else{REJECT;}}
*/
SAFETIME {if (get_opt_safe_extensions()) {return SAFETIME;} else{REJECT;}}
SAFEDATE {if (get_opt_safe_extensions()) {return SAFEDATE;} else{REJECT;}}
SAFEDT {if (get_opt_safe_extensions()) {return SAFEDT;} else{REJECT;}}
SAFETOD {if (get_opt_safe_extensions()) {return SAFETOD;} else{REJECT;}}
SAFEDATE_AND_TIME {if (get_opt_safe_extensions()) {return SAFEDATE_AND_TIME;} else{REJECT;}}
SAFETIME_OF_DAY {if (get_opt_safe_extensions()) {return SAFETIME_OF_DAY;} else{REJECT;}}
/********************************/
/* B 1.3.2 - Generic data types */
/********************************/
/* Strangely, the following symbols do not seem to be required! */
/* But we include them so they become reserved words, and do not
* get passed up to bison as an identifier...
*/
ANY return ANY; /* Keyword (Data Type) */
ANY_DERIVED return ANY_DERIVED; /* Keyword (Data Type) */
ANY_ELEMENTARY return ANY_ELEMENTARY; /* Keyword (Data Type) */
ANY_MAGNITUDE return ANY_MAGNITUDE; /* Keyword (Data Type) */
ANY_NUM return ANY_NUM; /* Keyword (Data Type) */
ANY_REAL return ANY_REAL; /* Keyword (Data Type) */
ANY_INT return ANY_INT; /* Keyword (Data Type) */
ANY_BIT return ANY_BIT; /* Keyword (Data Type) */
ANY_STRING return ANY_STRING; /* Keyword (Data Type) */
ANY_DATE return ANY_DATE; /* Keyword (Data Type) */
/********************************/
/* B 1.3.3 - Derived data types */
/********************************/
":=" return ASSIGN; /* Delimiter */
".." return DOTDOT; /* Delimiter */
TYPE return TYPE; /* Keyword */
END_TYPE return END_TYPE; /* Keyword */
ARRAY return ARRAY; /* Keyword */
OF return OF; /* Keyword */
STRUCT return STRUCT; /* Keyword */
END_STRUCT return END_STRUCT; /* Keyword */
/*********************/
/* B 1.4 - Variables */
/*********************/
/******************************************/
/* B 1.4.3 - Declaration & Initialisation */
/******************************************/
VAR_INPUT return VAR_INPUT; /* Keyword */
VAR_OUTPUT return VAR_OUTPUT; /* Keyword */
VAR_IN_OUT return VAR_IN_OUT; /* Keyword */
VAR_EXTERNAL return VAR_EXTERNAL; /* Keyword */
VAR_GLOBAL return VAR_GLOBAL; /* Keyword */
END_VAR return END_VAR; /* Keyword */
RETAIN return RETAIN; /* Keyword */
NON_RETAIN return NON_RETAIN; /* Keyword */
R_EDGE return R_EDGE; /* Keyword */
F_EDGE return F_EDGE; /* Keyword */
AT return AT; /* Keyword */
/***********************/
/* B 1.5.1 - Functions */
/***********************/
FUNCTION return FUNCTION; /* Keyword */
END_FUNCTION return END_FUNCTION; /* Keyword */
VAR return VAR; /* Keyword */
CONSTANT return CONSTANT; /* Keyword */
/*****************************/
/* B 1.5.2 - Function Blocks */
/*****************************/
FUNCTION_BLOCK return FUNCTION_BLOCK; /* Keyword */
END_FUNCTION_BLOCK return END_FUNCTION_BLOCK; /* Keyword */
VAR_TEMP return VAR_TEMP; /* Keyword */
VAR return VAR; /* Keyword */
NON_RETAIN return NON_RETAIN; /* Keyword */
END_VAR return END_VAR; /* Keyword */
/**********************/
/* B 1.5.3 - Programs */
/**********************/
PROGRAM return PROGRAM; /* Keyword */
END_PROGRAM return END_PROGRAM; /* Keyword */
/********************************************/
/* B 1.6 Sequential Function Chart elements */
/********************************************/
/* NOTE: the following identifiers/tokens clash with the R and S IL operators, as well
* as other identifiers that may be used as variable names inside IL and ST programs.
* They will have to be handled when we include parsing of SFC... For now, simply
* ignore them!
*/
ACTION return ACTION; /* Keyword */
END_ACTION return END_ACTION; /* Keyword */
TRANSITION return TRANSITION; /* Keyword */
END_TRANSITION return END_TRANSITION; /* Keyword */
FROM return FROM; /* Keyword */
TO return TO; /* Keyword */
INITIAL_STEP return INITIAL_STEP; /* Keyword */
STEP return STEP; /* Keyword */
END_STEP return END_STEP; /* Keyword */
/* PRIORITY is not a keyword, so we only return it when
* it is explicitly required and we are not expecting any identifiers
* that could also use the same letter sequence (i.e. an identifier: priority)
*/
<sfc_priority_state>PRIORITY return PRIORITY;
<sfc_qualifier_state>{
L return L;
D return D;
SD return SD;
DS return DS;
SL return SL;
N return N;
P return P;
R return R;
S return S;
}
/********************************/
/* B 1.7 Configuration elements */
/********************************/
CONFIGURATION return CONFIGURATION; /* Keyword */
END_CONFIGURATION return END_CONFIGURATION; /* Keyword */
TASK return TASK; /* Keyword */
RESOURCE return RESOURCE; /* Keyword */
ON return ON; /* Keyword */
END_RESOURCE return END_RESOURCE; /* Keyword */
VAR_CONFIG return VAR_CONFIG; /* Keyword */
VAR_ACCESS return VAR_ACCESS; /* Keyword */
END_VAR return END_VAR; /* Keyword */
WITH return WITH; /* Keyword */
PROGRAM return PROGRAM; /* Keyword */
RETAIN return RETAIN; /* Keyword */
NON_RETAIN return NON_RETAIN; /* Keyword */
READ_WRITE return READ_WRITE; /* Keyword */
READ_ONLY return READ_ONLY; /* Keyword */
/* PRIORITY, SINGLE and INTERVAL are not keywords, so we only return them when
* they are explicitly required and we are not expecting any identifiers
* that could also use the same letter sequence (i.e. an identifier: priority, ...)
*/
<task_init_state>{
PRIORITY return PRIORITY;
SINGLE return SINGLE;
INTERVAL return INTERVAL;
}
/***********************************/
/* B 2.1 Instructions and Operands */
/***********************************/
<il_state>\n return EOL;
/*******************/
/* B 2.2 Operators */
/*******************/
/* NOTE: we can't have flex return the same token for
* ANDN and &N, nor for AND and &, since
* AND and ANDN are considered valid variable,
* function or function block type names!
* This means that the parser may decide that the
* AND or ANDN strings found in the source code
* are being used as variable names
* and not as operators, and will therefore transform
* these tokens into identifier tokens!
* We can't have the parser thinking that the source
* code contained the string AND (which may be interpreted
* as a variable name) when in reality the source code
* merely contained the character &, so we use two
* different tokens for & and AND (and similarly
* ANDN and &N)!
*/
/* The following tokens clash with ST expression operators and Standard Functions */
/* They are also keywords! */
AND return AND; /* Keyword */
MOD return MOD; /* Keyword */
OR return OR; /* Keyword */
XOR return XOR; /* Keyword */
NOT return NOT; /* Keyword */
/* The following tokens clash with Standard Functions */
/* They are keywords because they are a function name */
<il_state>{
ADD return ADD; /* Keyword (Standard Function) */
DIV return DIV; /* Keyword (Standard Function) */
EQ return EQ; /* Keyword (Standard Function) */
GE return GE; /* Keyword (Standard Function) */
GT return GT; /* Keyword (Standard Function) */
LE return LE; /* Keyword (Standard Function) */
LT return LT; /* Keyword (Standard Function) */
MUL return MUL; /* Keyword (Standard Function) */
NE return NE; /* Keyword (Standard Function) */
SUB return SUB; /* Keyword (Standard Function) */
}
/* The following tokens clash with SFC action qualifiers */
/* They are not keywords! */
<il_state>{
S return S;
R return R;
}
/* The following tokens clash with ST expression operators */
& return AND2; /* NOT a Delimiter! */
/* The following tokens have no clashes */
/* They are not keywords! */
<il_state>{
LD return LD;
LDN return LDN;
ST return ST;
STN return STN;
S1 return S1;
R1 return R1;
CLK return CLK;
CU return CU;
CD return CD;
PV return PV;
IN return IN;
PT return PT;
ANDN return ANDN;
&N return ANDN2;
ORN return ORN;
XORN return XORN;
CAL return CAL;
CALC return CALC;
CALCN return CALCN;
RET return RET;
RETC return RETC;
RETCN return RETCN;
JMP return JMP;
JMPC return JMPC;
JMPCN return JMPCN;
}
/***********************/
/* B 3.1 - Expressions */
/***********************/
"**" return OPER_EXP; /* NOT a Delimiter! */
"<>" return OPER_NE; /* NOT a Delimiter! */
">=" return OPER_GE; /* NOT a Delimiter! */
"<=" return OPER_LE; /* NOT a Delimiter! */
& return AND2; /* NOT a Delimiter! */
AND return AND; /* Keyword */
XOR return XOR; /* Keyword */
OR return OR; /* Keyword */
NOT return NOT; /* Keyword */
MOD return MOD; /* Keyword */
/*****************************************/
/* B 3.2.2 Subprogram Control Statements */
/*****************************************/
:= return ASSIGN; /* Delimiter */
=> return SENDTO; /* Delimiter */
RETURN return RETURN; /* Keyword */
/********************************/
/* B 3.2.3 Selection Statements */
/********************************/
IF return IF; /* Keyword */
THEN return THEN; /* Keyword */
ELSIF return ELSIF; /* Keyword */
ELSE return ELSE; /* Keyword */
END_IF return END_IF; /* Keyword */
CASE return CASE; /* Keyword */
OF return OF; /* Keyword */
ELSE return ELSE; /* Keyword */
END_CASE return END_CASE; /* Keyword */
/********************************/
/* B 3.2.4 Iteration Statements */
/********************************/
FOR return FOR; /* Keyword */
TO return TO; /* Keyword */
BY return BY; /* Keyword */
DO return DO; /* Keyword */
END_FOR return END_FOR; /* Keyword */
WHILE return WHILE; /* Keyword */
DO return DO; /* Keyword */
END_WHILE return END_WHILE; /* Keyword */
REPEAT return REPEAT; /* Keyword */
UNTIL return UNTIL; /* Keyword */
END_REPEAT return END_REPEAT; /* Keyword */
EXIT return EXIT; /* Keyword */
/********************************************************/
/********************************************************/
/********************************************************/
/***** *****/
/***** *****/
/***** N O W W O R K W I T H V A L U E S *****/
/***** *****/
/***** *****/
/********************************************************/
/********************************************************/
/********************************************************/
/********************************************/
/* B.1.4.1 Directly Represented Variables */
/********************************************/
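/* Directly represented variables name a physical or logical I/O or memory
* location, e.g. %IX0.3 (input bit), %QW2 (output word) or %MD48 (memory
* double word). get_direct_variable_token() selects the appropriate token
* for the matched text.
*/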
{direct_variable} {yylval.ID=strdup(yytext); return get_direct_variable_token(yytext);}
/******************************************/
/* B 1.4.3 - Declaration & Initialisation */
/******************************************/
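/* Incomplete locations leave the physical address unspecified,
* e.g. %I*, %Q* or %M*; the actual mapping is supplied later in the
* configuration.
*/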
{incompl_location} {yylval.ID=strdup(yytext); return incompl_location_token;}
/************************/
/* B 1.2.3.1 - Duration */
/************************/
{fixed_point} {yylval.ID=strdup(yytext); return fixed_point_token;}
{interval} {/*fprintf(stderr, "entering time_literal_state ##%s##\n", yytext);*/ unput_and_mark('#'); yy_push_state(time_literal_state);}
{erroneous_interval} {return erroneous_interval_token;}
<time_literal_state>{
{integer}d {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return integer_d_token;}
{integer}h {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return integer_h_token;}
{integer}m {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return integer_m_token;}
{integer}s {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return integer_s_token;}
{integer}ms {yylval.ID=strdup(yytext); yylval.ID[yyleng-2] = '\0'; return integer_ms_token;}
{fixed_point}d {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return fixed_point_d_token;}
{fixed_point}h {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return fixed_point_h_token;}
{fixed_point}m {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return fixed_point_m_token;}
{fixed_point}s {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return fixed_point_s_token;}
{fixed_point}ms {yylval.ID=strdup(yytext); yylval.ID[yyleng-2] = '\0'; return fixed_point_ms_token;}
_ /* do nothing - eat it up!*/
\# {/*fprintf(stderr, "popping from time_literal_state (###)\n");*/ yy_pop_state(); return end_interval_token;}
. {ERROR;}
\n {ERROR;}
}
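/* Illustration only, assuming the {interval} definition matches just the
* value part of a duration (the T#/TIME# prefix being handled separately):
* for an input such as 25h_10m, unput_and_mark('#') re-inserts the text
* followed by a '#' marker; '25h' then yields integer_h_token with
* yylval.ID set to "25" (the unit letter is overwritten with '\0'),
* the '_' separator is eaten, '10m' yields integer_m_token ("10"),
* and the trailing '#' pops the state and returns end_interval_token.
*/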
/*******************************/
/* B.1.2.2 Character Strings */
/*******************************/
{double_byte_character_string} {yylval.ID=strdup(yytext); return double_byte_character_string_token;}
{single_byte_character_string} {yylval.ID=strdup(yytext); return single_byte_character_string_token;}
/******************************/
/* B.1.2.1 Numeric literals */
/******************************/
{integer} {yylval.ID=strdup(yytext); return integer_token;}
{real} {yylval.ID=strdup(yytext); return real_token;}
{binary_integer} {yylval.ID=strdup(yytext); return binary_integer_token;}
{octal_integer} {yylval.ID=strdup(yytext); return octal_integer_token;}
{hex_integer} {yylval.ID=strdup(yytext); return hex_integer_token;}
/*****************************************/
/* B.1.1 Letters, digits and identifiers */
/*****************************************/
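/* An identifier immediately followed by '=>' is the name of an output
* parameter in a formal function/FB invocation (e.g. 'ENO => my_flag'),
* so the two rules below use trailing context to hand it up as a
* distinct sendto_identifier_token rather than a plain identifier.
*/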
<st_state>{identifier}/({st_whitespace})"=>" {yylval.ID=strdup(yytext); return sendto_identifier_token;}
<il_state>{identifier}/({il_whitespace})"=>" {yylval.ID=strdup(yytext); return sendto_identifier_token;}
{identifier} {yylval.ID=strdup(yytext);
// printf("returning identifier...: %s, %d\n", yytext, get_identifier_token(yytext));
return get_identifier_token(yytext);}
/************************************************/
/************************************************/
/************************************************/
/***** *****/
/***** *****/
/***** T H E L E F T O V E R S . . . *****/
/***** *****/
/***** *****/
/************************************************/
/************************************************/
/************************************************/
/* do the single character tokens...
*
* e.g.: ':' '(' ')' '+' '*' ...
*/
. {return yytext[0];}
%%
/***********************************/
/* Utility function definitions... */
/***********************************/
/* print the include file stack to stderr... */
void print_include_stack(void) {
int i;
if ((include_stack_ptr - 1) >= 0)
fprintf (stderr, "in file ");
for (i = include_stack_ptr - 1; i >= 0; i--)
fprintf (stderr, "included from file %s:%d\n", include_stack[i].filename, include_stack[i].env->lineNumber);
}
/* return all the text in the current token back to the input stream, except the first n chars. */
void unput_text(unsigned int n) {
/* it seems that flex has a bug in that it will not correctly count the line numbers
* if we return newlines back to the input stream. These newlines will be re-counted
* a second time when they are processed again by flex.
* We therefore determine how many newlines are in the text we are returning,
* and decrement the line counter accordingly...
*/
/*unsigned int i;
for (i = n; i < strlen(yytext); i++)
if (yytext[i] == '\n')
current_tracking->lineNumber--;*/
/* now return all but the first n chars of the text back to the input stream... */
yyless(n);
}
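/* For example, if a rule matched "ABC123", calling unput_text(3) keeps
* "ABC" as the current token and pushes "123" back to be re-scanned;
* yyless(3) does exactly that, adjusting yytext and yyleng as well.
*/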
/* return all the text in the current token back to the input stream,
* but first return to the stream an additional character to mark the end of the token.
*/
void unput_and_mark(const char c) {
char *yycopy = strdup( yytext ); /* unput() destroys yytext, so we copy it first */
unput(c);
for (int i = yyleng-1; i >= 0; i--)
unput(yycopy[i]);
free(yycopy);
}
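/* For example, with yytext equal to "25h10m", unput_and_mark('#') leaves
* the input stream positioned at "25h10m#": the token text is re-scanned
* first, and the extra '#' marks where it ended.
*/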
/* Called by flex when it reaches the end-of-file */
int yywrap(void)
{
/* We reached the end of the input file... */
/* Should we continue with another file? */
/* If so:
* open the new file...
* return 0;
*/
/* Or do we stop processing?
* If so:
*   return 1;
*/
return 1; /* Stop scanning at end of input file. */
}
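/* A minimal, disabled sketch of the "continue with another file" option
* described above. The file name "next_file.il" is purely hypothetical;
* this scanner's own include-file handling (cf. print_include_stack() above)
* is what actually deals with multiple files, so this is illustration only.
*/
#if 0
int yywrap(void)
{
  FILE *next = fopen("next_file.il", "r");  /* hypothetical follow-up file */
  if (next == NULL)
    return 1;    /* no further input -> stop scanning */
  yyin = next;   /* hand the new stream to flex */
  return 0;      /* tell flex to keep scanning, now from the new file */
}
#endif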
/*************************************/
/* Include a main() function to test */
/* the token parsing by flex.... */
/*************************************/
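/* When this file is compiled with TEST_MAIN defined, the generated
* scanner can be run stand-alone: with no arguments it tokenises
* standard input, and with a single file-name argument it tokenises
* that file, printing one line per token to stderr.
*/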
#ifdef TEST_MAIN
#include "../util/symtable.hh"
yystype yylval;
YYLTYPE yylloc;
const char *current_filename;
int get_identifier_token(const char *identifier_str) {return 0;}
int get_direct_variable_token(const char *direct_variable_str) {return 0;}
int main(int argc, char **argv) {
FILE *in_file;
int res;
if (argc == 1) {
/* Work as an interactive (command line) parser... */
while((res=yylex()))
fprintf(stderr, "(line %d)token: %d\n", yylineno, res);
} else {
/* Work as non-interactive (file) parser... */
if((in_file = fopen(argv[1], "r")) == NULL) {
char *errmsg = strdup2("Error opening main file ", argv[1]);
perror(errmsg);
free(errmsg);
return -1;
}
/* parse the file... */
yyin = in_file;
current_filename = argv[1];
/* loop until yylex() returns 0 at end of input, otherwise we would spin forever once the file is exhausted */
while((res=yylex()) != 0)
fprintf(stderr, "(line %d)token: %d (%s)\n", yylineno, res, yylval.ID);
}
return 0;
}
#endif