/*
* matiec - a compiler for the programming languages defined in IEC 61131-3
*
* Copyright (C) 2003-2011 Mario de Sousa (msousa@fe.up.pt)
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* This code is made available on the understanding that it will not be
* used in safety-critical situations without a full and competent review.
*/
/*
* An IEC 61131-3 compiler.
*
* Based on the
* FINAL DRAFT - IEC 61131-3, 2nd Ed. (2001-12-10)
*
*/
/*
* Stage 1
* =======
*
* This file contains the lexical tokens definitions, from which
* the flex utility will generate a lexical parser function.
*/
/*****************************/
/* Lexical Parser Options... */
/*****************************/
/* The lexical analyser will never work in interactive mode,
* i.e., it will only process programs saved to files, and never
* programs being written interactively by the user.
* This option saves the resulting parser from calling the
* isatty() function, which seems to generate compile
* errors under some (older?) versions of flex.
*/
%option never-interactive
/* Have the lexical analyser use a 'char *yytext' instead of an
* array of char 'char yytext[??]' to store the lexical token.
*/
%pointer
/* Have the lexical analyser ignore the case of letters.
* This applies to all tokens and keywords, but the text
* handed up to the syntax parser is not changed: it keeps
* the original case of the letters in the input file.
*/
%option case-insensitive
/* Have the generated lexical analyser keep track of the
* line number it is currently analysing.
* This is used to pass up to the syntax parser
* the number of the line on which the current
* token was found. It will enable the syntax parser
* to generate more informative error messages...
*/
%option yylineno
/* required for the use of the yy_pop_state() and
* yy_push_state() functions
*/
%option stack
/* The '%option stack' also requests the inclusion of
* the yy_top_state() function; however, this function is
* not currently being used, so the compiler complains
* about it being defined but not used.
* The following option removes the yy_top_state()
* function from the resulting C code, so the compiler
* no longer complains.
*/
%option noyy_top_state
/* We will be using unput() in our flex code, so we cannot set the following option!... */
/*
%option nounput
*/
/**************************************************/
/* External Variable and Function declarations... */
/**************************************************/
%{
/* Define TEST_MAIN to include a main() function.
* Useful for testing the parser generated by flex.
*/
/*
#define TEST_MAIN
*/
/* If the lexical parser is compiled by itself, we need to define the following
* constant to some string. Under normal circumstances LIBDIRECTORY is set
* in the syntax parser header file...
*/
#ifdef TEST_MAIN
#define DEFAULT_LIBDIR "just_testing"
#endif
/* Required for strdup() */
#include <string.h>
/* Required only for the declaration of abstract syntax classes
* (class symbol_c; class token_c; class list_c;)
* These will not be used in flex, but the token type union defined
* in iec_bison.hh contains pointers to these classes, so we must include
* it here.
*/
#include "../absyntax/absyntax.hh"
/* iec_bison.hh is generated by bison.
* Contains the definition of the token constants, and the
* token value type YYSTYPE (in our case, a 'const char *')
*/
#include "iec_bison.hh"
#include "stage1_2_priv.hh"
/* Variable defined by the bison parser,
* where the value of the tokens will be stored
*/
extern YYSTYPE yylval;
/* The name of the file currently being parsed...
* Note that flex accesses and updates this global variable
* appropriately whenever it comes across an (*#include *) directive...
*/
const char *current_filename = NULL;
/* We will not be using unput() in our flex code... */
/* NOTE: it seems that this #define is no longer needed, as it has been
* replaced by %option nounput.
* Should we simply delete it?
* For now leave it in, in case someone is using an old version of flex.
* In any case, the most harm that can result is a warning message
* when compiling iec.flex.c:
* warning: ‘void yyunput(int, char*)’ defined but not used
*/
#define YY_NO_UNPUT
/* Variable defined by the bison parser.
* It must be initialised with the location
* of the token being parsed.
* This is only needed if we want to keep
* track of the locations, in order to give
* more meaningful error messages!
*/
/*
*extern YYLTYPE yylloc;
*/
#define YY_INPUT(buf,result,max_size) {\
result = GetNextChar(buf, max_size);\
if ( result <= 0 )\
result = YY_NULL;\
}
/* Macro that is executed for every action.
* We use it to pass the location of the token
* back to the bison parser...
*/
#define YY_USER_ACTION {\
yylloc.first_line = current_tracking->lineNumber; \
yylloc.first_column = current_tracking->currentTokenStart; \
yylloc.first_file = current_filename; \
yylloc.first_order = current_order; \
yylloc.last_line = current_tracking->lineNumber; \
yylloc.last_column = current_tracking->currentChar - 1; \
yylloc.last_file = current_filename; \
yylloc.last_order = current_order; \
current_tracking->currentTokenStart = current_tracking->currentChar; \
current_order++; \
}
/* Since the lexical parser we define here only works on ASCII-based
* systems, we might as well make sure it is being compiled on
* one...
* Let's check a few random characters...
*/
#if (('a' != 0x61) || ('A' != 0x41) || ('z' != 0x7A) || ('Z' != 0x5A) || \
('0' != 0x30) || ('9' != 0x39) || ('(' != 0x28) || ('[' != 0x5B))
#error This lexical analyser is not portable to a non ASCII based system.
#endif
/* Function only called from within flex, but defined
* in iec.y!
* We declare it here...
*
* Search for a symbol in either of the two symbol tables
* and return the token id of the first symbol found.
* Searches first in the variables, and only if not found
* does it continue searching in the library elements
*/
//token_id_t get_identifier_token(const char *identifier_str);
int get_identifier_token(const char *identifier_str);
%}
/***************************************************/
/* Forward Declaration of functions defined later. */
/***************************************************/
%{
/* return all the text in the current token back to the input stream. */
void unput_text(unsigned int n);
/* return all the text in the current token back to the input stream,
* but first return to the stream an additional character to mark the end of the token.
*/
void unput_and_mark(const char c);
void include_file(const char *include_filename);
int GetNextChar(char *b, int maxBuffer);
%}
/****************************/
/* Lexical Parser States... */
/****************************/
/* NOTE: Our parser can parse st or il code, intermixed
* within the same file.
* With IL we come across the issue of the EOL (end of line) token.
* ST, and the declaration parts of IL do not use this token!
* If the lexical analyser were to issue this token during ST
* language parsing, or during the declaration of data types,
* function headers, etc. in IL, the syntax parser would crash.
*
* We can solve this issue using one of three methods:
* (1) Augment all the syntax that does not accept the EOL
* token to simply ignore it. This makes the syntax
* definition (in iec.y) very cluttered!
* (2) Let the lexical parser figure out which language
* it is parsing, and decide whether or not to issue
* the EOL token. This requires the lexical parser
* to have knowledge of the syntax, making for a poor
* overall organisation of the code. It would also make it
* very difficult to understand the lexical parser as it
* would use several states, and a state machine to transition
* between the states. The state transitions would be
* intermingled with the lexical parser definition!
* (3) Use a mixture of (1) and (2). The lexical analyser
* merely distinguishes between function headers and function
* bodies, but no longer makes a distinction between il and
* st language bodies. When parsing a body, it will return
* the EOL token. In other states '\n' will be ignored as
* whitespace.
* The ST language syntax has been augmented in the syntax
* parser configuration to ignore any EOL tokens that it may
* come across!
* This option has both drawbacks of option (1) and (2), but
* much less intensely.
* The syntax that gets cluttered is limited to the ST statements
* (which is rather limited, compared to the function headers and
* data type declarations, etc...), while the state machine in
* the lexical parser becomes very simple. All state transitions
* can be handled within the lexical parser by itself, and can be
* easily identified. Thus knowledge of the syntax required by
* the lexical parser is very limited!
*
* Amazingly enough, I (Mario) got to implement option (3)
* at first, requiring two basic states, decl and body.
* The lexical parser will enter the body state when
* it is parsing the body of a function/program/function block. The
* state transition is done when we find an END_VAR that is not followed
* by a VAR! This is the syntax knowledge that gets included in the
* lexical analyser with this option!
* Unfortunately, getting the st syntax parser to ignore EOL tokens anywhere
* they might appear leads to conflicts. This is due to the fact
* that the syntax parser uses the single look-ahead token to remove
* possible conflicts. When we insert a possible EOL, the single
* look ahead token becomes the EOL, which means the potential conflicts
* could no longer be resolved.
* Removing these conflicts would make the st syntax parser very convoluted,
* and adding the extraneous EOL would make it very cluttered.
* This option was therefore dropped in favour of another!
*
* I ended up implementing (2). Unfortunately the lexical analyser can
* not easily distinguish between il and st code, since function
* calls in il are very similar to function block calls in st.
* We therefore use an extra 'body' state. When the lexical parser
* finds that last END_VAR, it enters the body state. This state
* must figure out what language is being parsed from the first few
* tokens, and switch to the correct state (st, il or sfc) according to the
* language. This means that we insert quite a bit of knowledge of the
* syntax of the languages into the lexical parser. This is ugly, but it
* works, and at least it is possible to keep all the state changes together
* to make it easier to remove them later on if need be.
* Once the language being parsed has been identified,
* the body state returns any matched text back to the buffer with unput(),
* to be later matched correctly by the appropriate language parser (st, il or sfc).
*
* Additionally, in sfc state it may further recursively enter the body state
* once again. This is because an sfc body may contain ACTIONS, which are then
* written in one of the three languages (ST, IL or SFC), so once again we need
* to figure out which language the ACTION in the SFC was written in. We already
* have all that done in the body state, so we recursively transition to the body
* state once again.
* Note that in this case, when coming out of the st/il state (whichever language
* the action was written in) the sfc state will become active again. This is done by
* pushing and popping the previously active state!
*
* The sfc_qualifier_state is required because when parsing actions within an
* sfc, we will be expecting action qualifiers (N, P, R, S, DS, SD, ...). In order
* for bison to work correctly, these qualifiers must be returned as tokens. However,
* these tokens are not reserved keywords, which means it should be possible to
* define variables/functions/FBs with any of these names (including
* S and R which are special because they are also IL operators). So, when we are not
* expecting any action qualifiers, flex does not return these tokens, and is free
* to interpret them as previously defined variables/functions/... as the case may be.
*
* The time_literal_state is required because TIME# literals are decomposed into
* portions, and we want to send these portions one by one to bison. Each portion will
* represent the value in days/hours/minutes/seconds/ms.
* Unfortunately, some of these portions may also be lexically analysed as an identifier. So,
* we need to disable lexical identification of identifiers while parsing TIME# literals!
* e.g.: TIME#55d_4h_56m
* We would like to return to bison the tokens 'TIME' '#' '55d' '_' '4h' '_' '56m'
* Unfortunately, flex will join '_' and '4h' to create a legal {identifier} '_4h',
* and return that identifier instead! So, we added this state!
*
* There is a main state machine...
*
*       +---> INITIAL <-------> config
*       |        |
*       |        v
*       |   header_state
*       |        |
*       |        v
*       | vardecl_list_state <------> var_decl
*       ^        |
*       |        | [using push()]
*       |        v
*       |      body
*       |        |
*       |   +----+----+
*       |   |    |    |
*       |   v    v    v
*       |   st   il   sfc
*       |   |    |    |     [using pop() when leaving st/il/sfc => goes to vardecl_list_state]
*       +---+----+----+
*
* NOTE:- When inside sfc, and an action or transition in ST/IL is found, then
* we also push() to the body state. This means that sometimes, when pop()ing
* from st and il, the state machine may return to the sfc state!
* - The transitions from sfc to body will be decided by bison, which will
* tell flex to do the transition by calling cmd_goto_body_state().
*
*
* Possible state changes are:
* INITIAL -> goto(config_state)
* (when a CONFIGURATION is found)
*
* INITIAL -> goto(header_state)
* (when a FUNCTION, FUNCTION_BLOCK, or PROGRAM is found)
* header_state -> goto(vardecl_list_state)
* (When the first VAR token is found, i.e. at the beginning of the first VAR .. END_VAR declaration)
*
* vardecl_list_state -> push current state (vardecl_list_state), and goto(vardecl_state)
* (when a VAR token is found)
* vardecl_state -> pop() to (vardecl_list_state)
* (when a END_VAR token is found)
*
* vardecl_list_state -> push current state (vardecl_list_state), and goto(body_state)
* (when the last END_VAR is found!)
*
* body_state -> goto(sfc_state)
* (when it figures out it is parsing sfc language)
* body_state -> goto(st_state)
* (when it figures out it is parsing st language)
* body_state -> goto(il_state)
* (when it figures out it is parsing il language)
* st_state -> pop() to vardecl_list_state
* (when a END_FUNCTION, END_FUNCTION_BLOCK, END_PROGRAM,
* END_ACTION or END_TRANSITION is found)
* il_state -> pop() to vardecl_list_state
* (when a END_FUNCTION, END_FUNCTION_BLOCK, END_PROGRAM,
* END_ACTION or END_TRANSITION is found)
* sfc_state -> pop() to vardecl_list_state
* (when a END_FUNCTION, END_FUNCTION_BLOCK, or END_PROGRAM is found)
*
* vardecl_list_state -> goto(INITIAL)
* (when a END_FUNCTION, END_FUNCTION_BLOCK, or END_PROGRAM is found)
* config_state -> goto(INITIAL)
* (when a END_CONFIGURATION is found)
*
*
* sfc_state -> push current state(sfc_state); goto(body_state)
* (when parsing an action. This transition is requested by bison)
* sfc_state -> push current state(sfc_state); goto(sfc_qualifier_state)
* (when expecting an action qualifier. This transition is requested by bison)
* sfc_qualifier_state -> pop() to sfc_state
* (when no longer expecting an action qualifier. This transition is requested by bison)
*
* config_state -> push(config_state); goto(task_init_state)
* (when parsing a task initialisation. This transition is requested by bison)
* task_init_state -> pop()
* (when no longer parsing task initialisation parameters. This transition is requested by bison)
*
*
* There is another secondary state machine for parsing comments, another for file_includes,
* and yet another for time literals.
*/
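/* Illustrative walk-through (an informal sketch only; the POU and variable
 * names below are made up for illustration):
 *
 *   FUNCTION_BLOCK fb1              (* INITIAL -> push header_state *)
 *   VAR_INPUT  in1  : INT; END_VAR  (* header_state -> vardecl_list_state,
 *                                      then push/pop vardecl_state *)
 *   VAR        out1 : INT; END_VAR  (* push/pop vardecl_state *)
 *     out1 := in1 + 1;              (* vardecl_list_state -> push body_state,
 *                                      which detects ST -> st_state *)
 *   END_FUNCTION_BLOCK              (* pop back to vardecl_list_state,
 *                                      then -> INITIAL *)
 */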
/* we are parsing a configuration. */
%s config_state
/* Inside a configuration, we are parsing a task's initialisation parameters */
/* This means that PRIORITY, SINGLE and INTERVAL must be handled as
* tokens, and not as possible identifiers. Note that the above words
* are not keywords.
*/
%s task_init_state
/* we are looking for the first VAR inside a function's, program's or function block's declaration */
/* This is not exclusive (%x) as we must be able to parse the identifier and data types of a function/FB */
%s header_state
/* we are parsing a function, program or function block sequence of VAR..END_VAR declarations */
%x vardecl_list_state
/* a substate of the vardecl_list_state: we are inside a specific VAR .. END_VAR */
%s vardecl_state
/* we will be parsing a function body/action/transition. Whether il/st/sfc remains to be determined */
%x body_state
/* we are parsing il code -> flex must return the EOL tokens! */
%s il_state
/* we are parsing st code -> flex must not return the EOL tokens! */
%s st_state
/* we are parsing sfc code -> flex must not return the EOL tokens! */
%s sfc_state
/* we are parsing sfc code, and expecting an action qualifier. */
%s sfc_qualifier_state
/* we are parsing sfc code, and expecting the priority token. */
%s sfc_priority_state
/* we are parsing a TIME# literal. We must not return any {identifier} tokens. */
%x time_literal_state
/* we are parsing a comment. */
%x comment_state
/*******************/
/* File #include's */
/*******************/
/* We extend the IEC 61131-3 standard syntax to allow inclusion
* of other files, using the IEC 61131-3 pragma directive...
* The accepted syntax is:
* {#include "<filename>"}
*/
/* the "include" states are used for picking up the name of an include file */
%x include_beg
%x include_filename
%x include_end
file_include_pragma_filename [^\"]*
file_include_pragma_beg "{#include"{st_whitespace}\"
file_include_pragma_end \"{st_whitespace}"}"
file_include_pragma {file_include_pragma_beg}{file_include_pragma_filename}{file_include_pragma_end}
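/* For example (the file name below is hypothetical), a pragma such as
 *   {#include "standard_lib.txt"}
 * in the source would make flex start reading tokens from that file, and
 * return to the current file once its end is reached.
 */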
%{
/* A counter to track the order by which each token is processed.
* NOTE: This counter is not exactly linear (i.e., it does not get incremented by 1 for each token).
* i.e., it may get incremented by more than one between two consecutive tokens.
* This is due to the fact that the counter gets incremented every 'user action' in flex,
* however not every user action will result in a token being passed to bison.
* Nevertheless this is still OK, as we are only interested in the relative
* ordering of tokens...
*/
static long int current_order = 0;
typedef struct {
int eof;
int lineNumber;
int currentChar;
int lineLength;
int currentTokenStart;
char *buffer;
FILE *in_file;
} tracking_t;
/* A forward declaration of a function defined at the end of this file. */
void FreeTracking(tracking_t *tracking);
#define MAX_INCLUDE_DEPTH 16
typedef struct {
YY_BUFFER_STATE buffer_state;
tracking_t *env;
const char *filename;
} include_stack_t;
tracking_t *current_tracking = NULL;
include_stack_t include_stack[MAX_INCLUDE_DEPTH];
int include_stack_ptr = 0;
const char *INCLUDE_DIRECTORIES[] = {
DEFAULT_LIBDIR,
".",
"/lib",
"/usr/lib",
"/usr/lib/iec",
NULL /* must end with NULL!! */
};
%}
/*****************************/
/* Preliminary constructs... */
/*****************************/
/* PRAGMAS */
/* ======= */
/* In order to allow the declaration of POU prototypes (Function, FB, Program, ...),
* especially the prototypes of Functions and FBs defined in the standard
* (i.e. standard functions and FBs), we extend the IEC 61131-3 standard syntax
* with two pragmas to indicate that the code is to be parsed (going through the
* lexical, syntactical, and semantic analysers), but no code is to be generated.
*
* The accepted syntax is:
* {disable code generation}
* ... prototypes ...
* {enable code generation}
*
* When parsing these prototypes the abstract syntax tree will be populated as usual,
* allowing the semantic analyser to correctly analyse the semantics of calls to these
* functions/FBs. However, stage4 will simply ignore all IEC61131-3 code
* between the above two pragmas.
*/
disable_code_generation_pragma "{disable code generation}"
enable_code_generation_pragma "{enable code generation}"
/* Any other pragma... */
pragma ("{"[^}]*"}")|("{{"([^}]|"}"[^}])*"}}")
/* COMMENTS */
/* ======== */
/* In order to allow nested comments, comments are handled by a specific comment_state state */
/* Whenever a "(*" is found, we push the current state onto the stack, and enter a new instance of the comment_state state.
* Whenever a "*)" is found, we pop a state off the stack
*/
/* comments... */
comment_beg "(*"
comment_end "*)"
/* However, bison has a shift/reduce conflict when parsing formal function/FB
* invocations with the 'NOT variable_name =>' syntax (which needs two look-ahead
* tokens to be parsed correctly - and bison, being LALR(1), only supports one).
* The current workaround requires flex to completely parse the 'variable_name =>'
* sequence. This sequence includes whitespace and/or comments between the
* variable name and the "=>" token.
*
* This flex rule (sendto_identifier_token) uses the whitespace/comment as trailing context,
* which means we can not use the comment_state method of specifying/finding and ignoring
* comments.
*
* For this reason only, we must also define what a complete comment looks like, so
* it may be used in this rule. Since the rule uses the whitespace_or_comment
* construct as trailing context, this definition of comment must not use any
* trailing context either.
*
* Additionally, it is not possible to define nested comments in flex without the use of
* states, so for this particular location, we do NOT support nested comments.
*/
/* NOTE: this seemingly unnecessarily complex definition is required
* to be able to eat up comments such as:
* '(* Testing... ! ***** ******)'
* without using the trailing context command in flex (/{context})
* since {comment} itself will later be used with
* trailing context ({comment}/{context})
*/
not_asterisk [^*]
not_close_parenthesis_nor_asterisk [^*)]
asterisk "*"
comment_text ({not_asterisk})|(({asterisk}+){not_close_parenthesis_nor_asterisk})
comment "(*"({comment_text}*)({asterisk}+)")"
/* 3.1 Whitespace */
/* ============== */
/*
* Whitespace is clearly defined (see IEC 61131-3 v2, section 2.1.4)
*
* Whitespace definition includes the newline character.
*
* However, the standard is inconsistent in that in IL the newline character
* is considered a token (EOL - end of line).
* In our implementation we therefore have two definitions of whitespace
* - one for ST, that includes the newline character
* - one for IL without the newline character.
* Additionally, when parsing IL, the newline character is treated as the EOL token.
* This requires the use of a state machine in the lexical parser that needs at least
* some knowledge of the syntax itself.
*
* NOTE: Our definition of whitespace will only work in ASCII!
*
* NOTE: we cannot use
* st_whitespace [:space:]*
* since we use {st_whitespace} as trailing context. In our case
* this would not constitute "dangerous trailing context", but the
* lexical generator (i.e. flex) does not know this (since it does
* not know which characters belong to the set [:space:]), and will
* generate a "dangerous trailing context" warning!
* We use this alternative just to stop the flex utility from
* generating the invalid (in this case) warning...
*/
st_whitespace [ \f\n\r\t\v]*
il_whitespace [ \f\r\t\v]*
st_whitespace_or_pragma_or_commentX ({st_whitespace})|({pragma})|({comment})
il_whitespace_or_pragma_or_commentX ({il_whitespace})|({pragma})|({comment})
st_whitespace_or_pragma_or_comment {st_whitespace_or_pragma_or_commentX}*
il_whitespace_or_pragma_or_comment {il_whitespace_or_pragma_or_commentX}*
qualified_identifier {identifier}(\.{identifier})+
/*****************************************/
/* B.1.1 Letters, digits and identifiers */
/*****************************************/
/* NOTE: The following definitions only work if the host computer
* is using the ASCII mapping. E.g., with EBCDIC, [A-Z]
* contains non-alphabetic characters!
* The correct way of doing it would be to use
* the [:upper:] etc... definitions.
*
* Unfortunately, further on we need all printable
* characters (i.e. [:print:]), but excluding '$'.
* Flex does not allow sets to be composed by excluding
* elements. Sets may only be constructed by adding new
* elements, which means that we have to revert to
* [\x20\x21\x23\x25\x26\x28-\x7E] for the definition
* of the printable characters with the required exceptions.
* The above also implies the use of ASCII, but now we have
* no way to work around it!
*
* The conclusion is that our parser is limited to ASCII
* based host computers!!
*/
letter [A-Za-z]
digit [0-9]
octal_digit [0-7]
hex_digit {digit}|[A-F]
identifier ({letter}|(_({letter}|{digit})))((_?({letter}|{digit}))*)
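/* A few informal examples of what the {identifier} definition above does
 * and does not match:
 *   matched:            abc   ABC9   _a1   motor_speed_2
 *   not matched in full: 1abc  (starts with a digit)
 *                        a__b  (consecutive underscores)
 *                        abc_  (trailing underscore)
 */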
/*******************/
/* B.1.2 Constants */
/*******************/
/******************************/
/* B.1.2.1 Numeric literals */
/******************************/
integer {digit}((_?{digit})*)
/* Some helper symbols for parsing TIME literals... */
integer_0_59 (0(_?))*([0-5](_?))?{digit}
integer_0_19 (0(_?))*([0-1](_?))?{digit}
integer_20_23 (0(_?))*2(_?)[0-3]
integer_0_23 {integer_0_19}|{integer_20_23}
integer_0_999 {digit}((_?{digit})?)((_?{digit})?)
bit [0-1]
binary_integer 2#{bit}((_?{bit})*)
octal_integer 8#{octal_digit}((_?{octal_digit})*)
hex_integer 16#{hex_digit}((_?{hex_digit})*)
exponent [Ee]([+-]?){integer}
/* The correct definition for real would be:
* real {integer}\.{integer}({exponent}?)
*
* Unfortunately, the spec also defines fixed_point (B 1.2.3.1) as:
* fixed_point {integer}\.{integer}
*
* This means that {integer}\.{integer} could be interpreted
* as either a fixed_point or a real.
* I have opted to interpret {integer}\.{integer} as a fixed_point.
* In order to do this, the definition of real has been changed to:
* real {integer}\.{integer}{exponent}
*
* This means that the syntax parser now needs to define a real to be
* either a real_token or a fixed_point_token!
*/
real {integer}\.{integer}{exponent}
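/* Informal examples of the numeric literal definitions above:
 *   integer:         123      1_000_000
 *   binary_integer:  2#1010_0101
 *   octal_integer:   8#777
 *   hex_integer:     16#FF_FF
 *   real:            3.14159E-2     1.0e6
 * Note that 3.14159 (no exponent) is matched as a {fixed_point}, not as a
 * {real}, as explained in the comment above.
 */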
/*******************************/
/* B.1.2.2 Character Strings */
/*******************************/
/*
common_character_representation :=
|'$$'
|'$L'|'$N'|'$P'|'$R'|'$T'
|'$l'|'$n'|'$p'|'$r'|'$t'
NOTE: $ = 0x24
" = 0x22
' = 0x27
printable chars in ASCII: 0x20-0x7E
*/
esc_char_u $L|$N|$P|$R|$T
esc_char_l $l|$n|$p|$r|$t
esc_char $$|{esc_char_u}|{esc_char_l}
double_byte_char (${hex_digit}{hex_digit}{hex_digit}{hex_digit})
single_byte_char (${hex_digit}{hex_digit})
/* WARNING:
* This definition is only valid in ASCII...
*
* Flex includes the function print_char() that defines
* all printable characters portably (i.e. whatever character
* encoding is currently being used , ASCII, EBCDIC, etc...)
* Unfortunately, we cannot generate the definition of
* common_character_representation portably, since flex
* does not allow definition of sets by subtracting
* elements in one set from another set.
* This means we must build up the definition of
* common_character_representation using only set addition,
* which leaves us with the only choice of defining the
* characters non-portably...
*/
common_character_representation [\x20\x21\x23\x25\x26\x28-\x7E]|{esc_char}
double_byte_character_representation $\"|'|{double_byte_char}|{common_character_representation}
single_byte_character_representation $'|\"|{single_byte_char}|{common_character_representation}
double_byte_character_string \"({double_byte_character_representation}*)\"
single_byte_character_string '({single_byte_character_representation}*)'
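/* Informal examples of character strings matched by the definitions above:
 *   single_byte_character_string:  'THIS IS A STRING'   'IT$'S OK'   '$0A'
 *   double_byte_character_string:  "CHARACTER CODE $0041"   "A $" INSIDE"
 */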
/************************/
/* B 1.2.3.1 - Duration */
/************************/
fixed_point {integer}\.{integer}
/* NOTE: The IEC 61131-3 v2 standard has an incorrect formal syntax definition of duration,
* as its definition does not match the standard's text.
* IEC 61131-3 v3 (committee draft) seems to have this fixed, so we use that
* definition instead!
*
* duration::= ('T' | 'TIME') '#' ['+'|'-'] interval
* interval::= days | hours | minutes | seconds | milliseconds
* fixed_point ::= integer [ '.' integer]
* days ::= fixed_point 'd' | integer 'd' ['_'] [ hours ]
* hours ::= fixed_point 'h' | integer 'h' ['_'] [ minutes ]
* minutes ::= fixed_point 'm' | integer 'm' ['_'] [ seconds ]
* seconds ::= fixed_point 's' | integer 's' ['_'] [ milliseconds ]
* milliseconds ::= fixed_point 'ms'
*
*
* The original IEC 61131-3 v2 definition is:
* duration ::= ('T' | 'TIME') '#' ['-'] interval
* interval ::= days | hours | minutes | seconds | milliseconds
* fixed_point ::= integer [ '.' integer]
* days ::= fixed_point 'd' | integer 'd' ['_'] hours
* hours ::= fixed_point 'h' | integer 'h' ['_'] minutes
* minutes ::= fixed_point 'm' | integer 'm' ['_'] seconds
* seconds ::= fixed_point 's' | integer 's' ['_'] milliseconds
* milliseconds ::= fixed_point 'ms'
*/
interval_ms_X ({integer_0_999}(\.{integer})?)ms
interval_s_X {integer_0_59}s(_?{interval_ms_X})?|({integer_0_59}(\.{integer})?s)
interval_m_X {integer_0_59}m(_?{interval_s_X})?|({integer_0_59}(\.{integer})?m)
interval_h_X {integer_0_23}h(_?{interval_m_X})?|({integer_0_23}(\.{integer})?h)
interval_ms {integer}ms|({fixed_point}ms)
interval_s {integer}s(_?{interval_ms_X})?|({fixed_point}s)
interval_m {integer}m(_?{interval_s_X})?|({fixed_point}m)
interval_h {integer}h(_?{interval_m_X})?|({fixed_point}h)
interval_d {integer}d(_?{interval_h_X})?|({fixed_point}d)
interval {interval_ms}|{interval_s}|{interval_m}|{interval_h}|{interval_d}
/* to help provide nice error messages, we also parse an incorrect but plausible interval... */
/* NOTE that this erroneous interval will be parsed outside the time_literal_state, so must not
* be able to parse any other legal lexical construct (besides a legal interval, but that
* is OK as this rule will appear _after_ the rule to parse legal intervals!).
*/
fixed_point_or_integer {fixed_point}|{integer}
erroneous_interval ({fixed_point_or_integer}d_?)?({fixed_point_or_integer}h_?)?({fixed_point_or_integer}m_?)?({fixed_point_or_integer}s_?)?({fixed_point_or_integer}ms)?
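/* Informal examples: the {interval} definition above matches values such as
 *   25h_15m      5d_14h_12m_18s_3.5ms      12.4s      100ms
 * while something like 5d_25h (the hours field must be 0-23 when it follows
 * a days field) fails to match {interval} but does match
 * {erroneous_interval}, which allows a more helpful error message to be
 * produced later on.
 */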
/********************************************/
/* B.1.4.1 Directly Represented Variables */
/********************************************/
/* The correct definition, if the standard were to be followed... */
location_prefix [IQM]
size_prefix [XBWDL]
direct_variable_standard %{location_prefix}({size_prefix}?){integer}((\.{integer})*)
/* For the MatPLC, we will accept %<identifier>
* as a direct variable, this being mapped onto the MatPLC point
* named <identifier>
*/
/* TODO: we should not restrict it to only the accepted syntax
* of direct variables as specified by the standard. MatPLC point names
* have a more permissive syntax.
*
* e.g. "P__234"
* Is a valid MatPLC point name, but not a valid direct variable!!
* The same happens with names such as "333", "349+23", etc...
* How can we handle these more expressive names in our case?
* Remember that some direct variable may remain anonymous, with
* declarations such as:
* VAR
* AT %I3 : BYTE := 255;
* END_VAR
* in which case we are currently using "%I3" as the variable
* name.
*/
/* direct_variable_matplc %{identifier} */
/* direct_variable {direct_variable_standard}|{direct_variable_matplc} */
direct_variable {direct_variable_standard}
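/* Informal examples: %IX0.0, %QW100 and %MD48 all match the
 * {direct_variable_standard} definition above, while incomplete locations
 * such as %I* are handled by the {incompl_location} definition below.
 */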
/******************************************/
/* B 1.4.3 - Declaration & Initialisation */
/******************************************/
incompl_location %[IQM]\*
%%
/* fprintf(stderr, "flex: state %d\n", YY_START); */
/*****************************************************/
/*****************************************************/
/*****************************************************/
/***** *****/
/***** *****/
/***** F I R S T T H I N G S F I R S T *****/
/***** *****/
/***** *****/
/*****************************************************/
/*****************************************************/
/*****************************************************/
/***********************************************************/
/* Handle requests sent by bison for flex to change state. */
/***********************************************************/
if (get_goto_body_state()) {
yy_push_state(body_state);
rst_goto_body_state();
}
if (get_goto_sfc_qualifier_state()) {
yy_push_state(sfc_qualifier_state);
rst_goto_sfc_qualifier_state();
}
if (get_goto_sfc_priority_state()) {
yy_push_state(sfc_priority_state);
rst_goto_sfc_priority_state();
}
if (get_goto_task_init_state()) {
yy_push_state(task_init_state);
rst_goto_task_init_state();
}
if (get_pop_state()) {
yy_pop_state();
rst_pop_state();
}
/***************************/
/* Handle the pragmas! */
/***************************/
/* We start off by searching for the pragmas we handle in the lexical parser. */
{file_include_pragma} unput_text(0); yy_push_state(include_beg);
/* Pragmas sent to syntax analyser (bison) */
{disable_code_generation_pragma} return disable_code_generation_pragma_token;
{enable_code_generation_pragma} return enable_code_generation_pragma_token;
<body_state,vardecl_list_state>{disable_code_generation_pragma} return disable_code_generation_pragma_token;
<body_state,vardecl_list_state>{enable_code_generation_pragma} return enable_code_generation_pragma_token;
/* Any other pragma we find, we just pass it up to the syntax parser... */
/* Note that the body_state and vardecl_list_state states are exclusive, so we have to include them here too. */
{pragma} {/* return the pragma without the enclosing '{' and '}' */
int cut = yytext[1]=='{'?2:1;
yytext[strlen(yytext)-cut] = '\0';
yylval.ID=strdup(yytext+cut);
return pragma_token;
}
<body_state,vardecl_list_state>{pragma} {/* return the pragma without the enclosing '{' and '}' */
int cut = yytext[1]=='{'?2:1;
yytext[strlen(yytext)-cut] = '\0';
yylval.ID=strdup(yytext+cut);
return pragma_token;
}
/*********************************/
/* Handle the file includes! */
/*********************************/
<include_beg>{file_include_pragma_beg} BEGIN(include_filename);
<include_filename>{file_include_pragma_filename} {
/* set the internal state variables of lexical analyser to process a new include file */
include_file(yytext);
/* switch to whatever state was active before the include file */
yy_pop_state();
/* now process the new file... */
}
<<EOF>> { /* NOTE: Currently bison is incorrectly using END_OF_INPUT in many rules
* when checking for syntax errors in the input source code.
* This means that in reality flex will be asked to carry on reading the input
* even after it has reached the end of all (including the main) input files.
* In other words, we will be called to return more tokens, even after we have
* already returned an END_OF_INPUT token. In this case, we must carry on returning
* more END_OF_INPUT tokens.
*
* However, in the above case we will be asked to carry on reading more tokens
* from the main input file, after we have reached the end. For this to work
* correctly, we cannot close the main input file!
*
* This is why we WILL be called with include_stack_ptr == 0 multiple times,
* and why we must handle it as a special case
* that leaves the include_stack_ptr unchanged, and returns END_OF_INPUT once again.
*
* As a corollary, flex can never safely close the main input file, and we must ask
* bison to close it!
*/
if (include_stack_ptr == 0) {
// fclose(yyin); // Must not do this!!
// FreeTracking(current_tracking); // Must not do this!!
/* yyterminate() terminates the scanner and returns a 0 to the
* scanner's caller, indicating "all done".
*
* Our syntax parser (written with bison) has the token
* END_OF_INPUT associated to the value 0, so even though
* we don't explicitly return the token END_OF_INPUT
* calling yyterminate() is equivalent to doing that.
*/
yyterminate();
} else {
fclose(yyin);
FreeTracking(current_tracking);
--include_stack_ptr;
yy_delete_buffer(YY_CURRENT_BUFFER);
yy_switch_to_buffer((include_stack[include_stack_ptr]).buffer_state);
current_tracking = include_stack[include_stack_ptr].env;
/* removing constness of char *. This is safe actually,
* since the only real const char * that is stored on the stack is
* the first one (i.e. the one that gets stored in include_stack[0],
* which is never free'd)!
*/
/* NOTE: We do __NOT__ free the malloc()'d memory since
* pointers to this filename will be kept by many objects
* in the abstract syntax tree.
* This will later be used to provide correct error
* messages during semantic analysis (stage 3)
*/
/* free((char *)current_filename); */
current_filename = include_stack[include_stack_ptr].filename;
yy_push_state(include_end);
}
}
<include_end>{file_include_pragma_end} yy_pop_state();
/* handle the artificial file includes created by include_string(), which do not end with a '}' */
<include_end>. unput_text(0); yy_pop_state();
/*********************************/
/* Handle all the state changes! */
/*********************************/
/* INITIAL -> header_state */
<INITIAL>{
/* NOTE: how about functions that do not declare variables, and go directly to the body_state???
* - According to Section 2.5.1.3 (Function Declaration), item 2 in the list, a FUNCTION
* must have at least one input argument, so a correct declaration will have at least
* one VAR_INPUT ... END_VAR construct!
* - According to Section 2.5.2.2 (Function Block Declaration), a FUNCTION_BLOCK
* must have at least one input argument, so a correct declaration will have at least
* one VAR_INPUT ... END_VAR construct!
* - According to Section 2.5.3 (Programs), a PROGRAM must have at least one input
* argument, so a correct declaration will have at least one VAR_INPUT ... END_VAR
* construct!
*
* All the above means that we needn't worry about PROGRAMs, FUNCTIONs or
* FUNCTION_BLOCKs that do not have at least one END_VAR before the body_state.
* If the code has an error, and no END_VAR before the body, we will simply
* continue in the header_state until the end of the FUNCTION, FUNCTION_BLOCK
* or PROGRAM.
*/
FUNCTION yy_push_state(header_state); return FUNCTION;
FUNCTION_BLOCK yy_push_state(header_state); return FUNCTION_BLOCK;
PROGRAM yy_push_state(header_state); return PROGRAM;
CONFIGURATION BEGIN(config_state); return CONFIGURATION;
}
/* INITIAL -> body_state */
/* required if the function, program, etc.. has no VAR block! */
/* We comment it out since the standard does not allow this. */
/* NOTE: Even if we were to include the following code, it */
/* would have no effect whatsoever since the above */
/* rules will take precedence! */
/*
{
FUNCTION BEGIN(body_state); return FUNCTION;
FUNCTION_BLOCK BEGIN(body_state); return FUNCTION_BLOCK;
PROGRAM BEGIN(body_state); return PROGRAM;
}
*/
/* header_state -> (vardecl_list_state) */
<header_state>{
VAR | /* execute the next rule's action, i.e. fall-through! */
VAR_INPUT |
VAR_OUTPUT |
VAR_IN_OUT |
VAR_EXTERNAL |
VAR_GLOBAL |
VAR_TEMP |
VAR_CONFIG |
VAR_ACCESS unput_text(0); BEGIN(vardecl_list_state);
}
/* vardecl_list_state -> (vardecl_state | body_state | INITIAL) */
<vardecl_list_state>{
VAR_INPUT | /* execute the next rule's action, i.e. fall-through! */
VAR_OUTPUT |
VAR_IN_OUT |
VAR_EXTERNAL |
VAR_GLOBAL |
VAR_TEMP |
VAR_CONFIG |
VAR_ACCESS |
VAR unput_text(0); yy_push_state(vardecl_state);
END_FUNCTION unput_text(0); BEGIN(INITIAL);
END_FUNCTION_BLOCK unput_text(0); BEGIN(INITIAL);
END_PROGRAM unput_text(0); BEGIN(INITIAL);
. unput_text(0); yy_push_state(body_state); /* anything else, just change to body_state! */
}
/* vardecl_state -> pop back to $previous_state (vardecl_list_state) */
<vardecl_state>{
END_VAR yy_pop_state(); return END_VAR; /* pop back to vardecl_list_state */
}
/* body_state -> (il_state | st_state | sfc_state) */
<body_state>{
INITIAL_STEP unput_text(0); BEGIN(sfc_state);
{qualified_identifier} unput_text(0); BEGIN(st_state); /* will always be followed by '[' for an array access, or ':=' as the left hand of an assignment statement */
{direct_variable_standard} unput_text(0); BEGIN(st_state); /* will always be followed by ':=' as the left hand of an assignment statement */
RETURN unput_text(0); BEGIN(st_state);
IF unput_text(0); BEGIN(st_state);
CASE unput_text(0); BEGIN(st_state);
FOR unput_text(0); BEGIN(st_state);
WHILE unput_text(0); BEGIN(st_state);
EXIT unput_text(0); BEGIN(st_state);
REPEAT unput_text(0); BEGIN(st_state);
/* ':=' occurs only in transitions, and not Function or FB bodies! */
:= unput_text(0); BEGIN(st_state);
{identifier} {int token = get_identifier_token(yytext);
if ((token == prev_declared_fb_name_token) || (token == prev_declared_variable_name_token)) {
/* the code has a call to a function block OR has an assignment with a variable as the lvalue */
unput_text(0); BEGIN(st_state);
} else
if (token == prev_declared_derived_function_name_token) {
/* the code has a call to a function - must be IL */
unput_text(0); BEGIN(il_state);
} else {
/* Might be a label in IL, or a bug in ST/IL code. We jump to IL */
unput_text(0); BEGIN(il_state);
}
}
. unput_text(0); BEGIN(il_state); /* Don't know what it could be. This is most likely a bug. Let's just go to a random state... */
} /* end of body_state lexical parser */
/* (il_state | st_state) -> pop to $previous_state (vardecl_list_state or sfc_state) */
<il_state,st_state>{
END_FUNCTION yy_pop_state(); unput_text(0);
END_FUNCTION_BLOCK yy_pop_state(); unput_text(0);
END_PROGRAM yy_pop_state(); unput_text(0);
END_TRANSITION yy_pop_state(); unput_text(0);
END_ACTION yy_pop_state(); unput_text(0);
}
/* sfc_state -> pop to $previous_state (vardecl_list_state or sfc_state) */
<sfc_state>{
END_FUNCTION yy_pop_state(); unput_text(0);
END_FUNCTION_BLOCK yy_pop_state(); unput_text(0);
END_PROGRAM yy_pop_state(); unput_text(0);
}
/* config -> INITIAL */
<config_state>END_CONFIGURATION BEGIN(INITIAL); return END_CONFIGURATION;
/***************************************/
/* Next is to remove all whitespace */
/***************************************/
/* NOTE: pragmas are handled right at the beginning... */
/* The whitespace */
{st_whitespace} /* Eat any whitespace */
<il_state>{il_whitespace} /* Eat any whitespace */
/* The comments */
{comment_beg} yy_push_state(comment_state);
<body_state,vardecl_list_state>{comment_beg} yy_push_state(comment_state);
<comment_state>{
{comment_beg} {if (get_opt_nested_comments()) yy_push_state(comment_state);}
{comment_end} yy_pop_state();
. /* Ignore text inside comment! */
\n /* Ignore text inside comment! */
}
/*****************************************/
/* B.1.1 Letters, digits and identifiers */
/*****************************************/
/* NOTE: 'R1', 'IN', etc... are IL operators, and therefore tokens
* On the other hand, the spec does not define them as keywords,
* which means they may be re-used for variable names, etc...!
* The syntax parser already caters for the possibility of these
* tokens being used for variable names in their declarations.
* When they are declared, they will be added to the variable symbol table!
* Further appearances of these tokens must no longer be parsed
* as R1_tokens etc..., but rather as variable_name_tokens!
*
* That is why the first thing we do with identifiers, even before
* checking whether they may be a 'keyword', is to check whether
* they have been previously declared as a variable name,
*
* However, we have a dilemma! Should we also check here for
* prev_declared_derived_function_name_token?
* If we do, then the 'MOD' default library function (defined in
* the standard) will always be returned as a function name, and
* it will therefore not be possible to use it as an operator as
* in the following ST expression 'X := Y MOD Z;' !
* If we don't, then it will not even be possible to use 'MOD'
* as a function, as in 'X := MOD(Y, Z);'
* We solve this by NOT testing for function names here, and
* handling this function and keyword clash in bison!
*/
/*
{identifier} {int token = get_identifier_token(yytext);
// fprintf(stderr, "flex: analysing identifier '%s'...", yytext);
if ((token == prev_declared_variable_name_token) ||
// (token == prev_declared_derived_function_name_token) || // DO NOT add this condition!
(token == prev_declared_fb_name_token)) {
// if (token != identifier_token)
// * NOTE: if we replace the above uncommented conditions with
* the simple test of (token != identifier_token), then
* 'MOD' et al must be removed from the
* library_symbol_table as a default function name!
* //
yylval.ID=strdup(yytext);
// fprintf(stderr, "returning token %d\n", token);
return token;
}
// otherwise, leave it for the other lexical parser rules...
// fprintf(stderr, "rejecting\n");
REJECT;
}
*/
/******************************************************/
/******************************************************/
/******************************************************/
/***** *****/
/***** *****/
/***** N O W D O T H E K E Y W O R D S *****/
/***** *****/
/***** *****/
/******************************************************/
/******************************************************/
/******************************************************/
REF {if (get_opt_ref_standard_extensions()) return REF; else{REJECT;}} /* Keyword in IEC 61131-3 v3 */
DREF {if (get_opt_ref_standard_extensions()) return DREF; else{REJECT;}} /* Keyword in IEC 61131-3 v3 */
REF_TO {if (get_opt_ref_standard_extensions()) return REF_TO; else{REJECT;}} /* Keyword in IEC 61131-3 v3 */
NULL {if (get_opt_ref_standard_extensions()) return NULL_token; else{REJECT;}} /* Keyword in IEC 61131-3 v3 */
EN return EN; /* Keyword */
ENO return ENO; /* Keyword */
/******************************/
/* B 1.2.1 - Numeric Literals */
/******************************/
TRUE return TRUE; /* Keyword */
BOOL#1 return boolean_true_literal_token;
BOOL#TRUE return boolean_true_literal_token;
SAFEBOOL#1 {if (get_opt_safe_extensions()) {return safeboolean_true_literal_token;} else{REJECT;}} /* Keyword (Data Type) */
SAFEBOOL#TRUE {if (get_opt_safe_extensions()) {return safeboolean_true_literal_token;} else{REJECT;}} /* Keyword (Data Type) */
FALSE return FALSE; /* Keyword */
BOOL#0 return boolean_false_literal_token;
BOOL#FALSE return boolean_false_literal_token;
SAFEBOOL#0 {if (get_opt_safe_extensions()) {return safeboolean_false_literal_token;} else{REJECT;}} /* Keyword (Data Type) */
SAFEBOOL#FALSE {if (get_opt_safe_extensions()) {return safeboolean_false_literal_token;} else{REJECT;}} /* Keyword (Data Type) */
/************************/
/* B 1.2.3.1 - Duration */
/************************/
t# return T_SHARP; /* Delimiter */
T# return T_SHARP; /* Delimiter */
TIME return TIME; /* Keyword (Data Type) */
/************************************/
/* B 1.2.3.2 - Time of day and Date */
/************************************/
TIME_OF_DAY return TIME_OF_DAY; /* Keyword (Data Type) */
TOD return TIME_OF_DAY; /* Keyword (Data Type) */
DATE return DATE; /* Keyword (Data Type) */
d# return D_SHARP; /* Delimiter */
D# return D_SHARP; /* Delimiter */
DATE_AND_TIME return DATE_AND_TIME; /* Keyword (Data Type) */
DT return DATE_AND_TIME; /* Keyword (Data Type) */
/***********************************/
/* B 1.3.1 - Elementary Data Types */
/***********************************/
BOOL return BOOL; /* Keyword (Data Type) */
BYTE return BYTE; /* Keyword (Data Type) */
WORD return WORD; /* Keyword (Data Type) */
DWORD return DWORD; /* Keyword (Data Type) */
LWORD return LWORD; /* Keyword (Data Type) */
SINT return SINT; /* Keyword (Data Type) */
INT return INT; /* Keyword (Data Type) */
DINT return DINT; /* Keyword (Data Type) */
LINT return LINT; /* Keyword (Data Type) */
USINT return USINT; /* Keyword (Data Type) */
UINT return UINT; /* Keyword (Data Type) */
UDINT return UDINT; /* Keyword (Data Type) */
ULINT return ULINT; /* Keyword (Data Type) */
REAL return REAL; /* Keyword (Data Type) */
LREAL return LREAL; /* Keyword (Data Type) */
WSTRING return WSTRING; /* Keyword (Data Type) */
STRING return STRING; /* Keyword (Data Type) */
TIME return TIME; /* Keyword (Data Type) */
DATE return DATE; /* Keyword (Data Type) */
DT return DT; /* Keyword (Data Type) */
TOD return TOD; /* Keyword (Data Type) */
DATE_AND_TIME return DATE_AND_TIME; /* Keyword (Data Type) */
TIME_OF_DAY return TIME_OF_DAY; /* Keyword (Data Type) */
/*****************************************************************/
/* Keywords defined in "Safety Software Technical Specification" */
/*****************************************************************/
/*
* NOTE: The following keywords are defined in
* "Safety Software Technical Specification,
* Part 1: Concepts and Function Blocks,
* Version 1.0 – Official Release"
* written by PLCopen - Technical Committee 5
*
* We only support these extensions and keywords
* if the apropriate command line option is given.
*/
SAFEBOOL {if (get_opt_safe_extensions()) {return SAFEBOOL;} else {REJECT;}}
SAFEBYTE {if (get_opt_safe_extensions()) {return SAFEBYTE;} else {REJECT;}}
SAFEWORD {if (get_opt_safe_extensions()) {return SAFEWORD;} else {REJECT;}}
SAFEDWORD {if (get_opt_safe_extensions()) {return SAFEDWORD;} else{REJECT;}}
SAFELWORD {if (get_opt_safe_extensions()) {return SAFELWORD;} else{REJECT;}}
SAFEREAL {if (get_opt_safe_extensions()) {return SAFEREAL;} else{REJECT;}}
SAFELREAL {if (get_opt_safe_extensions()) {return SAFELREAL;} else{REJECT;}}
SAFESINT {if (get_opt_safe_extensions()) {return SAFESINT;} else{REJECT;}}
SAFEINT {if (get_opt_safe_extensions()) {return SAFEINT;} else{REJECT;}}
SAFEDINT {if (get_opt_safe_extensions()) {return SAFEDINT;} else{REJECT;}}
SAFELINT {if (get_opt_safe_extensions()) {return SAFELINT;} else{REJECT;}}
SAFEUSINT {if (get_opt_safe_extensions()) {return SAFEUSINT;} else{REJECT;}}
SAFEUINT {if (get_opt_safe_extensions()) {return SAFEUINT;} else{REJECT;}}
SAFEUDINT {if (get_opt_safe_extensions()) {return SAFEUDINT;} else{REJECT;}}
SAFEULINT {if (get_opt_safe_extensions()) {return SAFEULINT;} else{REJECT;}}
/* SAFESTRING and SAFEWSTRING are not yet supported, i.e. checked correctly, in the semantic analyser (stage 3) */
/* so it is best not to support them at all... */
/*
SAFEWSTRING {if (get_opt_safe_extensions()) {return SAFEWSTRING;} else{REJECT;}}
SAFESTRING {if (get_opt_safe_extensions()) {return SAFESTRING;} else{REJECT;}}
*/
SAFETIME {if (get_opt_safe_extensions()) {return SAFETIME;} else{REJECT;}}
SAFEDATE {if (get_opt_safe_extensions()) {return SAFEDATE;} else{REJECT;}}
SAFEDT {if (get_opt_safe_extensions()) {return SAFEDT;} else{REJECT;}}
SAFETOD {if (get_opt_safe_extensions()) {return SAFETOD;} else{REJECT;}}
SAFEDATE_AND_TIME {if (get_opt_safe_extensions()) {return SAFEDATE_AND_TIME;} else{REJECT;}}
SAFETIME_OF_DAY {if (get_opt_safe_extensions()) {return SAFETIME_OF_DAY;} else{REJECT;}}
/********************************/
/* B 1.3.2 - Generic data types */
/********************************/
/* Strangely, the following symbols do not seem to be required! */
/* But we include them so they become reserved words, and do not
* get passed up to bison as an identifier...
*/
ANY return ANY; /* Keyword (Data Type) */
ANY_DERIVED return ANY_DERIVED; /* Keyword (Data Type) */
ANY_ELEMENTARY return ANY_ELEMENTARY; /* Keyword (Data Type) */
ANY_MAGNITUDE return ANY_MAGNITUDE; /* Keyword (Data Type) */
ANY_NUM return ANY_NUM; /* Keyword (Data Type) */
ANY_REAL return ANY_REAL; /* Keyword (Data Type) */
ANY_INT return ANY_INT; /* Keyword (Data Type) */
ANY_BIT return ANY_BIT; /* Keyword (Data Type) */
ANY_STRING return ANY_STRING; /* Keyword (Data Type) */
ANY_DATE return ANY_DATE; /* Keyword (Data Type) */
/********************************/
/* B 1.3.3 - Derived data types */
/********************************/
":=" return ASSIGN; /* Delimiter */
".." return DOTDOT; /* Delimiter */
TYPE return TYPE; /* Keyword */
END_TYPE return END_TYPE; /* Keyword */
ARRAY return ARRAY; /* Keyword */
OF return OF; /* Keyword */
STRUCT return STRUCT; /* Keyword */
END_STRUCT return END_STRUCT; /* Keyword */
/*********************/
/* B 1.4 - Variables */
/*********************/
/******************************************/
/* B 1.4.3 - Declaration & Initialisation */
/******************************************/
VAR_INPUT return VAR_INPUT; /* Keyword */
VAR_OUTPUT return VAR_OUTPUT; /* Keyword */
VAR_IN_OUT return VAR_IN_OUT; /* Keyword */
VAR_EXTERNAL return VAR_EXTERNAL; /* Keyword */
VAR_GLOBAL return VAR_GLOBAL; /* Keyword */
END_VAR return END_VAR; /* Keyword */
RETAIN return RETAIN; /* Keyword */
NON_RETAIN return NON_RETAIN; /* Keyword */
R_EDGE return R_EDGE; /* Keyword */
F_EDGE return F_EDGE; /* Keyword */
AT return AT; /* Keyword */
/***********************/
/* B 1.5.1 - Functions */
/***********************/
FUNCTION return FUNCTION; /* Keyword */
END_FUNCTION return END_FUNCTION; /* Keyword */
VAR return VAR; /* Keyword */
CONSTANT return CONSTANT; /* Keyword */
/*****************************/
/* B 1.5.2 - Function Blocks */
/*****************************/
FUNCTION_BLOCK return FUNCTION_BLOCK; /* Keyword */
END_FUNCTION_BLOCK return END_FUNCTION_BLOCK; /* Keyword */
VAR_TEMP return VAR_TEMP; /* Keyword */
VAR return VAR; /* Keyword */
NON_RETAIN return NON_RETAIN; /* Keyword */
END_VAR return END_VAR; /* Keyword */
/**********************/
/* B 1.5.3 - Programs */
/**********************/
PROGRAM return PROGRAM; /* Keyword */
END_PROGRAM return END_PROGRAM; /* Keyword */
/********************************************/
/* B 1.6 Sequential Function Chart elements */
/********************************************/
/* NOTE: the following identifiers/tokens clash with the R and S IL operators, as well
* as other identifiers that may be used as variable names inside IL and ST programs.
* They will have to be handled when we include parsing of SFC... For now, simply
* ignore them!
*/
ACTION return ACTION; /* Keyword */
END_ACTION return END_ACTION; /* Keyword */
TRANSITION return TRANSITION; /* Keyword */
END_TRANSITION return END_TRANSITION; /* Keyword */
FROM return FROM; /* Keyword */
TO return TO; /* Keyword */
INITIAL_STEP return INITIAL_STEP; /* Keyword */
STEP return STEP; /* Keyword */
END_STEP return END_STEP; /* Keyword */
/* PRIORITY is not a keyword, so we only return it when
* it is explicitly required and we are not expecting any identifiers
* that could also use the same letter sequence (i.e. an identifier named 'priority')
*/
<sfc_priority_state>PRIORITY return PRIORITY;
<sfc_qualifier_state>{
L return L;
D return D;
SD return SD;
DS return DS;
SL return SL;
N return N;
P return P;
P0 return P0;
P1 return P1;
R return R;
S return S;
}
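/* Illustrative example (informal; the action and step names are made up):
 * inside a step such as
 *   STEP Step1:
 *     Motor_On(N);
 *     Pump_Off(D, t#2s);
 *   END_STEP
 * bison requests the transition to sfc_qualifier_state while an action
 * qualifier is expected, so N, D, etc. are returned as tokens here; outside
 * this state the same names remain available as ordinary identifiers.
 */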
/********************************/
/* B 1.7 Configuration elements */
/********************************/
CONFIGURATION return CONFIGURATION; /* Keyword */
END_CONFIGURATION return END_CONFIGURATION; /* Keyword */
TASK return TASK; /* Keyword */
RESOURCE return RESOURCE; /* Keyword */
ON return ON; /* Keyword */
END_RESOURCE return END_RESOURCE; /* Keyword */
VAR_CONFIG return VAR_CONFIG; /* Keyword */
VAR_ACCESS return VAR_ACCESS; /* Keyword */
END_VAR return END_VAR; /* Keyword */
WITH return WITH; /* Keyword */
PROGRAM return PROGRAM; /* Keyword */
RETAIN return RETAIN; /* Keyword */
NON_RETAIN return NON_RETAIN; /* Keyword */
READ_WRITE return READ_WRITE; /* Keyword */
READ_ONLY return READ_ONLY; /* Keyword */
/* PRIORITY, SINGLE and INTERVAL are not keywords, so we only return them when
* they are explicitly required and we are not expecting any identifiers
* that could also use the same letter sequence (i.e. an identifier named 'priority', ...)
*/
<task_init_state>{
PRIORITY return PRIORITY;
SINGLE return SINGLE;
INTERVAL return INTERVAL;
}
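/* Illustrative example (informal; the task name is made up): in a
 * configuration containing
 *   TASK fast_task (INTERVAL := t#10ms, PRIORITY := 1);
 * bison requests the transition to task_init_state while the parenthesised
 * initialisation is being parsed, so that SINGLE, INTERVAL and PRIORITY are
 * returned as tokens here, while elsewhere the same spellings may still be
 * used as ordinary identifiers.
 */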
/***********************************/
/* B 2.1 Instructions and Operands */
/***********************************/
<il_state>\n return EOL;
/*******************/
/* B 2.2 Operators */
/*******************/
/* NOTE: we can't have flex return the same token for
* ANDN and &N, neither for AND and &, since
* AND and ANDN are considered valid variable
* function, or function block type names!
* This means that the parser may decide that the
* AND or ANDN strings found in the source code
* are being used as variable names
* and not as operators, and will therefore transform
* these tokens into identifier tokens!
* We can't have the parser thinking that the source
* code contained the string AND (which may be interpreted
* as a variable name) when in reality the source code
* merely contained the character &, so we use two
* different tokens for & and AND (and similarly
* ANDN and &N)!
*/
/* The following tokens clash with ST expression operators and Standard Functions */
/* They are also keywords! */
AND return AND; /* Keyword */
MOD return MOD; /* Keyword */
OR return OR; /* Keyword */
XOR return XOR; /* Keyword */
NOT return NOT; /* Keyword */
/* The following tokens clash with Standard Functions */
/* They are keywords because they are a function name */
<il_state>{
ADD return ADD; /* Keyword (Standard Function) */
DIV return DIV; /* Keyword (Standard Function) */
EQ return EQ; /* Keyword (Standard Function) */
GE return GE; /* Keyword (Standard Function) */
GT return GT; /* Keyword (Standard Function) */
LE return LE; /* Keyword (Standard Function) */
LT return LT; /* Keyword (Standard Function) */
MUL return MUL; /* Keyword (Standard Function) */
NE return NE; /* Keyword (Standard Function) */
SUB return SUB; /* Keyword (Standard Function) */
}
/* The following tokens clash with SFC action qualifiers */
/* They are not keywords! */
{
S return S;
R return R;
}
/* The following tokens clash with ST expression operators */
& return AND2; /* NOT a Delimiter! */
/* The following tokens have no clashes */
/* They are not keywords! */
{
LD return LD;
LDN return LDN;
ST return ST;
STN return STN;
S1 return S1;
R1 return R1;
CLK return CLK;
CU return CU;
CD return CD;
PV return PV;
IN return IN;
PT return PT;
ANDN return ANDN;
&N return ANDN2;
ORN return ORN;
XORN return XORN;
CAL return CAL;
CALC return CALC;
CALCN return CALCN;
RET return RET;
RETC return RETC;
RETCN return RETCN;
JMP return JMP;
JMPC return JMPC;
JMPCN return JMPCN;
}
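/* Illustrative IL sequence (for orientation only; addresses are made up):
*      LD   %IX0.0
*      ANDN %IX0.1
*      ST   %QX0.0
* Each operator is returned as its own token (LD, ANDN, ST), while the operands are
* matched later by the directly represented variable rule in section B.1.4.1 below.
*/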
/***********************/
/* B 3.1 - Expressions */
/***********************/
"**" return OPER_EXP; /* NOT a Delimiter! */
"<>" return OPER_NE; /* NOT a Delimiter! */
">=" return OPER_GE; /* NOT a Delimiter! */
"<=" return OPER_LE; /* NOT a Delimiter! */
& return AND2; /* NOT a Delimiter! */
AND return AND; /* Keyword */
XOR return XOR; /* Keyword */
OR return OR; /* Keyword */
NOT return NOT; /* Keyword */
MOD return MOD; /* Keyword */
/*****************************************/
/* B 3.2.2 Subprogram Control Statements */
/*****************************************/
:= return ASSIGN; /* Delimiter */
=> return SENDTO; /* Delimiter */
RETURN return RETURN; /* Keyword */
/********************************/
/* B 3.2.3 Selection Statements */
/********************************/
IF return IF; /* Keyword */
THEN return THEN; /* Keyword */
ELSIF return ELSIF; /* Keyword */
ELSE return ELSE; /* Keyword */
END_IF return END_IF; /* Keyword */
CASE return CASE; /* Keyword */
OF return OF; /* Keyword */
ELSE return ELSE; /* Keyword */
END_CASE return END_CASE; /* Keyword */
/********************************/
/* B 3.2.4 Iteration Statements */
/********************************/
FOR return FOR; /* Keyword */
TO return TO; /* Keyword */
BY return BY; /* Keyword */
DO return DO; /* Keyword */
END_FOR return END_FOR; /* Keyword */
WHILE return WHILE; /* Keyword */
DO return DO; /* Keyword */
END_WHILE return END_WHILE; /* Keyword */
REPEAT return REPEAT; /* Keyword */
UNTIL return UNTIL; /* Keyword */
END_REPEAT return END_REPEAT; /* Keyword */
EXIT return EXIT; /* Keyword */
/********************************************************/
/********************************************************/
/********************************************************/
/***** *****/
/***** *****/
/***** N O W W O R K W I T H V A L U E S *****/
/***** *****/
/***** *****/
/********************************************************/
/********************************************************/
/********************************************************/
/********************************************/
/* B.1.4.1 Directly Represented Variables */
/********************************************/
{direct_variable} {yylval.ID=strdup(yytext); return get_direct_variable_token(yytext);}
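/* For example, %IX0.3 (input bit), %QW10 (output word) and %MD4 (memory double word)
* are all directly represented variables. yylval.ID keeps the full text, and
* get_direct_variable_token() (implemented outside this file; stubbed out below when
* TEST_MAIN is defined) decides which token to return for each size/location prefix.
*/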
/******************************************/
/* B 1.4.3 - Declaration & Initialisation */
/******************************************/
{incompl_location} {yylval.ID=strdup(yytext); return incompl_location_token;}
/************************/
/* B 1.2.3.1 - Duration */
/************************/
{fixed_point} {yylval.ID=strdup(yytext); return fixed_point_token;}
{interval} {/*fprintf(stderr, "entering time_literal_state ##%s##\n", yytext);*/ unput_and_mark('#'); yy_push_state(time_literal_state);}
{erroneous_interval} {return erroneous_interval_token;}
<time_literal_state>{
{integer}d {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return integer_d_token;}
{integer}h {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return integer_h_token;}
{integer}m {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return integer_m_token;}
{integer}s {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return integer_s_token;}
{integer}ms {yylval.ID=strdup(yytext); yylval.ID[yyleng-2] = '\0'; return integer_ms_token;}
{fixed_point}d {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return fixed_point_d_token;}
{fixed_point}h {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return fixed_point_h_token;}
{fixed_point}m {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return fixed_point_m_token;}
{fixed_point}s {yylval.ID=strdup(yytext); yylval.ID[yyleng-1] = '\0'; return fixed_point_s_token;}
{fixed_point}ms {yylval.ID=strdup(yytext); yylval.ID[yyleng-2] = '\0'; return fixed_point_ms_token;}
_ /* do nothing - eat it up!*/
\# {/*fprintf(stderr, "popping from time_literal_state (###)\n");*/ yy_pop_state(); return end_interval_token;}
. {/*fprintf(stderr, "time_literal_state: found invalid character '%s'. Aborting!\n", yytext);*/ ERROR;}
\n {ERROR;}
}
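/* Worked example (assuming the {interval} definition matches the duration body that
* follows the 'T#'/'TIME#' prefix): for an input such as
*      25h_15m
* the {interval} rule above calls unput_and_mark('#'), which returns "25h_15m#" to the
* input stream and pushes time_literal_state. The rules in the block above then emit
* integer_h_token("25") and integer_m_token("15"), the '_' separator is silently eaten,
* and the final '#' pops the state and returns end_interval_token to the parser.
*/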
/*******************************/
/* B.1.2.2 Character Strings */
/*******************************/
{double_byte_character_string} {yylval.ID=strdup(yytext); return double_byte_character_string_token;}
{single_byte_character_string} {yylval.ID=strdup(yytext); return single_byte_character_string_token;}
/******************************/
/* B.1.2.1 Numeric literals */
/******************************/
{integer} {yylval.ID=strdup(yytext); return integer_token;}
{real} {yylval.ID=strdup(yytext); return real_token;}
{binary_integer} {yylval.ID=strdup(yytext); return binary_integer_token;}
{octal_integer} {yylval.ID=strdup(yytext); return octal_integer_token;}
{hex_integer} {yylval.ID=strdup(yytext); return hex_integer_token;}
/*****************************************/
/* B.1.1 Letters, digits and identifiers */
/*****************************************/
{identifier}/({st_whitespace_or_pragma_or_comment})"=>" {yylval.ID=strdup(yytext); return sendto_identifier_token;}
{identifier}/({il_whitespace_or_pragma_or_comment})"=>" {yylval.ID=strdup(yytext); return sendto_identifier_token;}
{identifier} {yylval.ID=strdup(yytext);
// printf("returning identifier...: %s, %d\n", yytext, get_identifier_token(yytext));
return get_identifier_token(yytext);}
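/* Illustrative example (names are made up): in the formal invocation
*      my_fb(EN := start, OUT => result);
* the identifier 'OUT' is followed (possibly across whitespace, pragmas or comments)
* by '=>', so the trailing-context rules above return it as sendto_identifier_token
* instead of passing it through get_identifier_token().
*/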
/************************************************/
/************************************************/
/************************************************/
/***** *****/
/***** *****/
/***** T H E L E F T O V E R S . . . *****/
/***** *****/
/***** *****/
/************************************************/
/************************************************/
/************************************************/
/* do the single character tokens...
*
* e.g.: ':' '(' ')' '+' '*' ...
*/
. {return yytext[0];}
%%
/*************************/
/* Tracking Functions... */
/*************************/
#define MAX_LINE_LENGTH 1024
tracking_t *GetNewTracking(FILE* in_file) {
tracking_t* new_env = new tracking_t;
new_env->eof = 0;
new_env->lineNumber = 0;
new_env->currentChar = 0;
new_env->lineLength = 0;
new_env->currentTokenStart = 0;
new_env->buffer = (char*)malloc(MAX_LINE_LENGTH);
new_env->in_file = in_file;
return new_env;
}
void FreeTracking(tracking_t *tracking) {
free(tracking->buffer);
delete tracking;
}
/* GetNextChar: reads a character from input */
int GetNextChar(char *b, int maxBuffer) {
char *p;
if ( current_tracking->eof )
return 0;
while ( current_tracking->currentChar >= current_tracking->lineLength ) {
current_tracking->currentChar = 0;
current_tracking->currentTokenStart = 1;
current_tracking->eof = false;
p = fgets(current_tracking->buffer, MAX_LINE_LENGTH, current_tracking->in_file);
if ( p == NULL ) {
if ( ferror(current_tracking->in_file) )
return 0;
current_tracking->eof = true;
return 0;
}
current_tracking->lineLength = strlen(current_tracking->buffer);
/* only increment line number if the buffer was big enough to read the whole line! */
char last_char = current_tracking->buffer[current_tracking->lineLength - 1];
if (('\n' == last_char) || ('\r' == last_char)) // '\r' ---> CR, '\n' ---> LF
current_tracking->lineNumber++;
}
b[0] = current_tracking->buffer[current_tracking->currentChar];
if (b[0] == ' ' || b[0] == '\t')
current_tracking->currentTokenStart++;
current_tracking->currentChar++;
return b[0]==0?0:1;
}
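/* GetNextChar() is only useful if flex actually calls it for its input. A minimal
* sketch of the usual wiring (the real YY_INPUT override, if one is used, would live
* in the definitions section earlier in this file):
*
*   #define YY_INPUT(buf, result, max_size) { \
*       result = GetNextChar(buf, max_size);  \
*       if (result <= 0)                      \
*           result = YY_NULL;                 \
*       }
*/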
/***********************************/
/* Utility function definitions... */
/***********************************/
/* print the include file stack to stderr... */
void print_include_stack(void) {
int i;
if ((include_stack_ptr - 1) >= 0)
fprintf (stderr, "in file ");
for (i = include_stack_ptr - 1; i >= 0; i--)
fprintf (stderr, "included from file %s:%d\n", include_stack[i].filename, include_stack[i].env->lineNumber);
}
/* set the internal state variables of lexical analyser to process a new include file */
void handle_include_file_(FILE *filehandle, const char *filename) {
if (include_stack_ptr >= MAX_INCLUDE_DEPTH) {
fprintf(stderr, "Includes nested too deeply\n");
exit( 1 );
}
yyin = filehandle;
include_stack[include_stack_ptr].buffer_state = YY_CURRENT_BUFFER;
include_stack[include_stack_ptr].env = current_tracking;
include_stack[include_stack_ptr].filename = current_filename;
current_filename = strdup(filename);
current_tracking = GetNewTracking(yyin);
include_stack_ptr++;
/* switch input buffer to new file... */
yy_switch_to_buffer(yy_create_buffer(yyin, YY_BUF_SIZE));
}
/* Insert the text passed in the 'source_code' parameter into the source code we are parsing.
* This is done by creating an artificial file with that new source code, and then 'including' the file.
*/
void include_string_(const char *source_code) {
FILE *tmp_file = tmpfile();
if(tmp_file == NULL) {
perror("Error creating temp file.");
exit(EXIT_FAILURE);
}
fwrite((void *)source_code, 1, strlen(source_code), tmp_file);
rewind(tmp_file);
/* now parse the tmp file, by asking flex to handle it as if it had been included with the (*#include ... *) pragma... */
handle_include_file_(tmp_file, "");
//fclose(tmp_file); /* do NOT close file. It must only be closed when we finish reading from it! */
}
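/* A sketch of how this can be used from the parser (the ST text is made up):
*      include_string("TYPE my_enum : (a, b, c); END_TYPE");
* From that point on the scanner behaves exactly as if that string had been the
* contents of a file included at the current position in the input.
*/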
/* Open an include file, and set the internal state variables of lexical analyser to process a new include file */
void include_file(const char *filename) {
FILE *filehandle = NULL;
for (int i = 0; (INCLUDE_DIRECTORIES[i] != NULL) && (filehandle == NULL); i++) {
char *full_name;
full_name = strdup3(INCLUDE_DIRECTORIES[i], "/", filename);
if (full_name == NULL) {
fprintf(stderr, "Out of memory!\n");
exit( 1 );
}
filehandle = fopen(full_name, "r");
free(full_name);
}
if (NULL == filehandle) {
fprintf(stderr, "Error opening included file %s\n", filename);
exit( 1 );
}
/* now process the new file... */
handle_include_file_(filehandle, filename);
}
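/* For illustration (the file name is made up): a call such as
*      include_file("standard_functions.txt");
* tries DIR/standard_functions.txt for every DIR in INCLUDE_DIRECTORIES, in order,
* stops at the first fopen() that succeeds, and then lets handle_include_file_()
* push the current buffer and tracking state onto include_stack before switching
* the scanner to the newly opened file.
*/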
/* return all the text in the current token back to the input stream, except the first n chars. */
void unput_text(unsigned int n) {
/* it seems that flex has a bug in that it will not correctly count the line numbers
* if we return newlines back to the input stream. These newlines will be re-counted
* a second time when they are processed again by flex.
* We therefore determine how many newlines are in the text we are returning,
* and decrement the line counter accordingly...
*/
/*
unsigned int i;
for (i = n; i < strlen(yytext); i++)
if (yytext[i] == '\n')
current_tracking->lineNumber--;
*/
/* now return all the text back to the input stream... */
yyless(n);
}
/* return all the text in the current token back to the input stream,
* but first return to the stream an additional character to mark the end of the token.
* (Since unput() pushes characters onto the front of the input stream, unputting the
* mark character before the copied yytext leaves it positioned after the token text.)
*/
void unput_and_mark(const char c) {
char *yycopy = strdup( yytext ); /* unput() destroys yytext, so we copy it first */
unput(c);
for (int i = yyleng-1; i >= 0; i--)
unput(yycopy[i]);
free(yycopy);
}
/* Called by flex when it reaches the end-of-file */
int yywrap(void)
{
/* We reached the end of the input file... */
/* Should we continue with another file? */
/* If so:
* open the new file...
* return 0;
*/
/* to stop processing...
* return 1;
*/
return 1; /* Stop scanning at end of input file. */
}
/*******************************/
/* Public Interface for Bison. */
/*******************************/
/* The following functions will be called from inside bison code! */
void include_string(const char *source_code) {include_string_(source_code);}
/* Tell flex which file to parse. This function will not immediately start parsing the file.
* To parse the file, you then need to call yyparse()
*
* Returns NULL on error opening the file (and a valid errno), or the open file handle on success.
* Caller must close the file!
*/
FILE *parse_file(const char *filename) {
FILE *filehandle = NULL;
if((filehandle = fopen(filename, "r")) != NULL) {
yyin = filehandle;
current_filename = strdup(filename);
current_tracking = GetNewTracking(yyin);
}
return filehandle;
}
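/* Typical use from the driver code (a sketch only; error handling and the file name
* are made up):
*      FILE *f = parse_file("program.st");
*      if (f == NULL) { perror("program.st"); return -1; }
*      if (yyparse() != 0) { ... handle syntax errors ... }
*      fclose(f);  // remember: the caller must close the file
*/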
/*************************************/
/* Include a main() function to test */
/* the token parsing by flex.... */
/*************************************/
#ifdef TEST_MAIN
#include "../util/symtable.hh"
yystype yylval;
YYLTYPE yylloc;
int get_identifier_token(const char *identifier_str) {return 0;}
int get_direct_variable_token(const char *direct_variable_str) {return 0;}
int main(int argc, char **argv) {
FILE *in_file;
int res;
if (argc == 1) {
/* Work as an interactive (command line) parser... */
while((res=yylex()))
fprintf(stderr, "(line %d)token: %d\n", yylineno, res);
} else {
/* Work as non-interactive (file) parser... */
if((in_file = fopen(argv[1], "r")) == NULL) {
char *errmsg = strdup2("Error opening main file ", argv[1]);
perror(errmsg);
free(errmsg);
return -1;
}
/* parse the file... */
yyin = in_file;
current_filename = argv[1];
/* stop once yylex() returns 0 at the end of the input */
while((res=yylex())) {
fprintf(stderr, "(line %d)token: %d (%s)\n", yylineno, res, yylval.ID);
}
}
return 0;
}
#endif