#ifndef LEXER_UTILS_H
#define LEXER_UTILS_H

#include <stddef.h>
#include <sys/types.h> /* ssize_t (POSIX) */

/*
 * Lexer state shared across tokenisation calls.
 * Tracks where the previous token ended in the current stream so the
 * next call can resume from there instead of re-reading.
 */
struct lexer_context
{
    char *end_previous_token;     /* first char after the last emitted token */
    ssize_t remaining_chars;      /* unread chars left in the current stream */
    struct token *previous_token; /* last token handed to the parser */
    struct token *current_token;  /* token currently being built */
};

/* @brief: frees all fields of ctx and sets ctx to NULL. */
void destroy_lexer_context(struct lexer_context **ctx);

/* Quoting state the lexer is currently inside of. */
enum lexing_mode
{
    LEXER_NORMAL,
    LEXER_QUOTE,       /* inside '...' */
    LEXER_DOUBLE_QUOTE /* inside "..." */
};

enum token_type
{
    // Special characters
    TOKEN_NULL = 0,
    TOKEN_EOF,
    TOKEN_WORD,
    TOKEN_NEWLINE,
    // WARNING: quote and double quote should never be used inside a token.
    TOKEN_QUOTE,
    TOKEN_DOUBLE_QUOTE,
    TOKEN_GRAVE,
    TOKEN_SEMICOLON,
    TOKEN_COMMENT,
    TOKEN_PIPE,
    TOKEN_AMPERSAND,
    TOKEN_BACKSLASH,
    TOKEN_DOLLAR,
    TOKEN_LEFT_PAREN,
    TOKEN_RIGHT_PAREN,
    TOKEN_LEFT_BRACKET,
    TOKEN_RIGHT_BRACKET,
    TOKEN_LESS,
    TOKEN_GREATER,
    TOKEN_STAR,
    // Keywords
    TOKEN_IF,
    TOKEN_THEN,
    TOKEN_ELSE,
    TOKEN_FI,
    TOKEN_ELIF
};

/* A single lexical token: its classification plus its raw text. */
struct token
{
    enum token_type type;
    char *data; /* heap-allocated copy of the token's text; owned by the token */
};

/*
 * @brief: return a newly allocated token, with the corresponding type.
 * The data contains [size] char, starting from [begin].
 * Ownership of the returned token passes to the caller (free with
 * free_token()).
 *
 * @return: NULL on error, a token otherwise.
 */
struct token *new_token(char *begin, ssize_t size);

/* @brief: frees the token given in argument (data and the token itself),
 * and sets *tok to NULL. */
void free_token(struct token **tok);

/*
 * @brief: checks if the stream used for the last token creation is empty.
 * If it is, it calls stream_read() from IO_backend,
 * and sets [remaining_chars].
 * If not, it starts from the end of the last token.
 * Also trims left blanks before returning.
 *
 * @return: char* stream from which we tokenise.
 */
void stream_init(struct lexer_context *ctx);

/*
 * @brief: drops the current stream and asks IOB for a new one
 */
void get_next_stream(struct lexer_context *ctx);

#endif /* LEXER_UTILS_H */