# Sheerka-Old/src/parsers/ExpressionParser.py
from core.tokenizer import TokenKind
from core.utils import get_text_from_tokens
from parsers.BaseExpressionParser import NameExprNode, VariableNode, BaseExpressionParser
from parsers.FunctionParser import FunctionParser
from parsers.LogicalOperatorParser import LogicalOperatorParser
from parsers.RelationalOperatorParser import RelationalOperatorParser
class ExpressionParser(BaseExpressionParser):
    """
    Top-level expression parser.

    Delegates the actual work to a chain of sub-parsers
    (logical -> relational -> function -> variable/name), so operator
    layering comes from the chain order rather than explicit precedence
    handling in this class.
    """

    NAME = "Expression"

    def __init__(self, **kwargs):
        super().__init__(ExpressionParser.NAME, 60, False, yield_eof=False)
        # Build the delegation chain from the innermost parser outwards;
        # each stage wraps the one constructed before it.
        self.variable_parser = VariableOrNamesParser()
        self.function_parser = FunctionParser(
            expr_parser=self, tokens_parser=self.variable_parser)
        self.relational_parser = RelationalOperatorParser(
            expr_parser=self.function_parser)
        self.logical_parser = LogicalOperatorParser(
            expr_parser=self.relational_parser)

    def parse_input(self, context, parser_input, error_sink):
        # The outermost stage (logical operators) drives the whole parse.
        return self.logical_parser.parse_input(context, parser_input, error_sink)

    def parse_tokens_stop_condition(self, token, parser_input):
        # Expressions define no early-stop condition.
        return None
class VariableOrNamesParser(BaseExpressionParser):
    """
    Recognizes a (possibly dotted) variable reference.

    If the input consists only of identifiers, whitespace and dots it is
    parsed as a VariableNode (split on dots into its name parts);
    anything else falls back to a generic NameExprNode.
    """

    NAME = "VariableOrNames"

    def __init__(self, **kwargs):
        super().__init__(VariableOrNamesParser.NAME, 60, False, yield_eof=False)

    def parse_input(self, context, parser_input, error_sink):
        base = parser_input.pos
        dot_positions = []
        for offset, token in enumerate(parser_input.as_tokens()):
            if token.type == TokenKind.DOT:
                # Record absolute position of each dot separator.
                dot_positions.append(offset + base)
                continue
            acceptable = token.type == TokenKind.WHITESPACE or (
                token.type == TokenKind.IDENTIFIER and token.value.isidentifier())
            if not acceptable:
                # Any other token kind means this is not a plain variable
                # reference; hand back an unstructured name expression.
                return NameExprNode(parser_input.start, parser_input.end,
                                    parser_input.as_tokens())

        if not dot_positions:
            # Simple, undotted variable.
            # NOTE(review): this branch starts the node at parser_input.pos
            # while the dotted branch below uses parser_input.start — looks
            # inconsistent, verify against VariableNode's expectations.
            return VariableNode(base, parser_input.end,
                                parser_input.as_tokens(), parser_input.as_text())

        # Split the token run on each dot into its textual segments.
        segments = []
        segment_start = base
        for dot_position in dot_positions:
            segments.append(get_text_from_tokens(
                parser_input.tokens[segment_start:dot_position]))
            segment_start = dot_position + 1
        # Don't forget the trailing part after the last dot.
        segments.append(get_text_from_tokens(
            parser_input.tokens[segment_start:parser_input.end + 1]))

        return VariableNode(parser_input.start,
                            parser_input.end,
                            parser_input.as_tokens(),
                            segments[0],
                            *segments[1:])

    def parse_tokens_stop_condition(self, token, parser_input):
        # No early-stop condition for this parser.
        return None