Fixed #49: Implement ExpressionParser
This commit is contained in:
@@ -1,9 +1,8 @@
|
|||||||
from core.tokenizer import TokenKind
|
from parsers.BaseExpressionParser import BaseExpressionParser
|
||||||
from core.utils import get_text_from_tokens
|
|
||||||
from parsers.BaseExpressionParser import NameExprNode, VariableNode, BaseExpressionParser
|
|
||||||
from parsers.FunctionParser import FunctionParser
|
from parsers.FunctionParser import FunctionParser
|
||||||
from parsers.LogicalOperatorParser import LogicalOperatorParser
|
from parsers.LogicalOperatorParser import LogicalOperatorParser
|
||||||
from parsers.RelationalOperatorParser import RelationalOperatorParser
|
from parsers.RelationalOperatorParser import RelationalOperatorParser
|
||||||
|
from parsers.VariableOrNamesParser import VariableOrNamesParser
|
||||||
|
|
||||||
|
|
||||||
class ExpressionParser(BaseExpressionParser):
    """Top-level expression parser.

    Currently only a skeleton: the stop condition is not implemented yet.
    NOTE(review): the contract of ``parse_tokens_stop_condition`` (what a
    truthy return means to the base class) is defined in
    BaseExpressionParser — confirm there before filling this in.
    """

    def parse_tokens_stop_condition(self, token, parser_input):
        """Decide whether token collection should stop at *token*.

        Not implemented yet; returning ``None`` (falsy) presumably means
        "keep consuming tokens" — TODO confirm against the base class.
        """
        pass
|
||||||
|
|
||||||
|
|
||||||
class VariableOrNamesParser(BaseExpressionParser):
    """Recognize a dotted-identifier chain as a variable reference.

    Scans the tokens of ``parser_input`` and classifies them:

    * any token that is not a DOT, WHITESPACE, or a well-formed
      IDENTIFIER makes the whole input a generic :class:`NameExprNode`;
    * otherwise, the input is a :class:`VariableNode` — either a single
      name, or a chain split into parts at each DOT token.

    NOTE(review): the meaning of ``parser_input.pos`` vs
    ``parser_input.start`` (the no-dot branch uses ``pos`` as the node
    start, the dotted branch uses ``start``) is assumed from usage here —
    confirm against BaseExpressionParser.
    """

    NAME = "VariableOrNames"

    def __init__(self, **kwargs):
        # Priority 60, non-greedy, and no EOF yield — values taken as-is
        # from the original; their semantics live in BaseExpressionParser.
        super().__init__(VariableOrNamesParser.NAME, 60, False, yield_eof=False)

    def parse_input(self, context, parser_input, error_sink):
        """Return a VariableNode for a (dotted) name, else a NameExprNode.

        ``context`` and ``error_sink`` are accepted for interface
        compatibility but are not used by this parser.
        """
        # try to recognize a VariableNode
        dots_found = []  # absolute token positions of every DOT
        pos = parser_input.pos
        for i, token in enumerate(parser_input.as_tokens()):
            if token.type == TokenKind.DOT:
                dots_found.append(i + pos)
                continue

            # Anything that is neither whitespace nor a valid identifier
            # disqualifies the input from being a plain variable chain.
            if not (token.type == TokenKind.WHITESPACE or
                    token.type == TokenKind.IDENTIFIER and token.value.isidentifier()):
                return NameExprNode(parser_input.start, parser_input.end, parser_input.as_tokens())

        if len(dots_found) == 0:
            # Single undotted name: the whole input text is the variable name.
            return VariableNode(pos, parser_input.end, parser_input.as_tokens(), parser_input.as_text())

        # Split the token stream into text parts at each DOT position.
        parts = []
        current_dot_pos = pos
        for dot_found in dots_found:
            parts.append(get_text_from_tokens(parser_input.tokens[current_dot_pos: dot_found]))
            current_dot_pos = dot_found + 1

        # do not forget the trailing part
        parts.append(get_text_from_tokens(parser_input.tokens[current_dot_pos: parser_input.end + 1]))

        return VariableNode(parser_input.start,
                            parser_input.end,
                            parser_input.as_tokens(),
                            parts[0],
                            *parts[1:])

    def parse_tokens_stop_condition(self, token, parser_input):
        """No explicit stop condition; always returns ``None`` (falsy)."""
        pass
|
|
||||||
|
|||||||
@@ -0,0 +1,44 @@
|
|||||||
|
from core.tokenizer import TokenKind
|
||||||
|
from core.utils import get_text_from_tokens
|
||||||
|
from parsers.BaseExpressionParser import BaseExpressionParser, NameExprNode, VariableNode
|
||||||
|
|
||||||
|
|
||||||
|
class VariableOrNamesParser(BaseExpressionParser):
    """Parser that classifies its input as a variable or a generic name.

    Walks the input tokens once. If every token is a DOT, WHITESPACE, or
    a syntactically valid IDENTIFIER, the input is treated as a variable
    (possibly a dotted chain ``a.b.c``) and a :class:`VariableNode` is
    built; the first non-conforming token short-circuits to a
    :class:`NameExprNode` covering the whole input.

    NOTE(review): ``parser_input.pos`` / ``start`` / ``end`` semantics
    are assumed from how they are used here — confirm in the base class.
    """

    NAME = "VariableOrNames"

    def __init__(self, **kwargs):
        # Priority 60, non-greedy, no EOF yield — constants carried over
        # unchanged from the original implementation.
        super().__init__(VariableOrNamesParser.NAME, 60, False, yield_eof=False)

    def parse_input(self, context, parser_input, error_sink):
        """Build a VariableNode (dotted or plain) or fall back to NameExprNode.

        ``context`` and ``error_sink`` are part of the parser interface
        but unused in this implementation.
        """
        # try to recognize a VariableNode
        dots_found = []
        pos = parser_input.pos
        for index, token in enumerate(parser_input.as_tokens()):
            if token.type == TokenKind.DOT:
                # Remember the absolute position of each dot separator.
                dots_found.append(index + pos)
                continue

            is_whitespace = token.type == TokenKind.WHITESPACE
            is_identifier = (token.type == TokenKind.IDENTIFIER
                             and token.value.isidentifier())
            if not (is_whitespace or is_identifier):
                # Not a variable-shaped input: wrap everything as a name.
                return NameExprNode(parser_input.start, parser_input.end, parser_input.as_tokens())

        if not dots_found:
            # Plain, undotted variable name.
            return VariableNode(pos, parser_input.end, parser_input.as_tokens(), parser_input.as_text())

        # Cut the token list into textual parts between consecutive dots.
        parts = []
        segment_start = pos
        for dot_pos in dots_found:
            parts.append(get_text_from_tokens(parser_input.tokens[segment_start: dot_pos]))
            segment_start = dot_pos + 1

        # do not forget the trailing part
        parts.append(get_text_from_tokens(parser_input.tokens[segment_start: parser_input.end + 1]))

        return VariableNode(parser_input.start,
                            parser_input.end,
                            parser_input.as_tokens(),
                            parts[0],
                            *parts[1:])

    def parse_tokens_stop_condition(self, token, parser_input):
        """No stop condition is defined; always returns ``None``."""
        pass
|
||||||
Reference in New Issue
Block a user