First implementation of questions management
This commit is contained in:
@@ -3,7 +3,9 @@ from typing import List, Tuple, Callable
|
||||
|
||||
from core.builtin_concepts import BuiltinConcepts
|
||||
from core.concept import Concept
|
||||
from parsers.BaseParser import Node
|
||||
from core.sheerka.services.SheerkaExecute import ParserInput
|
||||
from core.tokenizer import LexerError, TokenKind, Token
|
||||
from parsers.BaseParser import Node, BaseParser, UnexpectedTokenErrorNode, UnexpectedEof, ErrorNode
|
||||
|
||||
|
||||
class ExprNode(Node):
|
||||
@@ -16,6 +18,29 @@ class ExprNode(Node):
|
||||
return True
|
||||
|
||||
|
||||
@dataclass()
class LeftPartNotFoundError(ErrorNode):
    """Error emitted when an expression begins with a binary operator
    ('or' / 'and'), i.e. there is no left-hand operand to bind to."""
|
||||
|
||||
|
||||
class NameExprNode(ExprNode):
    """Leaf expression node: a raw run of tokens treated as a single name.

    The tokens' text is concatenated once at construction time and cached
    in `value`; evaluation simply returns that cached text.
    """

    def __init__(self, tokens):
        self.tokens = tokens
        # Cache the concatenated source text of the tokens.
        self.value = "".join(tok.str_value for tok in tokens)

    def eval(self, obj):
        # A bare name evaluates to its own text, regardless of `obj`.
        return self.value

    def __repr__(self):
        return f"NameExprNode('{self.value}')"

    def __str__(self):
        return self.value
|
||||
|
||||
|
||||
@dataclass
|
||||
class PropertyEqualsNode(ExprNode):
|
||||
prop: str
|
||||
@@ -110,6 +135,12 @@ class AndNode(ExprNode):
|
||||
res &= part.eval(obj)
|
||||
return res
|
||||
|
||||
def __repr__(self):
    """Return a debug representation listing every conjunct, e.g.
    AndNode(NameExprNode('a'), NameExprNode('b'))."""
    # Fix: the literal was an f-string with no placeholders ("f" prefix
    # was dead weight) mixed with `+` concatenation; runtime output is
    # unchanged.
    return "AndNode(" + ", ".join([repr(p) for p in self.parts]) + ")"
|
||||
|
||||
def __str__(self):
    """Render the conjunction back as source text: 'a and b and c'."""
    return " and ".join(map(str, self.parts))
|
||||
|
||||
|
||||
@dataclass(init=False)
|
||||
class OrNode(ExprNode):
|
||||
@@ -124,6 +155,11 @@ class OrNode(ExprNode):
|
||||
res |= part.eval(obj)
|
||||
return res
|
||||
|
||||
def __repr__(self):
    """Return a debug representation listing every disjunct, e.g.
    OrNode(NameExprNode('a'), NameExprNode('b'))."""
    # Fix: the literal was an f-string with no placeholders ("f" prefix
    # was dead weight) mixed with `+` concatenation; runtime output is
    # unchanged.
    return "OrNode(" + ", ".join([repr(p) for p in self.parts]) + ")"
|
||||
|
||||
def __str__(self):
    """Render the disjunction back as source text: 'a or b or c'."""
    return " or ".join(map(str, self.parts))
|
||||
|
||||
@dataclass()
|
||||
class NotNode(ExprNode):
|
||||
@@ -143,7 +179,7 @@ class TrueNode(ExprNode):
|
||||
return True
|
||||
|
||||
|
||||
class ExpressionParser:
|
||||
class ExpressionParser(BaseParser):
|
||||
"""
|
||||
will parser logic expression
|
||||
like not (a and b or c)
|
||||
@@ -151,7 +187,140 @@ class ExpressionParser:
|
||||
The nodes can be used for custom filtering (ex with ExplanationConcept)
|
||||
Or to help to understand why a python expression returns True or False
|
||||
"""
|
||||
pass
|
||||
|
||||
def __init__(self, **kwargs):
    # Register this parser under the name "Expression".
    # NOTE(review): **kwargs is accepted but silently ignored — confirm
    # whether it should be forwarded to BaseParser.__init__.
    # NOTE(review): the meaning of the positional args (presumably
    # name, priority, and a boolean flag) is not visible here — verify
    # against BaseParser's signature.
    super().__init__("Expression", 50, False)
|
||||
|
||||
def reset_parser(self, context, parser_input: ParserInput):
    """Bind the parser to a fresh context/input and prime the first token.

    Clears any previously accumulated errors. Returns True on success,
    False when the lexer raised (the error is recorded via add_error).
    """
    self.context = context
    self.sheerka = context.sheerka
    self.parser_input = parser_input
    self.error_sink.clear()

    try:
        # Rewind the input and load the first token.
        parser_input.reset(False)
        parser_input.next_token()
    except LexerError as lex_err:
        self.add_error(self.sheerka.new(BuiltinConcepts.ERROR, body=lex_err), False)
        return False
    else:
        return True
|
||||
|
||||
def parse(self, context, parser_input: ParserInput):
    """
    Parse `parser_input` into an expression tree and wrap it in a
    sheerka return value.

    :param context: execution context; provides logging and the sheerka
        service used to build concepts and return values.
    :param parser_input: the input to parse; anything that is not a
        ParserInput instance is rejected.
    :return: a sheerka return value (success flag + body), or None when
        the input is not a ParserInput.
    """

    # Guard: only ParserInput is accepted, despite what a caller might pass.
    if not isinstance(parser_input, ParserInput):
        return None

    context.log(f"Parsing '{parser_input}' with ExpressionParser", self.name)
    sheerka = context.sheerka

    # Empty input: report failure with an IS_EMPTY concept rather than
    # attempting to parse.
    if parser_input.is_empty():
        return context.sheerka.ret(self.name,
                                   False,
                                   sheerka.new(BuiltinConcepts.IS_EMPTY))

    # reset_parser primes the first token; it returns False when the
    # lexer failed, in which case the collected errors become the body.
    if not self.reset_parser(context, parser_input):
        return self.sheerka.ret(
            self.name,
            False,
            context.sheerka.new(BuiltinConcepts.ERROR, body=self.error_sink))

    # 'or' is the lowest-precedence operator, so it is the entry point
    # of the recursive descent.
    tree = self.parse_or()
    token = self.parser_input.token
    # Leftover input after a complete expression is an error.
    if token and token.type != TokenKind.EOF:
        self.add_error(UnexpectedTokenErrorNode(f"Unexpected token '{token}'", token, []))

    value = self.get_return_value_body(context.sheerka, self.parser_input.as_text(), tree, tree)

    # Success is determined by whether any error was recorded along the way.
    ret = self.sheerka.ret(
        self.name,
        not self.has_error,
        value)

    return ret
|
||||
|
||||
def parse_or(self):
    """Parse a chain of 'or'-separated sub-expressions.

    'or' binds loosest, so this is the top of the precedence climb; each
    operand is produced by parse_and(). Returns the single operand
    unchanged when no 'or' follows it, otherwise an OrNode.
    """

    def at_or():
        # True when the current token is the identifier 'or'.
        tok = self.parser_input.token
        return tok.type == TokenKind.IDENTIFIER and tok.value == "or"

    left = self.parse_and()
    if not at_or():
        return left

    operands = [left]
    while at_or():
        self.parser_input.next_token()
        operand = self.parse_and()
        if operand is None:
            # The input ended right after an 'or'.
            self.add_error(UnexpectedEof("When parsing 'or'"))
            break
        operands.append(operand)

    return OrNode(*operands)
|
||||
|
||||
def parse_and(self):
    """Parse a chain of 'and'-separated sub-expressions.

    'and' binds tighter than 'or'; each operand is produced by
    parse_names(). Returns the single operand unchanged when no 'and'
    follows it, otherwise an AndNode.
    """
    first = self.parse_names()
    tok = self.parser_input.token
    if not (tok.type == TokenKind.IDENTIFIER and tok.value == "and"):
        return first

    collected = [first]
    while tok.type == TokenKind.IDENTIFIER and tok.value == "and":
        self.parser_input.next_token()
        operand = self.parse_names()
        if operand is None:
            # The input ended right after an 'and'.
            self.add_error(UnexpectedEof("When parsing 'and'"))
            return AndNode(*collected)
        collected.append(operand)
        tok = self.parser_input.token

    return AndNode(*collected)
|
||||
|
||||
def parse_names(self):
    """Parse one operand: either a parenthesised sub-expression or a raw
    run of tokens up to the next 'and'/'or'/closing ')'/EOF, which becomes
    a NameExprNode.

    :return: an ExprNode, or None on EOF / when no operand tokens exist.
    """

    def stop():
        # Stop at EOF, at a ')' that closes an enclosing group (only when
        # our own parens are balanced), or at a boolean operator keyword.
        return token.type == TokenKind.EOF or \
               paren_count == 0 and token.type == TokenKind.RPAR or \
               token.type == TokenKind.IDENTIFIER and token.value in ("and", "or")

    token = self.parser_input.token
    if token.type == TokenKind.EOF:
        return None

    if token.type == TokenKind.LPAR:
        # '(' expr ')': recurse from the lowest-precedence level.
        self.parser_input.next_token()
        expr = self.parse_or()
        token = self.parser_input.token
        if token.type != TokenKind.RPAR:
            # Fix: was self.error_sink.append(...); route through
            # add_error like every other method so has_error is set and
            # parse() reports the failure.
            self.add_error(UnexpectedTokenErrorNode(f"Unexpected token '{token}'", token, [TokenKind.RPAR]))
            return expr
        self.parser_input.next_token()
        return expr

    # Collect raw tokens (whitespace included via next_token(False)) until
    # a stop token; paren_count tracks nesting so a ')' belonging to the
    # name itself does not end an enclosing group early.
    buffer = []
    paren_count = 0
    while not stop():
        buffer.append(token)
        if token.type == TokenKind.LPAR:
            paren_count += 1
        if token.type == TokenKind.RPAR:
            paren_count -= 1
        self.parser_input.next_token(False)
        token = self.parser_input.token

    if len(buffer) == 0:
        if token.type != TokenKind.RPAR:
            # Nothing before an 'and'/'or': the expression has no
            # left-hand operand.
            # Fix: was self.error_sink.append(...); use add_error so the
            # parser's has_error flag reflects this failure too.
            self.add_error(LeftPartNotFoundError())
        return None

    # Drop a single trailing whitespace token kept by next_token(False).
    # NOTE(review): only one token is popped — confirm runs of trailing
    # whitespace cannot occur here.
    if buffer[-1].type == TokenKind.WHITESPACE:
        buffer.pop()

    return NameExprNode(buffer)
|
||||
|
||||
|
||||
class ExpressionVisitor:
|
||||
@@ -175,3 +344,38 @@ class ExpressionVisitor:
|
||||
self.visit(item)
|
||||
elif isinstance(value, ExprNode):
|
||||
self.visit(value)
|
||||
|
||||
|
||||
class TrueifyVisitor(ExpressionVisitor):
    """
    Rewrites an expression tree, replacing every NameExprNode that mentions
    a variable from `to_trueify` with a literal True node.

    A node mentioning any variable from `to_skip` is returned untouched —
    skipping wins even when the node also mentions a trueify variable.
    """

    def __init__(self, to_trueify, to_skip):
        self.to_trueify = to_trueify
        self.to_skip = to_skip

    def visit_AndNode(self, expr_node):
        # Rebuild the conjunction from the visited children.
        return AndNode(*[self.visit(child) for child in expr_node.parts])

    def visit_OrNode(self, expr_node):
        # Rebuild the disjunction from the visited children.
        return OrNode(*[self.visit(child) for child in expr_node.parts])

    def visit_NameExprNode(self, expr_node):
        should_trueify = False
        for tok in expr_node.tokens:
            # Only identifier tokens name variables.
            if tok.type != TokenKind.IDENTIFIER:
                continue
            if tok.value in self.to_skip:
                # A skip variable freezes the whole node immediately.
                return expr_node
            if tok.value in self.to_trueify:
                should_trueify = True

        if should_trueify:
            # Synthetic token: -1 positions mark it as not from the source.
            return NameExprNode([Token(TokenKind.IDENTIFIER, "True", -1, -1, -1)])
        return expr_node
|
||||
|
||||
Reference in New Issue
Block a user