7dcaa9c111
Fixed #77 : Parser: ShortTermMemoryParser should be called separately Fixed #78 : Remove VariableNode usage Fixed #79 : ConceptManager: Implement compile caching Fixed #80 : SheerkaExecute : parsers_key is not correctly computed Fixed #81 : ValidateConceptEvaluator : Validate concept's where and pre clauses right after the parsing Fixed #82 : SheerkaIsAManager: isa() failed when the set as a body Fixed #83 : ValidateConceptEvaluator : Support BNF and SYA Concepts Fixed #84 : ExpressionParser: Implement the parser as a standard parser Fixed #85 : Services: Give order to services Fixed #86 : cannot manage smart_get_attr(the short, color)
233 lines
7.9 KiB
Python
233 lines
7.9 KiB
Python
from dataclasses import dataclass
|
|
from typing import List
|
|
|
|
import core.utils
|
|
from core.builtin_concepts import ReturnValueConcept
|
|
from core.builtin_concepts_ids import BuiltinConcepts
|
|
from core.global_symbols import NotInit
|
|
from core.sheerka.services.SheerkaExecute import ParserInput
|
|
from core.sheerka.services.SheerkaRuleManager import SheerkaRuleManager, CompiledCondition
|
|
from core.sheerka.services.sheerka_service import FailedToCompileError
|
|
from core.tokenizer import Keywords, TokenKind
|
|
from parsers.BaseCustomGrammarParser import BaseCustomGrammarParser, NameNode, KeywordNotFound, SyntaxErrorNode
|
|
from parsers.BaseParser import Node, UnexpectedEofParsingError
|
|
from parsers.FormatRuleActionParser import FormatAstNode
|
|
from sheerkarete.conditions import AndConditions
|
|
|
|
|
|
@dataclass()
class DefRuleNode(Node):
    """Base AST node for a parsed rule definition."""

    # Raw token parts of the rule, presumably keyed by rule keyword
    # (as produced by get_parts) -- TODO confirm against BaseCustomGrammarParser.
    tokens: dict
    # Rule name; stays NotInit unless 'def rule <name> as ...' was parsed.
    name: NameNode = NotInit
    # Compiled 'when' conditions evaluated on the Python side
    # (copied from compile_when's python_conditions).
    python: List[CompiledCondition] = NotInit
    # Compiled 'when' conditions for the rete network
    # (copied from compile_when's rete_conditions).
    rete: List[AndConditions] = NotInit
|
|
|
|
|
|
@dataclass()
class DefExecRuleNode(DefRuleNode):
    """Rule of the form 'when ... then ...'."""

    # Result of compiling the 'then' clause (set from DefRuleParser.get_then;
    # NOTE(review): get_then returns the whole compile result -- confirm it is
    # a ReturnValueConcept as annotated).
    then: ReturnValueConcept = NotInit
|
|
|
|
|
|
@dataclass
class DefFormatRuleNode(DefRuleNode):
    """Rule of the form 'when ... print ...'."""

    # Parsed format AST for the 'print' clause (set from DefRuleParser.get_print).
    print: FormatAstNode = NotInit
|
|
|
|
|
|
class DefRuleParser(BaseCustomGrammarParser):
    """Parser for rule definitions.

    Accepts either a named definition::

        def rule <name> as when <conditions> then <action>

    or an anonymous rule starting directly at 'when' (with a 'then' or a
    'print' clause, never both).  Clause compilation is delegated to the
    SheerkaRuleManager service; the result is a DefExecRuleNode ('then')
    or a DefFormatRuleNode ('print').
    """

    # Keywords that introduce a named definition.
    DEF_KEYWORDS = [Keywords.RULE, Keywords.AS]
    DEF_KEYWORDS_VALUES = [k.value for k in DEF_KEYWORDS]

    # Keywords that split the rule body into its clauses.
    RULE_KEYWORDS = [Keywords.WHEN, Keywords.THEN, Keywords.PRINT]
    RULE_KEYWORDS_VALUES = [k.value for k in RULE_KEYWORDS]

    def __init__(self, **kwargs):
        # "DefRule" is the parser name; 60 is presumably the service order
        # (see issue #85 "Give order to services") -- TODO confirm.
        BaseCustomGrammarParser.__init__(self, "DefRule", 60)

    def parse(self, context, parser_input: ParserInput):
        """Parse a rule definition from *parser_input*.

        :param context: execution context providing the sheerka instance.
        :param parser_input: text input to parse.
        :return: a sheerka return value wrapping the parsed node or an
            error/marker concept; None when *parser_input* has the wrong type.
        """
        if not isinstance(parser_input, ParserInput):
            return None

        # rule parser can only manage string text
        if parser_input.from_tokens:
            ret = context.sheerka.ret(
                self.name,
                False,
                context.sheerka.new(BuiltinConcepts.NOT_FOR_ME, body=parser_input))
            self.log_result(context, parser_input, ret)
            return ret

        context.log(f"Parsing '{parser_input}' with DefRuleParser", self.name)
        sheerka = context.sheerka

        if parser_input.is_empty():
            return sheerka.ret(self.name,
                               False,
                               sheerka.new(BuiltinConcepts.IS_EMPTY))

        if not self.reset_parser(context, parser_input):
            # Use the local 'sheerka' consistently: the original mixed
            # self.sheerka.ret(...) with context.sheerka.new(...) in one
            # expression, and self.sheerka may not be initialized yet when
            # reset_parser fails.
            return sheerka.ret(self.name,
                               False,
                               sheerka.new(BuiltinConcepts.ERROR, body=self.error_sink))

        self.parser_input.next_token()
        node = self.parse_def_rule()

        body = self.get_return_value_body(sheerka, parser_input.as_text(), node, node, self.error_sink)
        ret = sheerka.ret(self.name, not self.has_error, body)

        self.log_result(context, parser_input.as_text(), ret)
        return ret

    def parse_def_rule(self):
        """Dispatch on the first token: a named 'def rule ...' definition
        or an anonymous rule starting at 'when'/'print'.
        """
        token = self.parser_input.token
        if token.value == Keywords.DEF.value:
            return self.parse_rule_name()
        elif token.value in (Keywords.WHEN.value, Keywords.PRINT.value):
            return self.parse_rule()
        else:
            self.add_error(KeywordNotFound([], [Keywords.WHEN.value]))
            return None

    def parse_rule_name(self):
        """
        Parses def rule xxx as yyyy
        """
        self.parser_input.next_token()  # eat def
        token = self.parser_input.token
        if token.value != Keywords.RULE.value:
            self.add_error(KeywordNotFound([token], [Keywords.RULE.value]))
            return None

        # Collect the name tokens up to (and excluding) the 'as' keyword.
        buffer = []
        while self.parser_input.next_token(skip_whitespace=False):
            token = self.parser_input.token
            if token.value == Keywords.AS.value:
                break
            else:
                buffer.append(token)
        else:  # 'as' keyword not found
            self.add_error(KeywordNotFound([], [Keywords.AS.value]))
            return None

        if not self.parser_input.next_token():  # eat as
            self.add_error(UnexpectedEofParsingError("While parsing 'when'."))
            return None

        rule = self.parse_rule()
        if rule is None:
            # BUGFIX: parse_rule returns None on syntax errors; the original
            # fell through to 'rule.name = name_node' and crashed with an
            # AttributeError instead of reporting the recorded error.
            return None

        name_node = self.get_concept_name(buffer)
        if name_node is None:
            return rule

        rule.name = name_node
        return rule

    def parse_rule(self):
        """
        Parses 'when xxx then yyy'
        or 'when xxx print yyy'
        """
        parts = self.get_parts(self.RULE_KEYWORDS_VALUES, strip_tokens=True)
        if Keywords.THEN in parts and Keywords.PRINT in parts:
            self.add_error(SyntaxErrorNode([], "Cannot have both 'print' and 'then' keywords"))
            return None

        if Keywords.THEN not in parts and Keywords.PRINT not in parts:
            self.add_error(KeywordNotFound([], [Keywords.THEN.value, Keywords.PRINT.value]))
            return None

        return self.parse_format_rule(parts) if Keywords.PRINT in parts else self.parse_exec_rule(parts)

    def parse_exec_rule(self, parts):
        """Build a DefExecRuleNode from 'when ... then ...' parts.

        Returns a partially-filled node when a clause fails to compile
        (errors are recorded via add_error), or None when a mandatory
        keyword is missing entirely.
        """
        node = DefExecRuleNode(parts)
        try:
            compiled_result = self.get_when(parts[Keywords.WHEN])
            if compiled_result is None:
                return node
            node.python = compiled_result.python_conditions
            node.rete = compiled_result.rete_conditions

            parsed = self.get_then(parts[Keywords.THEN])
            if parsed is None:
                return node
            node.then = parsed
        except KeyError as e:
            # parts is keyed by Keywords members; a missing clause surfaces
            # as a KeyError whose arg is the missing keyword.
            self.add_error(KeywordNotFound([], [e.args[0].value]))
            return None

        return node

    def parse_format_rule(self, parts):
        """Build a DefFormatRuleNode from 'when ... print ...' parts.

        Same error contract as parse_exec_rule.
        """
        node = DefFormatRuleNode(parts)
        try:
            compiled_result = self.get_when(parts[Keywords.WHEN])
            if compiled_result is None:
                return node
            node.python = compiled_result.python_conditions
            node.rete = compiled_result.rete_conditions

            parsed = self.get_print(parts[Keywords.PRINT])
            if parsed is None:
                return node
            node.print = parsed
        except KeyError as e:
            self.add_error(KeywordNotFound([], [e.args[0].value]))
            return None

        return node

    def get_when(self, tokens):
        """
        Validate the when part of the rule.
        :param tokens: clause tokens; the leading 'when' keyword (tokens[0])
            is dropped before compiling.
        :return: the compile result, or None on failure (errors recorded).
        """
        source = core.utils.get_text_from_tokens(core.utils.strip_tokens(tokens[1:]))
        try:
            rule_manager_service = self.sheerka.services[SheerkaRuleManager.NAME]
            compiled_result = rule_manager_service.compile_when(self.context, self.name, source)
        except FailedToCompileError as ex:
            for c in ex.cause:
                self.add_error(c)
            return None

        return compiled_result

    def get_then(self, tokens):
        """Compile the 'then' clause; returns None on failure (error recorded).

        NOTE(review): this returns the whole compile result while get_print
        returns res.body -- confirm the asymmetry is intended.
        """
        source = core.utils.get_text_from_tokens(core.utils.strip_tokens(tokens[1:]))
        res = self.sheerka.services[SheerkaRuleManager.NAME].compile_exec(self.context, source)

        if not res.status:
            self.add_error(res.value)
            return None

        return res

    def get_print(self, tokens):
        """
        Validate the print part
        :param tokens: clause tokens; the leading 'print' keyword is dropped.
        :return: the compiled format body, or None on failure (error recorded).
        """
        source = core.utils.get_text_from_tokens(core.utils.strip_tokens(tokens[1:]))
        res = self.sheerka.services[SheerkaRuleManager.NAME].compile_print(self.context, source)
        if not res.status:
            self.add_error(res.value)
            return None

        return res.body

    def get_concept_name(self, tokens):
        """Build a NameNode from the raw name tokens collected before 'as'.

        Returns None (with an error recorded) when the name is empty or
        spans multiple lines.
        """
        name_tokens = core.utils.strip_tokens(tokens)
        if not name_tokens:
            self.add_error(SyntaxErrorNode([], "Name is mandatory"))
            return None

        for token in name_tokens:
            if token.type == TokenKind.NEWLINE:
                self.add_error(SyntaxErrorNode([token], "Newline are not allowed in name."))
                return None

        # (stale "skip the first token" comment removed: all stripped
        # name tokens are kept.)
        name_node = NameNode(name_tokens)
        return name_node