Implemented a first, basic version of a Rete rule engine
This commit is contained in:
+179
-8
@@ -13,6 +13,7 @@ from core.utils import as_bag
|
||||
from parsers.BaseNodeParser import SourceCodeNode, ConceptNode, UnrecognizedTokensNode, SourceCodeWithConceptNode, \
|
||||
RuleNode
|
||||
from parsers.BaseParser import ParsingError
|
||||
from parsers.PythonParser import PythonParser
|
||||
|
||||
PARSE_STEPS = [BuiltinConcepts.BEFORE_PARSING, BuiltinConcepts.PARSING, BuiltinConcepts.AFTER_PARSING]
|
||||
EVAL_STEPS = PARSE_STEPS + [BuiltinConcepts.BEFORE_EVALUATION, BuiltinConcepts.EVALUATION,
|
||||
@@ -235,7 +236,7 @@ def only_parsers_results(context, return_values):
|
||||
Filters the return_values and returns when the result is a ParserResult
|
||||
regardless of the status
|
||||
|
||||
So it filters errors
|
||||
So it filters parsers in error (ERROR, NOT_FOR_ME, EMPTY...)
|
||||
:param context:
|
||||
:param return_values:
|
||||
:return:
|
||||
@@ -332,7 +333,7 @@ def parse_unrecognized(context, source, parsers, who=None, prop=None, filter_fun
|
||||
|
||||
def parse_function(context, source, tokens=None, start=0):
|
||||
"""
|
||||
Helper function to parse what is supposed to be a function
|
||||
Helper function that parses what is supposed to be a function
|
||||
:param context:
|
||||
:param source:
|
||||
:param tokens:
|
||||
@@ -361,6 +362,34 @@ def parse_function(context, source, tokens=None, start=0):
|
||||
return res
|
||||
|
||||
|
||||
def parse_python(context, source, desc=None):
    """
    Parse a piece of source code known to be Python.

    :param context: execution context used to push the PARSE_CODE sub context
    :param source: the Python source code to parse
    :param desc: optional description used when creating the sub context
    :return: the result of PythonParser.parse on the sub context
    """
    desc = desc or f"Compiling python '{source}'"
    push_payload = {"language": "Python", "source": source}
    with context.push(BuiltinConcepts.PARSE_CODE, push_payload, desc) as sub_context:
        execute_service = context.sheerka.services[SheerkaExecute.NAME]
        parser_input = execute_service.get_parser_input(source)
        return PythonParser().parse(sub_context, parser_input)
|
||||
|
||||
|
||||
def parse_expression(context, source, desc=None):
    """
    Parse an expression combining AND, OR and NOT.

    :param context: execution context used to push the PARSE_CODE sub context
    :param source: the expression source text to parse
    :param desc: optional description used when creating the sub context
    :return: the result of ExpressionParser.parse on the sub context
    """
    desc = desc or f"Parsing expression '{source}'"
    with context.push(BuiltinConcepts.PARSE_CODE, source, desc) as sub_context:
        execute_service = context.sheerka.services[SheerkaExecute.NAME]
        parser_input = execute_service.get_parser_input(source)
        # local import, matching the original, to avoid a module-level cycle
        from parsers.ExpressionParser import ExpressionParser
        return ExpressionParser().parse(sub_context, parser_input)
|
||||
|
||||
|
||||
def evaluate(context,
|
||||
source,
|
||||
evaluators="all",
|
||||
@@ -472,12 +501,67 @@ def get_lexer_nodes(return_values, start, tokens):
|
||||
return lexer_nodes
|
||||
|
||||
|
||||
def ensure_evaluated(context, concept, eval_body=True):
|
||||
def _as_node_list(body):
    """Wrap *body* in a list when it is not already iterable."""
    return body if hasattr(body, "__iter__") else [body]


def get_lexer_nodes_using_positions(return_values, positions):
    """
    Transform every element of return_values into lexer nodes,
    using the matching entry of positions to remap the exact positions.

    :param return_values: parser return values; each carries a ``who`` tag
        identifying the parser that produced it, and a ``body``
    :param positions: position objects (``start``, ``end``, ``tokens``),
        zipped pairwise with return_values
    :return: flat list of lexer nodes
    :raises NotImplementedError: when a return value comes from an
        unsupported parser
    """
    lexer_nodes = []
    for ret_val, position in zip(return_values, positions):
        if ret_val.who in ("parsers.Python", "parsers.PythonWithConcepts"):
            lexer_nodes.append(SourceCodeNode(position.start,
                                              position.end,
                                              position.tokens,
                                              ret_val.body.source,
                                              python_node=ret_val.body.body,
                                              return_value=ret_val))

        elif ret_val.who == "parsers.ExactConcept":
            for concept in _as_node_list(ret_val.body.body):
                lexer_nodes.append(ConceptNode(concept,
                                               position.start,
                                               position.end,
                                               position.tokens,
                                               ret_val.body.source))

        elif ret_val.who in ("parsers.Bnf", "parsers.Sya", "parsers.Sequence"):
            nodes = list(ret_val.body.body)
            # every node of the sequence gets remapped to the same span
            for node in nodes:
                node.start = position.start
                node.end = position.end
            # append the whole sequence, not a single wrapper node
            lexer_nodes.extend(nodes)

        elif ret_val.who == "parsers.Rule":
            for rule in _as_node_list(ret_val.body.body):
                lexer_nodes.append(RuleNode(rule,
                                            position.start,
                                            position.end,
                                            position.tokens,
                                            ret_val.body.source))

        elif ret_val.who == "parsers.Function":
            node = ret_val.body.body
            node.start = position.start
            node.end = position.end
            lexer_nodes.append(node)

        else:
            # name the offending parser so the failure is diagnosable
            raise NotImplementedError(f"unsupported parser result: {ret_val.who!r}")

    return lexer_nodes
|
||||
|
||||
|
||||
def ensure_evaluated(context, concept, eval_body=True, metadata=None):
|
||||
"""
|
||||
Evaluate a concept if it is not already evaluated
|
||||
:param context:
|
||||
:param concept:
|
||||
:param eval_body:
|
||||
:param metadata:
|
||||
:return:
|
||||
"""
|
||||
if concept.get_metadata().is_evaluated:
|
||||
@@ -485,13 +569,13 @@ def ensure_evaluated(context, concept, eval_body=True):
|
||||
|
||||
# do not try to evaluate concept that are not fully initialized
|
||||
if concept.get_metadata().definition_type != DEFINITION_TYPE_BNF:
|
||||
for var in concept.get_metadata().variables:
|
||||
if var[1] is None and \
|
||||
var[0] not in concept.get_compiled() and \
|
||||
(var[0] not in concept.values() or concept.get_value(var[0]) == NotInit):
|
||||
for var_name, var_default_value in concept.get_metadata().variables:
|
||||
if var_default_value is None and \
|
||||
var_name not in concept.get_compiled() and \
|
||||
(var_name not in concept.values() or concept.get_value(var_name) == NotInit):
|
||||
return concept
|
||||
|
||||
evaluated = context.sheerka.evaluate_concept(context, concept, eval_body=eval_body)
|
||||
evaluated = context.sheerka.evaluate_concept(context, concept, eval_body=eval_body, metadata=metadata)
|
||||
return evaluated
|
||||
|
||||
|
||||
@@ -731,3 +815,90 @@ def evaluate_object(bag, properties):
|
||||
bag = as_bag(obj)
|
||||
|
||||
return obj
|
||||
|
||||
|
||||
def is_a_question(context, concept):
    """
    Tell whether the concept must be executed in the context of
    BuiltinConcepts.EVAL_QUESTION_REQUESTED.

    The only two ways currently supported are:
      * is_question() appears in the pre condition
      * context.in_context(BuiltinConcepts.EVAL_QUESTION_REQUESTED) appears
        in the pre condition

    :param context:
    :param concept: concept to analyse
    :return: True when the pre condition flags the concept as a question
    """
    precondition = concept.get_metadata().pre
    if precondition in (None, NotInit, ""):
        return False

    execute_service = context.sheerka.services[SheerkaExecute.NAME]
    from parsers.ExpressionParser import ExpressionParser

    result = ExpressionParser().parse(context,
                                      execute_service.get_parser_input(precondition))
    if not result.status:
        return False

    from parsers.expressions import IsAQuestionVisitor
    return IsAQuestionVisitor().is_a_question(result.body.body)
|
||||
|
||||
|
||||
def get_inner_body(context, concept):
    """
    Return the body of a container concept, unwrapping one level of
    ONLY_SUCCESSFUL wrapper when present.
    """
    body = concept.body
    if context.sheerka.isinstance(body, BuiltinConcepts.ONLY_SUCCESSFUL):
        return body.body
    return body
|
||||
|
||||
|
||||
class CreateObjectIdentifiers:
    """
    Builds unique, sanitized identifiers for Concept or Rule objects.
    """

    def __init__(self):
        # maps id(obj) -> identifier already handed out for that object
        self.identifiers = {}
        # maps base identifier -> number of name collisions seen so far
        self.identifiers_key = {}

    @staticmethod
    def sanitize(identifier):
        """Replace every non-alphanumeric character with '0'; None becomes ''."""
        if identifier is None:
            return ""
        return "".join(char if char.isalnum() else "0" for char in identifier)

    def get_identifier(self, obj, wrapper):
        """
        Return an identifier for a concept.

        The same object always gets the same identifier back; two distinct
        objects sharing a name get distinct, counter-suffixed identifiers.

        Kept as instance state (identifiers, identifiers_key) rather than
        module state so the parser itself stays as stateless as possible.

        :param obj: object exposing ``key``, ``name`` and ``id``
        :param wrapper: string or char that will wrap the result
            (ex '__C__' or '__R__')
        :return: the (possibly cached) identifier string
        """
        cached = self.identifiers.get(id(obj))
        if cached is not None:
            return cached

        base = wrapper + self.sanitize(obj.key or obj.name)
        if obj.id:
            base += "__" + obj.id

        # disambiguate repeated names with an incrementing suffix
        if base in self.identifiers_key:
            self.identifiers_key[base] += 1
            base += f"_{self.identifiers_key[base]}"
        else:
            self.identifiers_key[base] = 0

        result = base + wrapper
        self.identifiers[id(obj)] = result
        return result
|
||||
|
||||
Reference in New Issue
Block a user