Refactored sheerka execution flow + Enhanced log management

This commit is contained in:
2019-12-19 21:02:20 +01:00
parent 8dbe2e1b20
commit 5c95d918ad
32 changed files with 942 additions and 308 deletions
+56 -31
View File
@@ -11,46 +11,71 @@ class BuiltinConcepts(Enum):
The key if the name of the concept
The id is a sequential number given just before the concept is saved in sdp
The values of the enum are just a convenient way for me to group the concepts
The values of the enum is not used the code
"""
SHEERKA = 1
SUCCESS = 2
ERROR = 3
UNKNOWN_CONCEPT = 4 # the request concept is not recognized
RETURN_VALUE = 5 # a value is returned
CONCEPT_TOO_LONG = 6 # concept cannot be processed by exactConcept parser
NEW_CONCEPT = 7 # when a new concept is added
UNKNOWN_PROPERTY = 8 # when requesting for a unknown property
PARSER_RESULT = 9
TOO_MANY_SUCCESS = 10 # when expecting a limited number of successful return value
TOO_MANY_ERRORS = 11 # when expecting a limited number of successful return value
NOT_FOR_ME = 12 # a parser recognize that the entry is not meant for it
IS_EMPTY = 13 # when a set is empty
INVALID_RETURN_VALUE = 14 # the return value of an evaluator is not correct
BEFORE_PARSING = 15 # activated before evaluation by the parsers
PARSING = 16 # activated during the parsing. It contains the text to parse
AFTER_PARSING = 17 # after parsing
BEFORE_EVALUATION = 18 # before evaluation
EVALUATION = 19 # activated when the parsing process seems to be finished
AFTER_EVALUATION = 20 # activated when the parsing process seems to be finished
CONCEPT_ALREADY_DEFINED = 21 # when you try to add the same concept twice
NOP = 22 # no operation concept. Does nothing
PROPERTY_EVAL_ERROR = 23 # cannot evaluate a property of a concept
ENUMERATION = 24 # represents a list or a set
LIST = 25 # represents a list
CANNOT_RESOLVE_VALUE_ERROR = 26 # In presence of a concept where the default value is not know
SHEERKA = "sheerka"
NODE = 200
GENERIC_NODE = 201
IDENTIFIER_NODE = 202
BEFORE_PARSING = "before parsing" # activated before evaluation by the parsers
PARSING = "parsing" # activated during the parsing. It contains the text to parse
AFTER_PARSING = "after parsing" # after parsing
BEFORE_EVALUATION = "before evaluation" # before evaluation
EVALUATION = "evaluation" # activated when the parsing process seems to be finished
AFTER_EVALUATION = "after evaluation" # activated when the parsing process seems to be finished
BEFORE_RENDERING = "before rendering" # activate before the output is rendered
RENDERING = "rendering" # rendering the response from sheerka
AFTER_RENDERING = "after rendering" # rendering the response from sheerka
USER_INPUT = "user input" # represent an input from an user
SUCCESS = "success"
ERROR = "error"
UNKNOWN_CONCEPT = "unknown concept" # the request concept is not recognized
RETURN_VALUE = "return value" # a value is returned
CONCEPT_TOO_LONG = "concept too long" # concept cannot be processed by exactConcept parser
NEW_CONCEPT = "new concept" # when a new concept is added
UNKNOWN_PROPERTY = "unknown property" # when requesting for a unknown property
PARSER_RESULT = "parser result"
TOO_MANY_SUCCESS = "too many success" # when expecting a limited number of successful return value
TOO_MANY_ERRORS = "too many errors" # when expecting a limited number of successful return value
NOT_FOR_ME = "not for me" # a parser recognize that the entry is not meant for it
IS_EMPTY = "is empty" # when a set is empty
INVALID_RETURN_VALUE = "invalid return value" # the return value of an evaluator is not correct
CONCEPT_ALREADY_DEFINED = "concept already defined" # when you try to add the same concept twice
NOP = "no operation" # no operation concept. Does nothing
PROPERTY_EVAL_ERROR = "property evaluation error" # cannot evaluate a property of a concept
ENUMERATION = "enum" # represents a list or a set
LIST = "list" # represents a list
CANNOT_RESOLVE_VALUE_ERROR = "value cannot be resolved" # don't know how to find concept value
NODE = "node"
GENERIC_NODE = "generic node"
IDENTIFIER_NODE = "identifier node"
def __repr__(self):
return "__" + self.name
def __str__(self):
return "__" + self.name
"""
Some concepts have a specific implementation
It's mainly to a have proper __repr__ implementation, or redefine the is_unique attribut
It's mainly to a have proper __repr__ implementation, or because they are singleton (is_unique=True)
"""
class UserInputConcept(Concept):
def __init__(self, text=None, user_name=None):
super().__init__(BuiltinConcepts.USER_INPUT, True, False, BuiltinConcepts.USER_INPUT, text)
self.set_prop("user_name", user_name)
@property
def text(self):
return self.body
@property
def user_name(self):
return self.props["user_name"].value
class SuccessConcept(Concept):
def __init__(self):
super().__init__(BuiltinConcepts.SUCCESS, True, True, BuiltinConcepts.SUCCESS)
+3 -2
View File
@@ -1,12 +1,11 @@
import hashlib
from dataclasses import dataclass
from enum import Enum
import logging
from core.sheerka_logger import get_logger
import core.utils
from core.tokenizer import Tokenizer, TokenKind
log = logging.getLogger(__name__)
PROPERTIES_FOR_DIGEST = ("name", "key",
"definition", "definition_type",
@@ -86,6 +85,8 @@ class Concept:
self.props = {} # list of Property for this concept
self.cached_asts = {} # cached ast for the where, pre, post and body parts
self.bnf = None
self.log = get_logger("core." + self.__class__.__name__)
self.init_log = get_logger("init.core." + self.__class__.__name__)
def __repr__(self):
return f"({self.metadata.id}){self.metadata.name}"
+241 -112
View File
@@ -2,20 +2,18 @@ from dataclasses import dataclass, field
from core.builtin_concepts import BuiltinConcepts, ErrorConcept, ReturnValueConcept
from core.concept import Concept, ConceptParts, PROPERTIES_FOR_DIGEST
from evaluators.BaseEvaluator import OneReturnValueEvaluator
from parsers.BaseParser import BaseParser
from sdp.sheerkaDataProvider import SheerkaDataProvider, Event, SheerkaDataProviderDuplicateKeyError
import core.utils
import core.builtin_helpers
import logging
from core.sheerka_logger import console_handler, get_logger
log = logging.getLogger(__name__)
init_log = logging.getLogger(__name__ + ".init")
import logging
concept_evaluation_steps = [BuiltinConcepts.EVALUATION, BuiltinConcepts.AFTER_EVALUATION]
CONCEPT_LEXER_PARSER_CLASS = "parsers.ConceptLexerParser.ConceptLexerParser"
DEBUG_TAB_SIZE = 4
class Sheerka(Concept):
"""
@@ -27,14 +25,17 @@ class Sheerka(Concept):
BUILTIN_CONCEPTS_KEYS = "Builtins_Concepts" # sequential key for builtin concepts
USER_CONCEPTS_KEYS = "User_Concepts" # sequential key for user defined concepts
def __init__(self, debug=False, skip_builtins_in_db=False, loggers=None):
log.debug("Starting Sheerka.")
def __init__(self, skip_builtins_in_db=False, debug=False, loggers=None):
self.init_logging(debug, loggers)
super().__init__(BuiltinConcepts.SHEERKA, True, True, BuiltinConcepts.SHEERKA)
self.log.debug("Starting Sheerka.")
# cache of the most used concepts
# Note that these are only templates
# They are used as a footprint for instantiation
# Except of source when the concept is supposed to be unique
# key is the key of the concept (not the name or the id)
self.concepts_cache = {}
#
@@ -64,8 +65,6 @@ class Sheerka(Concept):
self.evaluators_prefix: str = None
self.parsers_prefix: str = None
self.debug = debug
self.loggers = loggers or []
self.skip_builtins_in_db = skip_builtins_in_db
def initialize(self, root_folder: str = None):
@@ -78,8 +77,6 @@ class Sheerka(Concept):
"""
try:
self.init_logging()
self.sdp = SheerkaDataProvider(root_folder)
if self.sdp.first_time:
self.sdp.set_key(self.USER_CONCEPTS_KEYS, 1000)
@@ -102,7 +99,7 @@ class Sheerka(Concept):
Initializes the builtin concepts
:return: None
"""
init_log.debug("Initializing builtin concepts")
self.init_log.debug("Initializing builtin concepts")
builtins_classes = self.get_builtins_classes_as_dict()
# this all initialization of the builtins seems to be little bit complicated
@@ -118,11 +115,11 @@ class Sheerka(Concept):
if not self.skip_builtins_in_db:
from_db = self.sdp.get_safe(self.CONCEPTS_ENTRY, concept.metadata.key)
if from_db is None:
init_log.debug(f"'{concept.name}' concept is not found in db. Adding.")
self.init_log.debug(f"'{concept.name}' concept is not found in db. Adding.")
self.set_id_if_needed(concept, True)
self.sdp.add("init", self.CONCEPTS_ENTRY, concept, use_ref=True)
else:
init_log.debug(f"Found concept '{from_db}' in db. Updating.")
self.init_log.debug(f"Found concept '{from_db}' in db. Updating.")
concept.update_from(from_db)
self.add_in_cache(concept)
@@ -132,12 +129,13 @@ class Sheerka(Concept):
Init the parsers
:return:
"""
core.utils.init_package_import("parsers")
base_class = core.utils.get_class("parsers.BaseParser.BaseParser")
for parser in core.utils.get_sub_classes("parsers", base_class):
if parser.__module__ == base_class.__module__:
continue
init_log.debug(f"Adding builtin parser '{parser.__name__}'")
self.init_log.debug(f"Adding builtin parser '{parser.__name__}'")
self.parsers[core.utils.get_full_qualified_name(parser)] = parser
def initialize_builtin_evaluators(self):
@@ -145,117 +143,113 @@ class Sheerka(Concept):
Init the evaluators
:return:
"""
core.utils.init_package_import("evaluators")
for evaluator in core.utils.get_sub_classes("evaluators", "evaluators.BaseEvaluator.OneReturnValueEvaluator"):
init_log.debug(f"Adding builtin evaluator '{evaluator.__name__}'")
self.init_log.debug(f"Adding builtin evaluator '{evaluator.__name__}'")
self.evaluators.append(evaluator)
for evaluator in core.utils.get_sub_classes("evaluators", "evaluators.BaseEvaluator.AllReturnValuesEvaluator"):
init_log.debug(f"Adding builtin evaluator '{evaluator.__name__}'")
self.init_log.debug(f"Adding builtin evaluator '{evaluator.__name__}'")
self.evaluators.append(evaluator)
def initialize_concepts_definitions(self, execution_context):
init_log.debug("Initializing concepts definitions")
self.init_log.debug("Initializing concepts definitions")
definitions = self.sdp.get_safe(self.CONCEPTS_DEFINITIONS_ENTRY, load_origin=False)
if definitions is None:
init_log.debug("No BNF defined")
self.init_log.debug("No BNF defined")
return
lexer_parser = self.parsers[CONCEPT_LEXER_PARSER_CLASS]()
ret_val = lexer_parser.initialize(execution_context, definitions)
if not ret_val.status:
init_log.error("Failed to initialize concepts definitions " + str(ret_val.body))
self.init_log.error("Failed to initialize concepts definitions " + str(ret_val.body))
return
self.concepts_grammars = lexer_parser.concepts_grammars
def init_logging(self):
def _logger_filter(record: logging.LogRecord):
if 'all' in self.loggers:
return True
ret = True
if 'init' not in self.loggers and record.name.endswith(".init"):
ret = False
return ret
handler = logging.StreamHandler()
handler.addFilter(_logger_filter)
if self.debug:
log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
log_level = logging.DEBUG
else:
log_format = "%(message)s"
log_level = logging.INFO
logging.basicConfig(format=log_format, level=log_level, handlers=[handler])
def eval(self, text: str):
def evaluate_user_input(self, text: str, user_name="kodjo"):
"""
Note to KSI: If you try to add execution context to this function,
You may end in an infinite loop
:param text:
:param user_name:
:return:
"""
log.debug(f"Evaluating '{text}'.")
evt_digest = self.sdp.save_event(Event(text))
log.debug(f"{evt_digest=}")
exec_context = ExecutionContext(self.key, evt_digest, self)
self.log.debug(f"Processing user input '{text}', {user_name=}.")
evt_digest = self.sdp.save_event(Event(text, user_name))
self.log.debug(f"{evt_digest=}")
execution_context = ExecutionContext(self.key, evt_digest, self)
# Before parsing
before_parsing = self.new(BuiltinConcepts.BEFORE_PARSING)
return_values = self.process(exec_context, [], [before_parsing])
return_values = core.utils.remove_from_list(return_values, lambda x: x.value == before_parsing)
user_input = self.ret(self.name, True, self.new(BuiltinConcepts.USER_INPUT, body=text, user_name=user_name))
steps = [
BuiltinConcepts.BEFORE_PARSING,
BuiltinConcepts.PARSING,
BuiltinConcepts.EVALUATION,
BuiltinConcepts.AFTER_EVALUATION
]
# parse
parsing_results = self.parse(exec_context, text)
return_values.extend(parsing_results)
return self.execute(execution_context, user_input, steps)
# evaluate
evaluating = self.new(BuiltinConcepts.EVALUATION)
return_values = self.process(exec_context, return_values, [evaluating])
return_values = core.utils.remove_from_list(return_values, lambda x: x.value == evaluating)
def _call_parsers(self, execution_context, return_values, logger=None):
# post evaluation
after_evaluation = self.new(BuiltinConcepts.AFTER_EVALUATION)
return_values = self.process(exec_context, return_values, [after_evaluation])
return_values = core.utils.remove_from_list(return_values, lambda x: x.value == after_evaluation)
return return_values
def parse(self, context, text):
result = []
if log.isEnabledFor(logging.DEBUG):
debug_text = "'" + text + "'" if isinstance(text, str) \
else "'" + BaseParser.get_text_from_tokens(text) + "' as tokens"
log.debug(f"Parsing {debug_text}")
# return_values must be a list
if not isinstance(return_values, list):
return_values = [return_values]
for return_value in return_values:
if not return_value.status or not self.isinstance(return_value.body, BuiltinConcepts.USER_INPUT):
continue
to_parse = self.value(return_value)
if self.log.isEnabledFor(logging.DEBUG):
debug_text = "'" + to_parse + "'" if isinstance(to_parse, str) \
else "'" + BaseParser.get_text_from_tokens(to_parse) + "' as tokens"
# self.log.debug(f"Parsing {debug_text}")
for parser in self.parsers.values():
p = parser(sheerka=self)
res = p.parse(context, text)
if isinstance(res, list):
result.extend(res)
if logger:
p.log = logger
res = p.parse(execution_context, to_parse)
if hasattr(res, "__iter__"):
for r in res:
r.parents = [return_value]
result.append(r)
else:
res.parents = [return_value]
result.append(res)
return result
def process(self, context, return_values, initial_concepts=None):
log.debug(f"{initial_concepts=}. Processing " + core.utils.pp(return_values))
def _call_evaluators(self, execution_context, return_values, process_step, evaluation_context=None):
"""
"""
# return_values must be a list
if not isinstance(return_values, list):
return_values = [return_values]
# adds contextual concepts
if initial_concepts:
for concept in initial_concepts:
return_values.append(self.ret(context.who, True, concept))
# evaluation context are contexts that may modify the behaviour of the execution
# They first need to be transformed into return values
if evaluation_context is None:
evaluation_return_values = []
else:
evaluation_return_values = [self.ret(execution_context.who, True, c) for c in evaluation_context]
# add the current step as part as the evaluation context
evaluation_return_values.append(self.ret(execution_context.who, True, self.new(process_step)))
# the pool of return values are the mix
return_values.extend(evaluation_return_values)
# group the evaluators by priority and sort them
# The first one to be applied will be the one with the highest priority
grouped_evaluators = {}
all_evaluators = [e() for e in self.evaluators]
for evaluator in [e for e in all_evaluators if e.enabled]:
for evaluator in [e() for e in self.evaluators if e.enabled]:
grouped_evaluators.setdefault(evaluator.priority, []).append(evaluator)
sorted_priorities = sorted(grouped_evaluators.keys(), reverse=True)
@@ -264,19 +258,18 @@ class Sheerka(Concept):
simple_digest = return_values[:]
for priority in sorted_priorities:
# log.debug("Processing priority " + str(priority))
# for item in return_values:
# log.debug(item)
original_items = return_values[:]
evaluated_items = []
to_delete = []
for evaluator in grouped_evaluators[priority]:
# process evaluators that work on return value
from evaluators.BaseEvaluator import OneReturnValueEvaluator
if isinstance(evaluator, OneReturnValueEvaluator):
for item in original_items:
if evaluator.matches(context, item):
result = evaluator.eval(context, item)
if evaluator.matches(execution_context, item):
result = evaluator.eval(execution_context, item)
if result is None:
continue
elif isinstance(result, list):
@@ -292,8 +285,8 @@ class Sheerka(Concept):
to_delete.append(item)
# process evaluators that work on all return values
else:
if evaluator.matches(context, original_items):
results = evaluator.eval(context, original_items)
if evaluator.matches(execution_context, original_items):
results = evaluator.eval(execution_context, original_items)
if results is None:
continue
if not isinstance(results, list):
@@ -306,27 +299,36 @@ class Sheerka(Concept):
return_values.extend([item for item in original_items if item not in to_delete])
# have we done something ?
to_compare = return_values[:] # set(id(r) for r in return_values)
to_compare = return_values[:]
if simple_digest == to_compare:
break
# inc the iteration and continue
execution_context = execution_context.push(iteration=execution_context.iteration + 1)
# remove all evaluation context that are not reduced
return_values = core.utils.remove_list_from_list(return_values, evaluation_return_values)
return return_values
def chain_process(self, context, return_values, initial_concepts):
def execute(self, execution_context, return_values, execution_steps, logger=None):
"""
Executes process for all initial contexts
:param context:
:param execution_context:
:param return_values:
:param initial_concepts:
:param execution_steps:
:param logger: logger to use (if not directly called by sheerka)
:return:
"""
for concept in initial_concepts:
if isinstance(concept, BuiltinConcepts):
concept = self.new(BuiltinConcepts)
init = [self.ret(context.who, True, concept)]
return_values = self.process(context, return_values, [init])
return_values = core.utils.remove_from_list(return_values, lambda x: x.value == init)
for step in execution_steps:
sub_context = execution_context.push(step=step)
sub_context.log(logger or self.log, f"{step=}, context='{sub_context}'")
if step == BuiltinConcepts.PARSING:
return_values = self._call_parsers(sub_context, return_values, logger)
else:
return_values = self._call_evaluators(sub_context, return_values, step)
sub_context.log_result(logger or self.log, return_values)
return return_values
@@ -340,7 +342,7 @@ class Sheerka(Concept):
if obj.metadata.id is not None:
return
obj.metadata.id = self.sdp.get_next_key(self.BUILTIN_CONCEPTS_KEYS if is_builtin else self.USER_CONCEPTS_KEYS)
log.debug(f"Setting id '{obj.metadata.id}' to concept '{obj.metadata.name}'.")
self.log.debug(f"Setting id '{obj.metadata.id}' to concept '{obj.metadata.name}'.")
def create_new_concept(self, context, concept: Concept):
"""
@@ -369,8 +371,8 @@ class Sheerka(Concept):
# check if it's a valid BNF or whether it breaks the known rules
concept_lexer_parser = self.parsers[CONCEPT_LEXER_PARSER_CLASS](grammars=self.concepts_grammars.copy())
sub_context = context.push(self.name, "Initializing concept definition")
sub_context.concepts_cache[concept.key] = concept # the concept is not in the real cache yet
sub_context = context.push(self.name, desc="Initializing concept definition")
sub_context.concepts[concept.key] = concept # the concept is not in the real cache yet
init_ret_value = concept_lexer_parser.initialize(sub_context, concepts_definitions)
if not init_ret_value.status:
return self.ret(self.create_new_concept.__name__, False, ErrorConcept(init_ret_value.value))
@@ -402,6 +404,8 @@ class Sheerka(Concept):
:param context:
:return:
"""
# steps = [BuiltinConcepts.BEFORE_PARSING, BuiltinConcepts.PARSING, BuiltinConcepts.AFTER_PARSING]
steps = [BuiltinConcepts.PARSING]
for part_key in ConceptParts:
source = getattr(concept.metadata, part_key.value)
if source is None or not isinstance(source, str) or source == "":
@@ -409,10 +413,12 @@ class Sheerka(Concept):
# I refuse empty strings for performance, I don't want to handle useless NOPConcepts
continue
else:
concept.cached_asts[part_key] = self.parse(context, source)
to_parse = self.ret(context.who, True, self.new(BuiltinConcepts.USER_INPUT, body=source))
concept.cached_asts[part_key] = self.execute(context, to_parse, steps)
for prop in concept.props:
concept.cached_asts[prop] = self.parse(context, concept.props[prop].value)
to_parse = self.ret(context.who, True, self.new(BuiltinConcepts.USER_INPUT, body=concept.props[prop].value))
concept.cached_asts[prop] = self.execute(context, to_parse, steps)
# updates the code of the reference when possible
if concept.key in self.concepts_cache:
@@ -446,7 +452,7 @@ class Sheerka(Concept):
part_key = ConceptParts(prop)
if concept.cached_asts[part_key] is None:
continue
res = self.chain_process(context, concept.cached_asts[part_key], concept_evaluation_steps)
res = self.execute(context, concept.cached_asts[part_key], concept_evaluation_steps)
res = core.builtin_helpers.expect_one(context, res)
setattr(concept.metadata, prop, res.value)
@@ -564,6 +570,11 @@ class Sheerka(Concept):
if obj is None:
return None
if self.isinstance(obj, BuiltinConcepts.RETURN_VALUE) and \
obj.status and \
self.isinstance(obj.value, BuiltinConcepts.USER_INPUT):
return obj.value.text
if not isinstance(obj, Concept):
return obj
@@ -664,6 +675,19 @@ class Sheerka(Concept):
def test(self):
return f"I have access to Sheerka !"
def test_error(self):
raise Exception("I can raise an error")
def dump_concepts(self):
lst = self.sdp.list(self.CONCEPTS_ENTRY)
for item in lst:
if hasattr(item, "__iter__"):
for i in item:
self.log.info(i)
else:
self.log.info(item)
@staticmethod
def get_builtins_classes_as_dict():
res = {}
@@ -673,18 +697,123 @@ class Sheerka(Concept):
return res
@staticmethod
def init_logging(debug, loggers):
core.sheerka_logger.set_enabled(loggers)
if debug:
# log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
log_format = "%(asctime)s [%(levelname)s] %(message)s"
log_level = logging.DEBUG
else:
log_format = "%(message)s"
log_level = logging.INFO
# logging.root.setLevel(log_level)
# fmt = logging.Formatter(log_format, None, "%")
# console_handler.setFormatter(fmt)
logging.basicConfig(format=log_format, level=log_level, handlers=[console_handler])
@dataclass
class ExecutionContext:
"""
To keep track of the execution of a request
"""
who: object # who is asking
event_digest: str # what was the (original) trigger
sheerka: Sheerka # sheerka
desc: str = None # human description of what is going on
obj: Concept = None # what is the subject of the execution context (if known)
concepts_cache: dict = field(default_factory=dict)
def push(self, who, desc=None, obj=None):
return ExecutionContext(who, self.event_digest, self.sheerka, desc=desc, obj=obj)
def __init__(self,
who,
event_digest: str,
sheerka: Sheerka,
/,
desc: str = None,
obj: Concept = None,
step: BuiltinConcepts = None,
iteration: int = 0,
concepts: dict = None):
self.who = who # who is asking
self.event_digest = event_digest # what was the (original) trigger
self.sheerka = sheerka # sheerka
self.step = step
self.iteration = iteration
self.desc = desc # human description of what is going on
self.obj = obj # what is the subject of the execution context (if known)
self.concepts = concepts or {}
self._id = ExecutionContextIdManager.get_id(event_digest)
self._tab = ""
@property
def id(self):
return self._id
def push(self, who=None, /, **kwargs):
who = who or self.who
desc = kwargs.get("desc", "")
obj = kwargs.get("obj", self.obj)
concepts = kwargs.get("concepts", self.concepts)
step = kwargs.get("step", self.step)
iteration = kwargs.get("iteration", self.iteration)
new = ExecutionContext(
who,
self.event_digest,
self.sheerka,
desc=desc,
obj=obj,
concepts=concepts,
step=step,
iteration=iteration,
)
new._tab = self._tab + " " * DEBUG_TAB_SIZE
return new
def log_new(self, logger):
logger.debug(f"[{self._id:2}]" + self._tab + str(self))
def log(self, logger, message, who=None):
logger.debug(f"[{self._id:2}]" + self._tab + (f"[{who}] " if who else "") + str(message))
def log_error(self, logger, message, who=None):
logger.exception(f"[{self._id:2}]" + self._tab + (f"[{who}] " if who else "") + str(message))
def log_result(self, logger, return_values):
if not logger.isEnabledFor(logging.DEBUG):
return
if len(return_values) == 0:
logger.debug(self._tab + "No return value")
for r in return_values:
to_str = self.return_value_to_str(r)
logger.debug(f"[{self._id:2}]" + self._tab + "-> " + to_str)
@staticmethod
def return_value_to_str(r):
value = str(r.value)
if len(value) > 50:
value = value[:47] + "..."
to_str = f"ReturnValue(who={r.who}, status={r.status}, value={value})"
return to_str
def __repr__(self):
msg = f"ExecutionContext(who={self.who}, id={self._id}"
if self.desc:
msg += f", desc='{self.desc}'"
msg += ")"
return msg
class ExecutionContextIdManager:
ids = {}
@staticmethod
def get_id(event_digest):
if event_digest in ExecutionContextIdManager.ids:
ExecutionContextIdManager.ids[event_digest] += 1
else:
ExecutionContextIdManager.ids[event_digest] = 0
return ExecutionContextIdManager.ids[event_digest]
+46
View File
@@ -0,0 +1,46 @@
import logging
enabled = []
disabled = ["init", "sdp", "parsers", "evaluators", "verbose"]
console_handler = logging.StreamHandler()
all_loggers = {}
def set_enabled(to_enable):
if to_enable is None:
return
if not hasattr(to_enable, "__iter__"):
to_enable = [to_enable]
enabled.extend(to_enable)
def to_discard(logger_class):
if logger_class in enabled:
return False
if logger_class not in disabled:
return False
return True
def get_logger(logger_name):
if logger_name in all_loggers:
return all_loggers[logger_name]
logger = logging.getLogger(logger_name)
all_loggers[logger_name] = logger
for d in disabled:
if logger_name.startswith(d + ".") and to_discard(d):
logger.disabled = True
for e in enabled:
if logger_name.startswith("verbose." + e):
logger.disabled = False
return logger
+18 -2
View File
@@ -113,16 +113,21 @@ def get_classes_from_package(package_name):
yield c
def get_sub_classes(package_name, base_class):
def init_package_import(package_name):
pkg = __import__(package_name)
prefix = pkg.__name__ + "."
for (module_loader, name, ispkg) in pkgutil.iter_modules(pkg.__path__, prefix):
importlib.import_module(name)
def get_sub_classes(package_name, base_class):
base_class = get_class(base_class) if isinstance(base_class, str) else base_class
return set(base_class.__subclasses__()).union(
all_class = set(base_class.__subclasses__()).union(
[s for c in base_class.__subclasses__() for s in get_sub_classes(package_name, c)])
# limit to the classes of the package
return [c for c in all_class if c.__module__.startswith(package_name)]
def remove_from_list(lst, to_remove_predicate):
"""
@@ -143,6 +148,17 @@ def remove_from_list(lst, to_remove_predicate):
return lst
def remove_list_from_list(lst, to_remove):
# https://stackoverflow.com/questions/2514961/remove-all-values-within-one-list-from-another-list/30353802
# explains that list comprehension is not the best approach
for item in to_remove:
try:
lst.remove(item)
except ValueError:
pass
return lst
def product(a, b):
"""
Kind of cartesian product between lists a and b
+4 -4
View File
@@ -5,12 +5,9 @@ from core.concept import Concept
from evaluators.BaseEvaluator import OneReturnValueEvaluator
from parsers.ConceptLexerParser import ParsingExpression, ParsingExpressionVisitor
from parsers.DefaultParser import DefConceptNode
import logging
from parsers.PythonParser import PythonNode
log = logging.getLogger(__name__)
class ConceptOrRuleNameVisitor(ParsingExpressionVisitor):
"""
@@ -49,7 +46,7 @@ class AddConceptEvaluator(OneReturnValueEvaluator):
isinstance(return_value.value.value, DefConceptNode)
def eval(self, context, return_value):
log.debug("Adding a new concept")
context.log(self.log, "Adding a new concept", self.name)
def_concept_node = return_value.value.value
sheerka = context.sheerka
@@ -89,6 +86,9 @@ class AddConceptEvaluator(OneReturnValueEvaluator):
concept.bnf = def_concept_node.definition.value.value
ret = sheerka.create_new_concept(context, concept)
if not ret.status:
error_cause = sheerka.value(ret.body)
context.log(self.log, f"Failed to add concept '{concept.name}'. Reason: {error_cause}", self.name)
return sheerka.ret(self.name, ret.status, ret.value, parents=[return_value])
@staticmethod
+15 -7
View File
@@ -1,14 +1,22 @@
from core.sheerka import ExecutionContext
from core.sheerka_logger import get_logger
class BaseEvaluator:
"""
Base class to evaluate ReturnValues
"""
PREFIX = "Evaluators:"
PREFIX = "evaluators."
enabled = True
def __init__(self, name, priority: int):
self.log = get_logger(self.PREFIX + self.__class__.__name__)
self.init_log = get_logger("init." + self.PREFIX + self.__class__.__name__)
self.verbose_log = get_logger("verbose." + self.PREFIX + self.__class__.__name__)
def __init__(self, name, priority: int, enabled=True):
self.name = self.PREFIX + name
self.priority = priority
self.enabled = enabled
class OneReturnValueEvaluator(BaseEvaluator):
@@ -16,10 +24,10 @@ class OneReturnValueEvaluator(BaseEvaluator):
Evaluate one specific return value
"""
def matches(self, context, return_value):
def matches(self, context: ExecutionContext, return_value):
pass
def eval(self, context, return_value):
def eval(self, context: ExecutionContext, return_value):
pass
@@ -28,8 +36,8 @@ class AllReturnValuesEvaluator(BaseEvaluator):
Evaluates the groups of ReturnValues
"""
def matches(self, context, return_values):
def matches(self, context: ExecutionContext, return_values):
pass
def eval(self, context, return_values):
def eval(self, context: ExecutionContext, return_values):
pass
+8 -7
View File
@@ -2,9 +2,6 @@ from core.builtin_concepts import ParserResultConcept, BuiltinConcepts
import core.builtin_helpers
from core.concept import Concept, ConceptParts
from evaluators.BaseEvaluator import OneReturnValueEvaluator
import logging
log = logging.getLogger(__name__)
class ConceptEvaluator(OneReturnValueEvaluator):
@@ -15,7 +12,11 @@ class ConceptEvaluator(OneReturnValueEvaluator):
Then checks the POST conditions
"""
NAME = "Concept"
evaluation_steps = [BuiltinConcepts.EVALUATION, BuiltinConcepts.AFTER_EVALUATION]
evaluation_steps = [
BuiltinConcepts.BEFORE_EVALUATION,
BuiltinConcepts.EVALUATION,
BuiltinConcepts.AFTER_EVALUATION
]
def __init__(self):
super().__init__(self.NAME, 50)
@@ -40,7 +41,7 @@ class ConceptEvaluator(OneReturnValueEvaluator):
# Evaluate the properties
for prop in concept.props:
sub_context = context.push(self.name, f"Evaluating property '{prop}'", concept)
sub_context = context.push(self.name, desc=f"Evaluating property '{prop}'", obj=concept)
res = self.evaluate_parsing(sheerka, sub_context, concept.cached_asts[prop])
if res.status:
concept.set_prop(prop, res.value)
@@ -60,11 +61,11 @@ class ConceptEvaluator(OneReturnValueEvaluator):
if body is None:
raise NotImplementedError("Seems weird !")
sub_context = context.push(self.name, "Evaluating body", concept)
sub_context = context.push(self.name, desc="Evaluating body", obj=concept)
res = self.evaluate_parsing(sheerka, sub_context, body)
return sheerka.ret(self.name, res.status, res.value, parents=[return_value])
def evaluate_parsing(self, sheerka, context, parsing_result):
res = sheerka.chain_process(context, parsing_result, self.evaluation_steps)
res = sheerka.execute(context, parsing_result, self.evaluation_steps, self.log)
res = core.builtin_helpers.expect_one(context, res)
return res
+1 -5
View File
@@ -1,11 +1,7 @@
from core.builtin_concepts import ParserResultConcept, BuiltinConcepts
from evaluators.BaseEvaluator import OneReturnValueEvaluator
import logging
from parsers.ConceptLexerParser import ConceptNode, TerminalNode, NonTerminalNode, ConceptMatch
log = logging.getLogger(__name__)
from parsers.ConceptLexerParser import ConceptNode, NonTerminalNode, ConceptMatch
class ConceptNodeEvaluator(OneReturnValueEvaluator):
@@ -1,12 +1,8 @@
from core.builtin_concepts import BuiltinConcepts
import core.builtin_helpers
from evaluators.BaseEvaluator import AllReturnValuesEvaluator, BaseEvaluator
import logging
from parsers.BaseParser import BaseParser
log = logging.getLogger(__name__)
class MultipleSameSuccessEvaluator(AllReturnValuesEvaluator):
"""
-4
View File
@@ -1,11 +1,7 @@
from core.builtin_concepts import BuiltinConcepts
from evaluators.BaseEvaluator import AllReturnValuesEvaluator
import logging
from parsers.BaseParser import BaseParser
log = logging.getLogger(__name__)
class OneSuccessEvaluator(AllReturnValuesEvaluator):
"""
+6 -6
View File
@@ -7,10 +7,6 @@ from parsers.PythonParser import PythonNode
import ast
import core.ast.nodes
import logging
log = logging.getLogger(__name__)
class PythonEvaluator(OneReturnValueEvaluator):
NAME = "Python"
@@ -31,17 +27,21 @@ class PythonEvaluator(OneReturnValueEvaluator):
sheerka = context.sheerka
node = return_value.value.value
try:
log.debug(f"Evaluating python node {node}")
context.log(self.verbose_log, f"Evaluating python node {node}", self.name)
my_locals = self.get_locals(context, node.ast_)
context.log(self.verbose_log, f"locals={my_locals}", self.name)
if isinstance(node.ast_, ast.Expression):
context.log(self.verbose_log, "Evaluating using 'eval'", self.name)
compiled = compile(node.ast_, "<string>", "eval")
evaluated = eval(compiled, {}, my_locals)
else:
context.log(self.verbose_log, "Evaluating using 'exec'", self.name)
evaluated = self.exec_with_return(node.ast_, my_locals)
return sheerka.ret(self.name, True, evaluated, parents=[return_value])
except Exception as error:
context.log_error(self.verbose_log, error, self.name)
error = sheerka.new(BuiltinConcepts.ERROR, body=error)
return sheerka.ret(self.name, False, error, parents=[return_value])
@@ -60,7 +60,7 @@ class PythonEvaluator(OneReturnValueEvaluator):
if context.sheerka.isinstance(concept, BuiltinConcepts.UNKNOWN_CONCEPT):
continue
sub_context = context.push(self.name, "Evaluating body", concept)
sub_context = context.push(self.name, desc="Evaluating body", obj=concept)
context.sheerka.eval_concept(sub_context, concept, ["body"])
if not context.sheerka.isa(concept.body, BuiltinConcepts.ERROR):
+7 -4
View File
@@ -1,12 +1,10 @@
import logging
from core.builtin_concepts import BuiltinConcepts
import core.builtin_helpers
from evaluators.BaseEvaluator import AllReturnValuesEvaluator, BaseEvaluator
import logging
from parsers.BaseParser import BaseParser
log = logging.getLogger(__name__)
class TooManySuccessEvaluator(AllReturnValuesEvaluator):
"""
@@ -49,6 +47,11 @@ class TooManySuccessEvaluator(AllReturnValuesEvaluator):
def eval(self, context, return_values):
sheerka = context.sheerka
if self.verbose_log.isEnabledFor(logging.DEBUG):
for s in self.success:
context.log(self.verbose_log, s, self.name)
context.log(self.verbose_log, f"value={sheerka.value(s.value)}", self.name)
if not core.builtin_helpers.is_same_success(sheerka, self.success):
too_many_success = sheerka.new(BuiltinConcepts.TOO_MANY_SUCCESS, body=self.success)
return sheerka.ret(self.name, False, too_many_success, parents=return_values)
+1 -1
View File
@@ -30,7 +30,7 @@ def main(argv):
sheerka.initialize()
_in = core.utils.sysarg_to_string(args)
result = sheerka.eval(_in)
result = sheerka.evaluate_user_input(_in)
for res in result:
logging.info(res)
+26 -8
View File
@@ -1,5 +1,7 @@
from dataclasses import dataclass
from core.tokenizer import TokenKind, Keywords
from core.sheerka_logger import get_logger
import logging
@dataclass()
@@ -34,9 +36,13 @@ class UnexpectedTokenErrorNode(ErrorNode):
class BaseParser:
PREFIX = "Parsers:"
PREFIX = "parsers."
def __init__(self, name):
self.log = get_logger("parsers." + self.__class__.__name__)
self.init_log = get_logger("init." + self.PREFIX + self.__class__.__name__)
self.verbose_log = get_logger("verbose." + self.PREFIX + self.__class__.__name__)
self.name = self.PREFIX + name
self.has_error = False
self.error_sink = []
@@ -52,6 +58,25 @@ class BaseParser:
def parse(self, context, text):
pass
def log_result(self, context, source, ret):
if not self.log.isEnabledFor(logging.DEBUG):
return
if ret.status:
value = context.return_value_to_str(ret)
context.log(self.log, f"Recognized '{source}' as {value}", self.name)
else:
context.log(self.log, f"Failed to recognize '{source}'", self.name)
def log_multiple_results(self, context, source, list_of_ret):
if not self.log.isEnabledFor(logging.DEBUG):
return
context.log(self.log, f"Recognized '{source}' as multiple concepts", self.name)
for r in list_of_ret:
value = context.return_value_to_str(r)
context.log(self.log, f" Recognized '{value}'", self.name)
@staticmethod
def get_text_from_tokens(tokens):
if tokens is None:
@@ -65,10 +90,3 @@ class BaseParser:
value = Keywords(token.value).value if token.type == TokenKind.KEYWORD else token.value
res += value
return res
@staticmethod
def log_result(log, text, ret):
if ret.status:
log.debug(f"Recognized '{text}' as {ret.value}")
else:
log.debug(f"Failed to recognize '{text}'")
+1 -1
View File
@@ -30,7 +30,7 @@ class BnfParser:
def __init__(self):
self.has_error = False
self.error_sink = []
self.name = BaseParser.PREFIX + "RegexParser"
self.name = BaseParser.PREFIX + "Bnf"
self.lexer_iter = None
self._current = None
+14 -7
View File
@@ -13,9 +13,6 @@ from core.concept import Concept
from core.tokenizer import TokenKind, Tokenizer, Token
from parsers.BaseParser import BaseParser, Node, ErrorNode
import core.utils
import logging
log = logging.getLogger(__name__)
def flatten(iterable):
@@ -588,8 +585,8 @@ class ConceptLexerParser(BaseParser):
return self.sheerka.ret(self.name, True, self.concepts_grammars)
def get_concept(self, concept_name):
if concept_name in self.context.concepts_cache:
return self.context.concepts_cache[concept_name]
if concept_name in self.context.concepts:
return self.context.concepts[concept_name]
return self.sheerka.get(concept_name)
def get_model(self, concept_def, concepts_to_resolve):
@@ -668,6 +665,9 @@ class ConceptLexerParser(BaseParser):
if self.sheerka.isinstance(e, BuiltinConcepts.UNKNOWN_CONCEPT):
continue
if e not in self.concepts_grammars:
continue
to_resolve = self.concepts_grammars[e]
if _is_infinite_recursion(e, to_resolve):
removed_concepts.append(e)
@@ -730,7 +730,7 @@ class ConceptLexerParser(BaseParser):
# manage when nothing is recognized (or other error)
if self.has_error:
return self.sheerka.ret(
ret = self.sheerka.ret(
self.name,
False,
self.sheerka.new(
@@ -739,6 +739,8 @@ class ConceptLexerParser(BaseParser):
source=text,
body=self.error_sink,
try_parsed=concepts_found[0] if len(concepts_found) == 1 else concepts_found))
self.log_result(context, text, ret)
return ret
# else
# returns as many ReturnValue than choices found
@@ -755,7 +757,12 @@ class ConceptLexerParser(BaseParser):
body=choice,
try_parsed=choice)))
return ret[0] if len(ret) == 1 else ret
if len(ret) == 1:
self.log_result(context, text, ret[0])
return ret[0]
else:
self.log_multiple_results(context, text, ret)
return ret
@staticmethod
def get_bests(results):
+29 -21
View File
@@ -5,11 +5,8 @@ import core.utils
from parsers.BaseParser import BaseParser, Node, ErrorNode, NotInitializedNode
from core.tokenizer import Tokenizer, TokenKind, Token, Keywords
from dataclasses import dataclass, field
import logging
from parsers.BnfParser import BnfParser
log = logging.getLogger(__name__)
from core.sheerka import ExecutionContext
@dataclass()
@@ -207,10 +204,10 @@ class DefaultParser(BaseParser):
"""
def __init__(self, **kwargs):
BaseParser.__init__(self, "DefaultParser")
BaseParser.__init__(self, "Default")
self.lexer_iter = None
self._current = None
self.context = None
self.context: ExecutionContext = None
self.text = None
self.sheerka = None
@@ -287,11 +284,12 @@ class DefaultParser(BaseParser):
def parse(self, context, text):
# default parser can only manage string text
if not isinstance(text, str):
log.debug(f"Failed to recognize '{text}'")
return context.sheerka.ret(
ret = context.sheerka.ret(
self.name,
False,
context.sheerka.new(BuiltinConcepts.NOT_FOR_ME, body=text))
self.log_result(context, text, ret)
return ret
self.reset_parser(context, text)
tree = self.parse_statement()
@@ -299,23 +297,29 @@ class DefaultParser(BaseParser):
# If a error is found it must be sent to error_sink
# tree must contain what was recognized
ret = self.sheerka.ret(
self.name,
not self.has_error,
self.sheerka.new(
if self.has_error and isinstance(self.error_sink[0], CannotHandleErrorNode):
body = self.sheerka.new(BuiltinConcepts.NOT_FOR_ME, body=self.error_sink)
else:
body = self.sheerka.new(
BuiltinConcepts.PARSER_RESULT,
parser=self,
source=text,
body=self.error_sink if self.has_error else tree,
try_parsed=tree))
try_parsed=tree)
self.log_result(log, text, ret)
ret = self.sheerka.ret(
self.name,
not self.has_error,
body)
self.log_result(context, text, ret)
return ret
def parse_statement(self):
token = self.get_token()
if token.value == Keywords.DEF:
self.next_token()
self.context.log(self.verbose_log, "Keyword DEF found.", self.name)
return self.parse_def_concept(token)
else:
return self.add_error(CannotHandleErrorNode([], self.text))
@@ -326,7 +330,6 @@ class DefaultParser(BaseParser):
"""
# init
log.debug("It may be a definition of a concept")
keywords_tokens = [def_token]
concept_found = DefConceptNode(keywords_tokens)
@@ -354,7 +357,6 @@ class DefaultParser(BaseParser):
concept_found.post = asts_found_by_parts[Keywords.POST]
concept_found.body = asts_found_by_parts[Keywords.AS]
log.debug(f"Found DefConcept node '{concept_found}'")
return concept_found
def regroup_tokens_by_parts(self, keywords_tokens):
@@ -412,7 +414,8 @@ class DefaultParser(BaseParser):
if TokenKind.NEWLINE in [t.type for t in name_tokens]:
self.add_error(SyntaxErrorNode(tokens_found_by_parts[Keywords.CONCEPT], "Newline are not allowed in name."))
return NameNode(name_tokens[name_first_token_index:]) # skip the first token
name_node = NameNode(name_tokens[name_first_token_index:]) # skip the first token
return name_node
def get_concept_definition(self, tokens_found_by_parts):
if tokens_found_by_parts[Keywords.FROM] is None:
@@ -448,8 +451,6 @@ class DefaultParser(BaseParser):
if keyword == Keywords.CONCEPT or keyword == Keywords.FROM:
continue # already done
log.debug("Processing part '" + keyword.name + "'")
tokens = tokens_found_by_parts[keyword]
if tokens is None:
continue # nothing to do
@@ -464,8 +465,15 @@ class DefaultParser(BaseParser):
continue
# ask the other parsers if they recognize the tokens
new_context = self.context.push(self.name)
parsing_result = core.builtin_helpers.expect_one(new_context, self.sheerka.parse(new_context, tokens))
new_context = self.context.push(self.name, desc=f"Parsing {keyword}")
new_context.log_new(self.verbose_log)
to_parse = self.sheerka.ret(
new_context.who,
True,
self.sheerka.new(BuiltinConcepts.USER_INPUT, body=tokens))
steps = [BuiltinConcepts.PARSING]
parsed = self.sheerka.execute(new_context, to_parse, steps, self.verbose_log)
parsing_result = core.builtin_helpers.expect_one(new_context, parsed)
if not parsing_result.status:
self.add_error(parsing_result.value)
continue
+6 -5
View File
@@ -11,7 +11,7 @@ class EmptyStringParser(BaseParser):
"""
def __init__(self, **kwargs):
BaseParser.__init__(self, "NullParser")
BaseParser.__init__(self, "EmptyString")
def parse(self, context, text):
sheerka = context.sheerka
@@ -19,12 +19,13 @@ class EmptyStringParser(BaseParser):
if isinstance(text, str) and text.strip() == "" or \
isinstance(text, list) and text == [] or \
text is None:
log.debug(f"Recognized '{text}' as BuiltinConcepts.NOP.")
return sheerka.ret(self.name, True, sheerka.new(
ret = sheerka.ret(self.name, True, sheerka.new(
BuiltinConcepts.PARSER_RESULT,
parser=self,
source="",
body=sheerka.new(BuiltinConcepts.NOP)))
else:
ret = sheerka.ret(self.name, False, sheerka.new(BuiltinConcepts.NOT_FOR_ME))
log.debug(f"Failed to recognize '{text}'")
return sheerka.ret(self.name, False, sheerka.new(BuiltinConcepts.NOT_FOR_ME))
self.log_result(context, text, ret)
return ret
+9 -4
View File
@@ -15,7 +15,7 @@ class ExactConceptParser(BaseParser):
MAX_WORDS_SIZE = 10
def __init__(self, **kwargs):
BaseParser.__init__(self, "ConceptParser")
BaseParser.__init__(self, "ExactConcept")
def parse(self, context, text):
"""
@@ -56,14 +56,19 @@ class ExactConceptParser(BaseParser):
source=text if isinstance(text, str) else self.get_text_from_tokens(text),
body=concept,
try_parsed=concept)))
log.debug(f"Recognized '{text}' as '{concept}'")
recognized = True
if recognized:
if len(res) == 1:
self.log_result(context, text, res[0])
else:
self.log_multiple_results(context, text, res)
return res
return res
log.debug(f"Failed to recognize {words}")
return sheerka.ret(self.name, False, sheerka.new(BuiltinConcepts.UNKNOWN_CONCEPT, body=text))
ret = sheerka.ret(self.name, False, sheerka.new(BuiltinConcepts.UNKNOWN_CONCEPT, body=text))
self.log_result(context, text, ret)
return ret
@staticmethod
def get_words(text):
+3 -5
View File
@@ -13,7 +13,7 @@ class PythonErrorNode(ErrorNode):
exception: Exception
# def __post_init__(self):
# log.debug("-> PythonErrorNode: " + str(self.exception))
# self.log.debug("-> PythonErrorNode: " + str(self.exception))
@dataclass()
@@ -58,7 +58,7 @@ class PythonParser(BaseParser):
def __init__(self, **kwargs):
BaseParser.__init__(self, "PythonParser")
BaseParser.__init__(self, "Python")
self.source = kwargs.get("source", "<undef>")
def parse(self, context, text):
@@ -87,7 +87,7 @@ class PythonParser(BaseParser):
body=self.error_sink if self.has_error else PythonNode(text, tree),
try_parsed=None))
self.log_result(log, text, ret)
self.log_result(context, text, ret)
return ret
def try_parse_expression(self, text):
@@ -110,8 +110,6 @@ class PythonGetNamesVisitor(ast.NodeVisitor):
def __init__(self):
self.names = set()
log.debug("Searching for names.")
def visit_Name(self, node):
log.debug(f"Found name : {node.id}")
self.names.add(node.id)
+13 -11
View File
@@ -5,10 +5,8 @@ import zlib
from sdp.sheerkaDataProviderIO import SheerkaDataProviderIO
from sdp.sheerkaSerializer import Serializer, SerializerContext
import logging
from core.sheerka_logger import get_logger
log = logging.getLogger(__name__)
init_log = logging.getLogger(__name__ + ".init")
def json_default_converter(o):
"""
@@ -276,7 +274,9 @@ class SheerkaDataProvider:
REF_PREFIX = "##REF##:"
def __init__(self, root=None):
init_log.debug("Initializing sdp.")
self.log = get_logger(__name__)
self.init_log = get_logger("init." + __name__)
self.init_log.debug("Initializing sdp.")
self.io = SheerkaDataProviderIO.get(root)
self.first_time = self.io.first_time
@@ -323,7 +323,6 @@ class SheerkaDataProvider:
return None
@staticmethod
def get_stream_digest(stream):
sha256_hash = hashlib.sha256()
@@ -353,14 +352,14 @@ class SheerkaDataProvider:
snapshot = self.get_snapshot()
state = self.load_state(snapshot)
log.debug(f"Adding obj '{obj}' in entry '{entry}' (allow_multiple={allow_multiple}, use_ref={use_ref})")
self.log.debug(f"Adding obj '{obj}' in entry '{entry}' (allow_multiple={allow_multiple}, use_ref={use_ref})")
if not isinstance(obj, ObjToUpdate):
obj = ObjToUpdate(obj)
# check uniqueness, cannot add the same key twice if allow_multiple == False
key = obj.get_key()
log.debug(f"key found : '{key}'") if key else log.debug("No key found")
self.log.debug(f"key found : '{key}'") if key else self.log.debug("No key found")
if not allow_multiple:
if isinstance(obj.obj, dict):
for k in obj.obj:
@@ -505,6 +504,9 @@ class SheerkaDataProvider:
filter_to_use = (lambda k, o: True) if filter is None else filter
for key, element in elements.items():
if filter_to_use(key, element):
if isinstance(element, list):
yield [self.load_ref_if_needed(e)[0] for e in element]
else:
yield self.load_ref_if_needed(element)[0]
else:
# manage when no key is defined for the elements
@@ -643,7 +645,7 @@ class SheerkaDataProvider:
def save_state(self, state: State):
digest = state.get_digest()
log.debug(f"Saving new state. digest={digest}")
self.log.debug(f"Saving new state. digest={digest}")
target_path = self.io.get_obj_path(SheerkaDataProvider.StateFolder, digest)
if self.io.exists(target_path):
return digest
@@ -660,18 +662,18 @@ class SheerkaDataProvider:
return self.serializer.deserialize(f, None)
def save_obj(self, obj):
log.debug(f"Saving '{obj}' as reference...")
self.log.debug(f"Saving '{obj}' as reference...")
stream = self.serializer.serialize(obj, SerializerContext(user_name="kodjo"))
digest = obj.get_digest() if hasattr(obj, "get_digest") else self.get_stream_digest(stream)
target_path = self.io.get_obj_path(SheerkaDataProvider.ObjectsFolder, digest)
if self.io.exists(target_path):
log.debug(f"...already saved. digest is {digest}")
self.log.debug(f"...already saved. digest is {digest}")
return digest
self.io.write_binary(target_path, stream.read())
log.debug(f"...digest={digest}.")
self.log.debug(f"...digest={digest}.")
return digest
def load_obj(self, digest, add_origin=True):
+8 -8
View File
@@ -2,13 +2,15 @@ import io
from os import path
import os
from fs.memoryfs import MemoryFS
import logging
from core.sheerka_logger import get_logger
class SheerkaDataProviderIO:
def __init__(self, root):
self.root = root
self.log = get_logger(__name__)
self.init_log = get_logger("init." + __name__)
def exists(self, file_path):
pass
@@ -48,16 +50,15 @@ class SheerkaDataProviderIO:
class SheerkaDataProviderFileIO(SheerkaDataProviderIO):
def __init__(self, root):
self.log = logging.getLogger(self.__class__.__name__ + ".init")
root = path.abspath(path.join(path.expanduser("~"), ".sheerka")) \
if root is None \
else path.abspath(root)
super().__init__(root)
self.log.debug("root is set to '" + self.root + "'")
self.init_log.debug("root is set to '" + self.root + "'")
if not path.exists(self.root):
self.log.debug("root folder not found. Creating it.")
self.init_log.debug("root folder not found. Creating it.")
os.makedirs(self.root)
self.first_time = True
else:
@@ -96,12 +97,12 @@ class SheerkaDataProviderFileIO(SheerkaDataProviderIO):
class SheerkaDataProviderMemoryIO(SheerkaDataProviderIO):
log = logging.getLogger("MemoryIO")
def __init__(self):
super().__init__("")
self.mem_fs = MemoryFS()
self.log.debug("Initializing memory file.")
self.init_log.debug("Initializing memory file.")
self.first_time = True
def open(self, file_path, mode):
@@ -133,12 +134,11 @@ class SheerkaDataProviderMemoryIO(SheerkaDataProviderIO):
class SheerkaDataProviderDictionaryIO(SheerkaDataProviderIO):
log = logging.getLogger("DictionaryIO")
def __init__(self):
super().__init__("")
self.cache = {}
self.log.debug("Initializing dictionary file.")
self.init_log.debug("Initializing dictionary file.")
self.first_time = True
def exists(self, file_path):
+5 -7
View File
@@ -4,17 +4,13 @@ import datetime
import struct
import io
from dataclasses import dataclass
import logging
from core.sheerka_logger import get_logger
from enum import Enum
import core.utils
from core.concept import Concept
log = logging.getLogger(__name__)
init_log = logging.getLogger(__name__ + ".init")
def json_default_converter(o):
"""
Default formatter for json
@@ -45,7 +41,9 @@ class Serializer:
HISTORY = "##history##"
def __init__(self):
init_log.debug("Initializing serializers")
self.log = get_logger(__name__)
self.init_log = get_logger("init." + __name__)
self.init_log.debug("Initializing serializers")
self._cache = []
# add builtin serializers
@@ -60,7 +58,7 @@ class Serializer:
:param serializer:
:return:
"""
init_log.debug(f"Adding serializer {serializer}")
self.init_log.debug(f"Adding serializer {serializer}")
self._cache.append(serializer)
def serialize(self, obj, context):
+2 -2
View File
@@ -41,7 +41,7 @@ def get_concept_part(part):
if isinstance(part, str):
node = PythonNode(part, ast.parse(part, mode="eval"))
return ReturnValueConcept(
who="Parsers:DefaultParser",
who="parsers.Default",
status=True,
value=ParserResultConcept(
source=part,
@@ -50,7 +50,7 @@ def get_concept_part(part):
if isinstance(part, PythonNode):
return ReturnValueConcept(
who="Parsers:DefaultParser",
who="parsers.Default",
status=True,
value=ParserResultConcept(
source=part.source,
+12
View File
@@ -729,6 +729,18 @@ def test_i_can_detect_indirect_infinite_recursion_with_sequence_or_ordered_choic
assert bar not in parser.concepts_grammars # removed because of the infinite recursion
def test_infinite_recursion_does_not_fail_if_a_concept_is_missing():
foo = Concept(name="foo")
bar = Concept(name="bar")
concepts = {
foo: bar
}
parser = ConceptLexerParser()
parser.initialize(get_context(), concepts)
assert foo in parser.concepts_grammars
def test_i_can_detect_indirect_infinite_recursion_with_optional():
# TODO infinite recursion with optional
pass
+15 -4
View File
@@ -6,7 +6,7 @@ from core.sheerka import Sheerka, ExecutionContext
from parsers.ConceptLexerParser import OrderedChoice, StrMatch, ConceptMatch
from parsers.PythonParser import PythonParser, PythonNode
from core.tokenizer import Keywords, Tokenizer
from parsers.DefaultParser import DefaultParser, NameNode, SyntaxErrorNode
from parsers.DefaultParser import DefaultParser, NameNode, SyntaxErrorNode, CannotHandleErrorNode
from parsers.DefaultParser import UnexpectedTokenErrorNode, DefConceptNode
from parsers.BnfParser import BnfParser
@@ -68,7 +68,7 @@ def get_concept(name, where=None, pre=None, post=None, body=None, definition=Non
concept.post = get_concept_part(post)
if definition:
concept.definition = ReturnValueConcept(
"Parsers:RegexParser",
"parsers.Bnf",
True,
definition)
@@ -85,7 +85,7 @@ def get_concept_part(part):
if isinstance(part, str):
node = PythonNode(part, ast.parse(part, mode="eval"))
return ReturnValueConcept(
who="Parsers:DefaultParser",
who="parsers.Default",
status=True,
value=ParserResultConcept(
source=part,
@@ -94,7 +94,7 @@ def get_concept_part(part):
if isinstance(part, PythonNode):
return ReturnValueConcept(
who="Parsers:DefaultParser",
who="parsers.Default",
status=True,
value=ParserResultConcept(
source=part.source,
@@ -359,3 +359,14 @@ def test_i_can_detect_empty_bnf_declaration():
assert not res.status
assert res.value.value[0] == SyntaxErrorNode([], "Empty declaration")
def test_i_can_detect_not_for_me():
text = "hello world"
context = get_context()
parser = DefaultParser()
res = parser.parse(context, text)
assert not res.status
assert context.sheerka.isinstance(res.value, BuiltinConcepts.NOT_FOR_ME)
assert isinstance(res.value.body[0], CannotHandleErrorNode)
+39
View File
@@ -0,0 +1,39 @@
from core.builtin_concepts import BuiltinConcepts
from core.concept import Concept
from core.sheerka import ExecutionContext
def test_id_is_incremented_by_event_digest():
a = ExecutionContext("foo", "event_1", None)
b = ExecutionContext("foo", "event_1", None)
c = ExecutionContext("foo", "event_2", None)
d = b.push()
e = c.push()
assert a.id == 0
assert b.id == 1
assert c.id == 0
assert d.id == 2
assert e.id == 1
def test_some_properties_are_given_to_the_child():
a = ExecutionContext("foo", "event_1", "fake_sheerka",
desc="some description",
obj=Concept("foo"),
step=BuiltinConcepts.EVALUATION,
iteration=15,
concepts={"bar": Concept("bar")})
b = a.push()
assert b.who == a.who
assert b.event_digest == a.event_digest
assert b.sheerka == a.sheerka
assert b.desc == ""
assert b.obj == a.obj
assert b.step == a.step
assert b.iteration == a.iteration
assert b.concepts == a.concepts
assert b.id == a.id + 1
assert b._tab == a._tab + " "
+53 -29
View File
@@ -61,12 +61,12 @@ def test_builtin_concepts_are_initialized():
def test_builtin_concepts_can_be_updated():
sheerka = get_sheerka(False, skip_builtins_in_db=False)
sheerka = get_sheerka(False, False)
loaded_sheerka = sheerka.get(BuiltinConcepts.SHEERKA)
loaded_sheerka.metadata.desc = "I have a description"
sheerka.sdp.modify("Test", sheerka.CONCEPTS_ENTRY, loaded_sheerka.key, loaded_sheerka)
sheerka = get_sheerka(False)
sheerka = get_sheerka(False, False)
loaded_sheerka = sheerka.get(BuiltinConcepts.SHEERKA)
assert loaded_sheerka.metadata.desc == "I have a description"
@@ -313,6 +313,12 @@ def test_i_cannot_get_value_when_no_body_and_allow_none_body_is_false():
body=concept)
def test_list_of_concept_is_sorted_by_id():
sheerka = get_sheerka(False, False)
concepts = sheerka.concepts()
assert concepts[0].id < concepts[-1].id
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# E V A L U A T I O N S
@@ -326,7 +332,7 @@ def test_i_cannot_get_value_when_no_body_and_allow_none_body_is_false():
def test_i_can_eval_python_expressions_with_no_variable(text, expected):
sheerka = get_sheerka()
res = sheerka.eval(text)
res = sheerka.evaluate_user_input(text)
assert len(res) == 1
assert res[0].status
@@ -339,7 +345,7 @@ def test_i_can_eval_concept_with_python_body():
sheerka.add_in_cache(concept)
text = "one"
res = sheerka.eval(text)
res = sheerka.evaluate_user_input(text)
assert len(res) == 1
assert res[0].status
assert res[0].value == 1
@@ -352,7 +358,7 @@ def test_i_can_eval_concept_with_concept_body():
sheerka.add_in_cache(concept_one)
sheerka.add_in_cache(concept_un)
res = sheerka.eval("un")
res = sheerka.evaluate_user_input("un")
return_value = res[0].value
assert len(res) == 1
assert res[0].status
@@ -365,7 +371,7 @@ def test_i_can_eval_concept_with_no_body():
sheerka.add_in_cache(concept)
text = "one"
res = sheerka.eval(text)
res = sheerka.evaluate_user_input(text)
assert len(res) == 1
assert res[0].status
assert res[0].value == concept
@@ -378,7 +384,7 @@ def test_is_unique_property_is_used_when_evaluating():
sheerka.add_in_cache(concept)
text = "one"
res = sheerka.eval(text)
res = sheerka.evaluate_user_input(text)
assert len(res) == 1
assert res[0].status
assert res[0].value == concept
@@ -403,7 +409,7 @@ as:
expected.init_key()
sheerka = get_sheerka()
res = sheerka.eval(text)
res = sheerka.evaluate_user_input(text)
assert len(res) == 1
assert res[0].status
@@ -431,7 +437,7 @@ def test_i_can_eval_def_concept_part_when_one_part_is_a_ref_of_another_concept()
concept_a_plus_b = Concept(name="a plus b").set_prop("a").set_prop("b")
sheerka.add_in_cache(concept_a_plus_b)
res = sheerka.eval("def concept a xx b as a plus b")
res = sheerka.evaluate_user_input("def concept a xx b as a plus b")
expected = Concept(name="a xx b", body="a plus b").set_prop("a").set_prop("b").init_key()
expected.metadata.id = "1001"
@@ -462,14 +468,31 @@ as:
"""
sheerka = get_sheerka()
sheerka.eval(text)
res = sheerka.eval(text)
sheerka.evaluate_user_input(text)
res = sheerka.evaluate_user_input(text)
assert len(res) == 1
assert not res[0].status
assert sheerka.isinstance(res[0].value, BuiltinConcepts.CONCEPT_ALREADY_DEFINED)
def test_i_can_disable_an_evaluator():
sheerka = get_sheerka()
concept = Concept(name="one", body="1")
sheerka.add_in_cache(concept)
text = "one"
p = next(e for e in sheerka.evaluators if e.__name__ == "PythonEvaluator")
p.enabled = False # not that you disable the class, not the instance
res = sheerka.evaluate_user_input(text)
assert len(res) == 1
assert res[0].status
assert sheerka.isinstance(res[0].value, BuiltinConcepts.PARSER_RESULT)
p.enabled = True # put back for the remaining unit tests
@pytest.mark.parametrize("text", [
"",
" ",
@@ -478,7 +501,7 @@ as:
def test_i_can_eval_a_empty_input(text):
sheerka = get_sheerka()
res = sheerka.eval(text)
res = sheerka.evaluate_user_input(text)
assert len(res) == 1
assert res[0].status
@@ -492,7 +515,7 @@ def test_i_can_eval_concept_with_variable():
sheerka.add_in_cache(concept_hello)
sheerka.add_in_cache(concept_foo)
res = sheerka.eval("hello foo")
res = sheerka.evaluate_user_input("hello foo")
return_value = res[0].value
assert len(res) == 1
assert res[0].status
@@ -505,7 +528,7 @@ def test_i_can_eval_concept_with_variable_and_python_as_body():
sheerka.add_in_cache(Concept(name="hello a", body="'hello ' + a").set_prop("a"))
sheerka.add_in_cache(Concept(name="foo", body="'foo'"))
res = sheerka.eval("hello foo")
res = sheerka.evaluate_user_input("hello foo")
assert len(res) == 1
assert res[0].status
assert res[0].value, "hello foo"
@@ -518,7 +541,7 @@ def test_i_can_eval_duplicate_concepts_with_same_value():
sheerka.add_in_cache(Concept(name="hello foo", body="'hello foo'"))
sheerka.add_in_cache(Concept(name="foo", body="'foo'"))
res = sheerka.eval("hello foo")
res = sheerka.evaluate_user_input("hello foo")
assert len(res) == 1
assert res[0].status
assert res[0].value, "hello foo"
@@ -532,7 +555,7 @@ def test_i_cannot_manage_duplicate_concepts_when_the_values_are_different():
sheerka.add_in_cache(Concept(name="hello foo", body="'hello foo'"))
sheerka.add_in_cache(Concept(name="foo", body="'another value'"))
res = sheerka.eval("hello foo")
res = sheerka.evaluate_user_input("hello foo")
assert len(res) == 1
assert not res[0].status
assert sheerka.isinstance(res[0].value, BuiltinConcepts.TOO_MANY_SUCCESS)
@@ -551,7 +574,7 @@ def test_i_can_manage_concepts_with_the_same_key_when_values_are_the_same():
sheerka.create_new_concept(context, Concept(name="hello a", body="'hello ' + a").set_prop("a"))
sheerka.create_new_concept(context, Concept(name="hello b", body="'hello ' + b").set_prop("b"))
res = sheerka.eval("hello 'foo'")
res = sheerka.evaluate_user_input("hello 'foo'")
assert len(res) == 1
assert res[0].status
assert res[0].value == "hello foo"
@@ -563,7 +586,7 @@ def test_i_can_create_concepts_with_python_code_as_body():
context = get_context(sheerka)
sheerka.create_new_concept(context, Concept(name="concepts", body="sheerka.concepts()"))
res = sheerka.eval("concepts")
res = sheerka.evaluate_user_input("concepts")
assert len(res) == 1
assert res[0].status
@@ -571,14 +594,14 @@ def test_i_can_create_concepts_with_python_code_as_body():
def test_i_can_create_concept_with_bnf_definition():
sheerka = get_sheerka()
sheerka = get_sheerka(False, False)
a = Concept("a")
sheerka.add_in_cache(a)
sheerka.concepts_grammars = ConceptLexerParser().initialize(
get_context(sheerka),
{a: OrderedChoice("one", "two")}).body
res = sheerka.eval("def concept plus from bnf a ('plus' plus)?")
res = sheerka.evaluate_user_input("def concept plus from bnf a ('plus' plus)?")
assert len(res) == 1
assert res[0].status
assert sheerka.isinstance(res[0].value, BuiltinConcepts.NEW_CONCEPT)
@@ -605,21 +628,22 @@ def test_i_can_create_concept_with_bnf_definition():
def test_i_can_eval_bnf_definitions():
sheerka = get_sheerka()
concept_a = sheerka.eval("def concept a from bnf 'one' | 'two'")[0].body.body
concept_a = sheerka.evaluate_user_input("def concept a from bnf 'one' | 'two'")[0].body.body
res = sheerka.eval("one")
res = sheerka.evaluate_user_input("one")
assert len(res) == 1
assert res[0].status
assert sheerka.isinstance(res[0].value, concept_a)
def test_i_can_eval_bnf_definitions_with_variables():
sheerka = get_sheerka()
concept_a = sheerka.eval("def concept a from bnf 'one' | 'two'")[0].body.body
concept_b = sheerka.eval("def concept b from bnf a 'three'")[0].body.body
concept_a = sheerka.evaluate_user_input("def concept a from bnf 'one' | 'two'")[0].body.body
concept_b = sheerka.evaluate_user_input("def concept b from bnf a 'three'")[0].body.body
res = sheerka.eval("one three")
res = sheerka.evaluate_user_input("one three")
assert len(res) == 1
assert res[0].status
@@ -635,14 +659,14 @@ def test_i_can_eval_bnf_definitions_from_separate_instances():
but make sure that the BNF are correctly persisted and loaded
"""
sheerka = get_sheerka(False)
concept_a = sheerka.eval("def concept a from bnf 'one' | 'two'")[0].body.body
concept_a = sheerka.evaluate_user_input("def concept a from bnf 'one' | 'two'")[0].body.body
res = get_sheerka(False).eval("one")
res = get_sheerka(False).evaluate_user_input("one")
assert len(res) == 1
assert res[0].status
assert sheerka.isinstance(res[0].value, concept_a)
res = get_sheerka(False).eval("two")
res = get_sheerka(False).evaluate_user_input("two")
assert len(res) == 1
assert res[0].status
assert sheerka.isinstance(res[0].value, concept_a)
@@ -650,7 +674,7 @@ def test_i_can_eval_bnf_definitions_from_separate_instances():
def get_sheerka(use_dict=True, skip_builtins_in_db=True):
root = "mem://" if use_dict else root_folder
sheerka = Sheerka(skip_builtins_in_db)
sheerka = Sheerka(skip_builtins_in_db=skip_builtins_in_db)
sheerka.initialize(root)
return sheerka
+88
View File
@@ -22,6 +22,11 @@ def read_json_file(sdp, file_name):
class ObjWithKey:
"""
Object where the key can be resolved using get_key()
Not suitable for Json dump as there is no to_dict() method
"""
def __init__(self, a, b):
self.a = a
self.b = b
@@ -39,6 +44,11 @@ class ObjWithKey:
class ObjSetKey:
"""
Object where the key can be be automatically set thanks to set_key()
Not suitable for Json dump as there is no to_dict() method
"""
def __init__(self, value, key=None):
self.value = value
self.key = key
@@ -56,6 +66,11 @@ class ObjSetKey:
class ObjNoKey:
"""
Object with no key, they won't be ordered
Not suitable for Json dump as there is no to_dict() method
"""
def __init__(self, a, b):
self.a = a
self.b = b
@@ -73,6 +88,11 @@ class ObjNoKey:
class ObjDumpJson:
"""
Object where the key can be resolved using get_key()
that can be used to dump as Json
"""
def __init__(self, key=None, value=None):
self.key = key
self.value = value
@@ -104,6 +124,12 @@ class ObjDumpJson:
class ObjWithDigestNoKey:
"""
Object that can compute its digest.
It can be used to test objects sharing the same entry (but that are different)
Not suitable for Json dump as there is no to_dict() method
"""
def __init__(self, a, b):
self.a = a
self.b = b
@@ -124,6 +150,12 @@ class ObjWithDigestNoKey:
class ObjWithDigestWithKey:
"""
Object with a key that can compute its digest.
It can be used to test objects sharing the same key (but that are different)
Not suitable for Json dump as there is no to_dict() method
"""
def __init__(self, a, b):
self.a = a
self.b = b
@@ -908,6 +940,62 @@ def test_i_can_list_when_one_element(root):
assert list(result) == ["foo"]
@pytest.mark.parametrize("root", [
    ".sheerka",
    "mem://"
])
def test_i_can_list_when_multiple_entries_under_the_same_key(root):
    """Two entries added under the same key come back as one nested list."""
    provider = SheerkaDataProvider(root)
    provider.add(evt_digest, "entry", ObjWithKey("a", "b"))
    provider.add(evt_digest, "entry", ObjWithKey("a", "c"))
    listed = provider.list("entry")
    assert list(listed) == [[ObjWithKey("a", "b"), ObjWithKey("a", "c")]]
@pytest.mark.parametrize("root", [
    ".sheerka",
    "mem://"
])
def test_i_can_list_when_multiple_entries_under_the_same_key_when_reference(root):
    """Entries stored by reference (pickle-serialized) under the same key are
    still listed together as one nested list."""
    provider = SheerkaDataProvider(root)
    # ObjWithKey instances are stored via the pickle serializer when use_ref=True
    provider.serializer.register(PickleSerializer(lambda obj: isinstance(obj, ObjWithKey)))
    provider.add(evt_digest, "entry", ObjWithKey("a", "b"), use_ref=True)
    provider.add(evt_digest, "entry", ObjWithKey("a", "c"), use_ref=True)
    listed = provider.list("entry")
    assert list(listed) == [[ObjWithKey("a", "b"), ObjWithKey("a", "c")]]
@pytest.mark.parametrize("root", [
    ".sheerka",
    "mem://"
])
def test_i_can_list_when_multiple_entries_under_the_same_entry(root):
    """Keyless objects added under the same entry are listed as a flat sequence."""
    provider = SheerkaDataProvider(root)
    provider.add(evt_digest, "entry", ObjNoKey("a", "b"))
    provider.add(evt_digest, "entry", ObjNoKey("a", "c"))
    listed = provider.list("entry")
    assert list(listed) == [ObjNoKey("a", "b"), ObjNoKey("a", "c")]
@pytest.mark.parametrize("root", [
    ".sheerka",
    "mem://"
])
def test_i_can_list_when_multiple_entries_under_the_same_entry_when_reference(root):
    """Keyless objects stored by reference (pickle-serialized) are still
    listed as a flat sequence."""
    provider = SheerkaDataProvider(root)
    # ObjNoKey instances are stored via the pickle serializer when use_ref=True
    provider.serializer.register(PickleSerializer(lambda obj: isinstance(obj, ObjNoKey)))
    provider.add(evt_digest, "entry", ObjNoKey("a", "b"), use_ref=True)
    provider.add(evt_digest, "entry", ObjNoKey("a", "c"), use_ref=True)
    listed = provider.list("entry")
    assert list(listed) == [ObjNoKey("a", "b"), ObjNoKey("a", "c")]
@pytest.mark.parametrize("root", [
".sheerka",
"mem://"
+206
View File
@@ -0,0 +1,206 @@
# Make sure that the evaluators works as expected
from core.builtin_concepts import BuiltinConcepts
from core.concept import Concept
from core.sheerka import Sheerka, ExecutionContext
from evaluators.BaseEvaluator import OneReturnValueEvaluator, BaseEvaluator
def get_sheerka():
    """Build a Sheerka instance backed by the in-memory data provider."""
    instance = Sheerka()
    instance.initialize("mem://")
    return instance
def get_context(sheerka):
    """Create a throwaway execution context bound to *sheerka* for a test run."""
    context = ExecutionContext("test", "xxx", sheerka)
    return context
def get_ret_val(sheerka, concept, who="who"):
    """Cache *concept* (first time only) and wrap it in a successful return value.

    The concept's key is initialized first; an already-cached concept under
    the same key is left untouched.
    """
    concept.init_key()
    key = concept.key
    if key not in sheerka.concepts_cache:
        sheerka.concepts_cache[key] = concept
    return sheerka.ret(who, True, sheerka.new(key))
class EvaluatorWithPriority(OneReturnValueEvaluator):
    """Test evaluator that records every matches()/eval() call in a shared trace.

    ``out`` is deliberately a class-level list shared by all subclasses so a
    test can inspect the interleaved call order of several evaluators; tests
    reset it (``EvaluatorWithPriority.out = []``) before exercising execute().
    """
    # trace lines of the form "<step> [<iteration>] <name> - <hook> - target=<key>"
    out = []

    def __init__(self, name, priority):
        super().__init__(name, priority)

    def _log(self, context, return_value, hook):
        """Append one trace line for *hook* ("matches" or "eval")."""
        target = str(return_value.body.key)
        step = str(context.step)
        # drop the evaluator-name prefix added by the base class
        short_name = self.name[len(BaseEvaluator.PREFIX):]
        self.out.append(f"{step} [{context.iteration}] {short_name} - {hook} - target={target}")

    def matches(self, context, return_value):
        """Record the call; by default every return value matches."""
        self._log(context, return_value, "matches")
        return True

    def eval(self, context, return_value):
        """Record the call; returns None, i.e. does not modify the entry."""
        self._log(context, return_value, "eval")
class EvaluatorWithPriority10(EvaluatorWithPriority):
    """Trace-only evaluator registered under priority 10."""
    def __init__(self):
        super().__init__(name="priority10", priority=10)
class EvaluatorWithPriority15(EvaluatorWithPriority):
    """Trace-only evaluator registered under priority 15."""
    def __init__(self):
        super().__init__(name="priority15", priority=15)
class EvaluatorWithPriority20(EvaluatorWithPriority):
    """Trace-only evaluator registered under priority 20."""
    def __init__(self):
        super().__init__(name="priority20", priority=20)
class EvaluatorModifyFoo(EvaluatorWithPriority):
    """Evaluator that rewrites a "foo" concept into a "bar" concept."""
    def __init__(self):
        super().__init__("modifyFoo", 10)

    def matches(self, context, return_value):
        super().matches(context, return_value)  # keep the shared trace complete
        sheerka = context.sheerka
        return sheerka.isinstance(return_value.body, "foo")

    def eval(self, context, return_value):
        super().eval(context, return_value)  # keep the shared trace complete
        return get_ret_val(context.sheerka, Concept("bar"))
class EvaluatorModifyBar(EvaluatorWithPriority):
    """Evaluator that rewrites a "bar" concept into a "baz" concept."""
    def __init__(self):
        super().__init__("modifyBar", 10)

    def matches(self, context, return_value):
        super().matches(context, return_value)  # keep the shared trace complete
        sheerka = context.sheerka
        return sheerka.isinstance(return_value.body, "bar")

    def eval(self, context, return_value):
        super().eval(context, return_value)  # keep the shared trace complete
        return get_ret_val(context.sheerka, Concept("baz"))
def test_that_return_values_is_unchanged_when_no_evaluator():
    """With no evaluator registered, execute() returns the single entry wrapped in a list."""
    sheerka = get_sheerka()
    sheerka.evaluators = []
    ret_val = get_ret_val(sheerka, Concept("foo"))
    result = sheerka.execute(get_context(sheerka), ret_val, [BuiltinConcepts.EVALUATION])
    assert result == [ret_val]
def test_i_can_use_a_list_as_input():
    """execute() also accepts a list of entries and returns it unchanged."""
    sheerka = get_sheerka()
    sheerka.evaluators = []
    inputs = [get_ret_val(sheerka, Concept("foo"))]
    result = sheerka.execute(get_context(sheerka), inputs, [BuiltinConcepts.EVALUATION])
    assert result == inputs
def test_step_concept_is_removed_after_processing_if_not_reduced():
    """The step concept injected by execute() must not leak into the results
    when no evaluator modifies the entry."""
    sheerka = get_sheerka()
    sheerka.evaluators = [EvaluatorWithPriority10]
    ret_val = get_ret_val(sheerka, Concept("foo"))
    result = sheerka.execute(get_context(sheerka), ret_val, [BuiltinConcepts.EVALUATION])
    keys = [r.body.key for r in result]
    assert BuiltinConcepts.EVALUATION not in keys
def test_step_concept_is_removed_after_processing_if_not_reduced_2():
    """Even when an evaluator rewrites the entry, the injected step concept
    is stripped from the final results."""
    sheerka = get_sheerka()
    sheerka.evaluators = [EvaluatorModifyFoo]
    ret_val = get_ret_val(sheerka, Concept("foo"))
    result = sheerka.execute(get_context(sheerka), ret_val, [BuiltinConcepts.EVALUATION])
    keys = [r.body.key for r in result]
    assert BuiltinConcepts.EVALUATION not in keys
def test_that_higher_priority_evaluators_are_evaluated_first():
    """Evaluators run in descending priority order, regardless of the order
    in which they were registered."""
    sheerka = get_sheerka()
    sheerka.evaluators = [EvaluatorWithPriority20, EvaluatorWithPriority10, EvaluatorWithPriority15]
    inputs = [get_ret_val(sheerka, Concept("foo"))]
    EvaluatorWithPriority.out = []  # reset the shared trace
    sheerka.execute(get_context(sheerka), inputs, [BuiltinConcepts.EVALUATION])
    expected_trace = [
        '__EVALUATION [0] priority20 - matches - target=foo',
        '__EVALUATION [0] priority20 - eval - target=foo',
        '__EVALUATION [0] priority20 - matches - target=__EVALUATION',
        '__EVALUATION [0] priority20 - eval - target=__EVALUATION',
        '__EVALUATION [0] priority15 - matches - target=foo',
        '__EVALUATION [0] priority15 - eval - target=foo',
        '__EVALUATION [0] priority15 - matches - target=__EVALUATION',
        '__EVALUATION [0] priority15 - eval - target=__EVALUATION',
        '__EVALUATION [0] priority10 - matches - target=foo',
        '__EVALUATION [0] priority10 - eval - target=foo',
        '__EVALUATION [0] priority10 - matches - target=__EVALUATION',
        '__EVALUATION [0] priority10 - eval - target=__EVALUATION',
    ]
    assert EvaluatorWithPriority.out == expected_trace
def test_that_predicate_is_checked_before_evaluation():
    """eval() runs only on entries whose matches() returned True."""
    sheerka = get_sheerka()
    sheerka.evaluators = [EvaluatorModifyFoo]
    inputs = [get_ret_val(sheerka, Concept("foo")), get_ret_val(sheerka, Concept("baz"))]
    EvaluatorWithPriority.out = []  # reset the shared trace
    sheerka.execute(get_context(sheerka), inputs, [BuiltinConcepts.EVALUATION])
    expected_trace = [
        '__EVALUATION [0] modifyFoo - matches - target=foo',
        '__EVALUATION [0] modifyFoo - eval - target=foo',
        '__EVALUATION [0] modifyFoo - matches - target=baz',
        '__EVALUATION [0] modifyFoo - matches - target=__EVALUATION',
        '__EVALUATION [1] modifyFoo - matches - target=bar',
        '__EVALUATION [1] modifyFoo - matches - target=baz',
        '__EVALUATION [1] modifyFoo - matches - target=__EVALUATION',
    ]
    assert EvaluatorWithPriority.out == expected_trace
def test_evaluation_continue_until_no_more_modification():
    """Execution iterates until a full pass produces no modification.

    Iteration 0: EvaluatorModifyFoo rewrites foo -> bar.
    Iteration 1: EvaluatorModifyBar rewrites bar -> baz.
    Iteration 2: nothing matches any more, so the loop stops.
    The shared trace records every matches()/eval() call in order.
    """
    sheerka = get_sheerka()
    sheerka.evaluators = [EvaluatorModifyFoo, EvaluatorModifyBar]
    entries = [get_ret_val(sheerka, Concept("foo")), get_ret_val(sheerka, Concept("baz"))]
    # reset the class-level trace shared by all EvaluatorWithPriority subclasses
    EvaluatorWithPriority.out = []
    sheerka.execute(get_context(sheerka), entries, [BuiltinConcepts.EVALUATION])
    assert EvaluatorWithPriority.out == [
        '__EVALUATION [0] modifyFoo - matches - target=foo',
        '__EVALUATION [0] modifyFoo - eval - target=foo',
        '__EVALUATION [0] modifyFoo - matches - target=baz',
        '__EVALUATION [0] modifyFoo - matches - target=__EVALUATION',
        '__EVALUATION [0] modifyBar - matches - target=foo',
        '__EVALUATION [0] modifyBar - matches - target=baz',
        '__EVALUATION [0] modifyBar - matches - target=__EVALUATION',
        '__EVALUATION [1] modifyFoo - matches - target=bar',
        '__EVALUATION [1] modifyFoo - matches - target=baz',
        '__EVALUATION [1] modifyFoo - matches - target=__EVALUATION',
        '__EVALUATION [1] modifyBar - matches - target=bar',
        '__EVALUATION [1] modifyBar - eval - target=bar',
        '__EVALUATION [1] modifyBar - matches - target=baz',
        '__EVALUATION [1] modifyBar - matches - target=__EVALUATION',
        '__EVALUATION [2] modifyFoo - matches - target=baz',
        '__EVALUATION [2] modifyFoo - matches - target=baz',
        '__EVALUATION [2] modifyFoo - matches - target=__EVALUATION',
        '__EVALUATION [2] modifyBar - matches - target=baz',
        '__EVALUATION [2] modifyBar - matches - target=baz',
        '__EVALUATION [2] modifyBar - matches - target=__EVALUATION'
    ]