Refactored sheerka execution flow + Enhanced log management
+56 -31
@@ -11,46 +11,71 @@ class BuiltinConcepts(Enum):
The key is the name of the concept
The id is a sequential number given just before the concept is saved in sdp

The values of the enum are just a convenient way for me to group the concepts
The values of the enum are not used in the code
"""
SHEERKA = 1
SUCCESS = 2
ERROR = 3
UNKNOWN_CONCEPT = 4  # the requested concept is not recognized
RETURN_VALUE = 5  # a value is returned
CONCEPT_TOO_LONG = 6  # concept cannot be processed by exactConcept parser
NEW_CONCEPT = 7  # when a new concept is added
UNKNOWN_PROPERTY = 8  # when requesting an unknown property
PARSER_RESULT = 9
TOO_MANY_SUCCESS = 10  # when expecting a limited number of successful return values
TOO_MANY_ERRORS = 11  # when expecting a limited number of error return values
NOT_FOR_ME = 12  # a parser recognizes that the entry is not meant for it
IS_EMPTY = 13  # when a set is empty
INVALID_RETURN_VALUE = 14  # the return value of an evaluator is not correct
BEFORE_PARSING = 15  # activated before evaluation by the parsers
PARSING = 16  # activated during the parsing. It contains the text to parse
AFTER_PARSING = 17  # after parsing
BEFORE_EVALUATION = 18  # before evaluation
EVALUATION = 19  # activated when the parsing process seems to be finished
AFTER_EVALUATION = 20  # activated after the evaluation
CONCEPT_ALREADY_DEFINED = 21  # when you try to add the same concept twice
NOP = 22  # no operation concept. Does nothing
PROPERTY_EVAL_ERROR = 23  # cannot evaluate a property of a concept
ENUMERATION = 24  # represents a list or a set
LIST = 25  # represents a list
CANNOT_RESOLVE_VALUE_ERROR = 26  # in presence of a concept whose default value is not known
SHEERKA = "sheerka"

NODE = 200
GENERIC_NODE = 201
IDENTIFIER_NODE = 202
BEFORE_PARSING = "before parsing"  # activated before evaluation by the parsers
PARSING = "parsing"  # activated during the parsing. It contains the text to parse
AFTER_PARSING = "after parsing"  # after parsing
BEFORE_EVALUATION = "before evaluation"  # before evaluation
EVALUATION = "evaluation"  # activated when the parsing process seems to be finished
AFTER_EVALUATION = "after evaluation"  # activated after the evaluation
BEFORE_RENDERING = "before rendering"  # activated before the output is rendered
RENDERING = "rendering"  # rendering the response from sheerka
AFTER_RENDERING = "after rendering"  # after the response from sheerka is rendered

USER_INPUT = "user input"  # represents an input from a user
SUCCESS = "success"
ERROR = "error"
UNKNOWN_CONCEPT = "unknown concept"  # the requested concept is not recognized
RETURN_VALUE = "return value"  # a value is returned
CONCEPT_TOO_LONG = "concept too long"  # concept cannot be processed by exactConcept parser
NEW_CONCEPT = "new concept"  # when a new concept is added
UNKNOWN_PROPERTY = "unknown property"  # when requesting an unknown property
PARSER_RESULT = "parser result"
TOO_MANY_SUCCESS = "too many success"  # when expecting a limited number of successful return values
TOO_MANY_ERRORS = "too many errors"  # when expecting a limited number of error return values
NOT_FOR_ME = "not for me"  # a parser recognizes that the entry is not meant for it
IS_EMPTY = "is empty"  # when a set is empty
INVALID_RETURN_VALUE = "invalid return value"  # the return value of an evaluator is not correct
CONCEPT_ALREADY_DEFINED = "concept already defined"  # when you try to add the same concept twice
NOP = "no operation"  # no operation concept. Does nothing
PROPERTY_EVAL_ERROR = "property evaluation error"  # cannot evaluate a property of a concept
ENUMERATION = "enum"  # represents a list or a set
LIST = "list"  # represents a list
CANNOT_RESOLVE_VALUE_ERROR = "value cannot be resolved"  # don't know how to find the concept value

NODE = "node"
GENERIC_NODE = "generic node"
IDENTIFIER_NODE = "identifier node"

def __repr__(self):
return "__" + self.name

def __str__(self):
return "__" + self.name

"""
Some concepts have a specific implementation
It's mainly to have a proper __repr__ implementation, or redefine the is_unique attribute
It's mainly to have a proper __repr__ implementation, or because they are singletons (is_unique=True)
"""


class UserInputConcept(Concept):
def __init__(self, text=None, user_name=None):
super().__init__(BuiltinConcepts.USER_INPUT, True, False, BuiltinConcepts.USER_INPUT, text)
self.set_prop("user_name", user_name)

@property
def text(self):
return self.body

@property
def user_name(self):
return self.props["user_name"].value


class SuccessConcept(Concept):
def __init__(self):
super().__init__(BuiltinConcepts.SUCCESS, True, True, BuiltinConcepts.SUCCESS)
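For orientation (not part of the diff), here is a minimal sketch of the pattern the enum above follows: the member name serves as the concept key, the string value is only a human-readable label, and __repr__/__str__ prefix the name with "__". The lookup dict is hypothetical, purely to illustrate key-based access.

    from enum import Enum

    class MiniConcepts(Enum):
        # member name = concept key; the string value is only a readable label
        SUCCESS = "success"
        USER_INPUT = "user input"

        def __repr__(self):
            return "__" + self.name

        def __str__(self):
            return "__" + self.name

    # hypothetical key-based lookup, similar to a concepts cache keyed by name
    by_key = {member.name: member for member in MiniConcepts}
    assert str(by_key["USER_INPUT"]) == "__USER_INPUT"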
+3 -2
@@ -1,12 +1,11 @@
import hashlib
from dataclasses import dataclass
from enum import Enum
import logging
from core.sheerka_logger import get_logger

import core.utils
from core.tokenizer import Tokenizer, TokenKind

log = logging.getLogger(__name__)

PROPERTIES_FOR_DIGEST = ("name", "key",
"definition", "definition_type",
@@ -86,6 +85,8 @@ class Concept:
self.props = {}  # list of Property for this concept
self.cached_asts = {}  # cached ast for the where, pre, post and body parts
self.bnf = None
self.log = get_logger("core." + self.__class__.__name__)
self.init_log = get_logger("init.core." + self.__class__.__name__)

def __repr__(self):
return f"({self.metadata.id}){self.metadata.name}"
+247 -118
@@ -2,20 +2,18 @@ from dataclasses import dataclass, field

from core.builtin_concepts import BuiltinConcepts, ErrorConcept, ReturnValueConcept
from core.concept import Concept, ConceptParts, PROPERTIES_FOR_DIGEST
from evaluators.BaseEvaluator import OneReturnValueEvaluator
from parsers.BaseParser import BaseParser
from sdp.sheerkaDataProvider import SheerkaDataProvider, Event, SheerkaDataProviderDuplicateKeyError
import core.utils
import core.builtin_helpers

import logging
from core.sheerka_logger import console_handler, get_logger

log = logging.getLogger(__name__)
init_log = logging.getLogger(__name__ + ".init")
import logging

concept_evaluation_steps = [BuiltinConcepts.EVALUATION, BuiltinConcepts.AFTER_EVALUATION]
CONCEPT_LEXER_PARSER_CLASS = "parsers.ConceptLexerParser.ConceptLexerParser"

DEBUG_TAB_SIZE = 4

class Sheerka(Concept):
"""
@@ -27,14 +25,17 @@ class Sheerka(Concept):
BUILTIN_CONCEPTS_KEYS = "Builtins_Concepts"  # sequential key for builtin concepts
USER_CONCEPTS_KEYS = "User_Concepts"  # sequential key for user defined concepts

def __init__(self, debug=False, skip_builtins_in_db=False, loggers=None):
log.debug("Starting Sheerka.")
def __init__(self, skip_builtins_in_db=False, debug=False, loggers=None):
self.init_logging(debug, loggers)

super().__init__(BuiltinConcepts.SHEERKA, True, True, BuiltinConcepts.SHEERKA)
self.log.debug("Starting Sheerka.")

# cache of the most used concepts
# Note that these are only templates
# They are used as a footprint for instantiation
# Except of course when the concept is supposed to be unique
# key is the key of the concept (not the name or the id)
self.concepts_cache = {}

#
@@ -64,8 +65,6 @@ class Sheerka(Concept):
self.evaluators_prefix: str = None
self.parsers_prefix: str = None

self.debug = debug
self.loggers = loggers or []
self.skip_builtins_in_db = skip_builtins_in_db

def initialize(self, root_folder: str = None):
@@ -78,8 +77,6 @@ class Sheerka(Concept):
"""

try:
self.init_logging()

self.sdp = SheerkaDataProvider(root_folder)
if self.sdp.first_time:
self.sdp.set_key(self.USER_CONCEPTS_KEYS, 1000)
@@ -102,7 +99,7 @@ class Sheerka(Concept):
Initializes the builtin concepts
:return: None
"""
init_log.debug("Initializing builtin concepts")
self.init_log.debug("Initializing builtin concepts")
builtins_classes = self.get_builtins_classes_as_dict()

# all this initialization of the builtins seems to be a little bit complicated
@@ -118,11 +115,11 @@ class Sheerka(Concept):
if not self.skip_builtins_in_db:
from_db = self.sdp.get_safe(self.CONCEPTS_ENTRY, concept.metadata.key)
if from_db is None:
init_log.debug(f"'{concept.name}' concept is not found in db. Adding.")
self.init_log.debug(f"'{concept.name}' concept is not found in db. Adding.")
self.set_id_if_needed(concept, True)
self.sdp.add("init", self.CONCEPTS_ENTRY, concept, use_ref=True)
else:
init_log.debug(f"Found concept '{from_db}' in db. Updating.")
self.init_log.debug(f"Found concept '{from_db}' in db. Updating.")
concept.update_from(from_db)

self.add_in_cache(concept)
@@ -132,12 +129,13 @@ class Sheerka(Concept):
Init the parsers
:return:
"""
core.utils.init_package_import("parsers")
base_class = core.utils.get_class("parsers.BaseParser.BaseParser")
for parser in core.utils.get_sub_classes("parsers", base_class):
if parser.__module__ == base_class.__module__:
continue

init_log.debug(f"Adding builtin parser '{parser.__name__}'")
self.init_log.debug(f"Adding builtin parser '{parser.__name__}'")
self.parsers[core.utils.get_full_qualified_name(parser)] = parser

def initialize_builtin_evaluators(self):
@@ -145,117 +143,113 @@ class Sheerka(Concept):
Init the evaluators
:return:
"""
core.utils.init_package_import("evaluators")
for evaluator in core.utils.get_sub_classes("evaluators", "evaluators.BaseEvaluator.OneReturnValueEvaluator"):
init_log.debug(f"Adding builtin evaluator '{evaluator.__name__}'")
self.init_log.debug(f"Adding builtin evaluator '{evaluator.__name__}'")
self.evaluators.append(evaluator)

for evaluator in core.utils.get_sub_classes("evaluators", "evaluators.BaseEvaluator.AllReturnValuesEvaluator"):
init_log.debug(f"Adding builtin evaluator '{evaluator.__name__}'")
self.init_log.debug(f"Adding builtin evaluator '{evaluator.__name__}'")
self.evaluators.append(evaluator)

def initialize_concepts_definitions(self, execution_context):
init_log.debug("Initializing concepts definitions")
self.init_log.debug("Initializing concepts definitions")
definitions = self.sdp.get_safe(self.CONCEPTS_DEFINITIONS_ENTRY, load_origin=False)

if definitions is None:
init_log.debug("No BNF defined")
self.init_log.debug("No BNF defined")
return

lexer_parser = self.parsers[CONCEPT_LEXER_PARSER_CLASS]()
ret_val = lexer_parser.initialize(execution_context, definitions)
if not ret_val.status:
init_log.error("Failed to initialize concepts definitions " + str(ret_val.body))
self.init_log.error("Failed to initialize concepts definitions " + str(ret_val.body))
return

self.concepts_grammars = lexer_parser.concepts_grammars

def init_logging(self):
def _logger_filter(record: logging.LogRecord):
if 'all' in self.loggers:
return True

ret = True
if 'init' not in self.loggers and record.name.endswith(".init"):
ret = False

return ret

handler = logging.StreamHandler()
handler.addFilter(_logger_filter)
if self.debug:
log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
log_level = logging.DEBUG
else:
log_format = "%(message)s"
log_level = logging.INFO

logging.basicConfig(format=log_format, level=log_level, handlers=[handler])
def eval(self, text: str):
def evaluate_user_input(self, text: str, user_name="kodjo"):
"""
Note to KSI: If you try to add an execution context to this function,
you may end up in an infinite loop
:param text:
:param user_name:
:return:
"""
log.debug(f"Evaluating '{text}'.")
evt_digest = self.sdp.save_event(Event(text))
log.debug(f"{evt_digest=}")
exec_context = ExecutionContext(self.key, evt_digest, self)
self.log.debug(f"Processing user input '{text}', {user_name=}.")
evt_digest = self.sdp.save_event(Event(text, user_name))
self.log.debug(f"{evt_digest=}")
execution_context = ExecutionContext(self.key, evt_digest, self)

# Before parsing
before_parsing = self.new(BuiltinConcepts.BEFORE_PARSING)
return_values = self.process(exec_context, [], [before_parsing])
return_values = core.utils.remove_from_list(return_values, lambda x: x.value == before_parsing)
user_input = self.ret(self.name, True, self.new(BuiltinConcepts.USER_INPUT, body=text, user_name=user_name))
steps = [
BuiltinConcepts.BEFORE_PARSING,
BuiltinConcepts.PARSING,
BuiltinConcepts.EVALUATION,
BuiltinConcepts.AFTER_EVALUATION
]

# parse
parsing_results = self.parse(exec_context, text)
return_values.extend(parsing_results)
return self.execute(execution_context, user_input, steps)

# evaluate
evaluating = self.new(BuiltinConcepts.EVALUATION)
return_values = self.process(exec_context, return_values, [evaluating])
return_values = core.utils.remove_from_list(return_values, lambda x: x.value == evaluating)
def _call_parsers(self, execution_context, return_values, logger=None):

# post evaluation
after_evaluation = self.new(BuiltinConcepts.AFTER_EVALUATION)
return_values = self.process(exec_context, return_values, [after_evaluation])
return_values = core.utils.remove_from_list(return_values, lambda x: x.value == after_evaluation)

return return_values
def parse(self, context, text):
result = []
if log.isEnabledFor(logging.DEBUG):
debug_text = "'" + text + "'" if isinstance(text, str) \
else "'" + BaseParser.get_text_from_tokens(text) + "' as tokens"
log.debug(f"Parsing {debug_text}")
for parser in self.parsers.values():
p = parser(sheerka=self)
res = p.parse(context, text)
if isinstance(res, list):
result.extend(res)
else:
result.append(res)
# return_values must be a list
if not isinstance(return_values, list):
return_values = [return_values]

for return_value in return_values:
if not return_value.status or not self.isinstance(return_value.body, BuiltinConcepts.USER_INPUT):
continue

to_parse = self.value(return_value)

if self.log.isEnabledFor(logging.DEBUG):
debug_text = "'" + to_parse + "'" if isinstance(to_parse, str) \
else "'" + BaseParser.get_text_from_tokens(to_parse) + "' as tokens"
# self.log.debug(f"Parsing {debug_text}")
for parser in self.parsers.values():
p = parser(sheerka=self)
if logger:
p.log = logger
res = p.parse(execution_context, to_parse)

if hasattr(res, "__iter__"):
for r in res:
r.parents = [return_value]
result.append(r)
else:
res.parents = [return_value]
result.append(res)

return result

def process(self, context, return_values, initial_concepts=None):
log.debug(f"{initial_concepts=}. Processing " + core.utils.pp(return_values))
def _call_evaluators(self, execution_context, return_values, process_step, evaluation_context=None):
"""

"""

# return_values must be a list
if not isinstance(return_values, list):
return_values = [return_values]

# adds contextual concepts
if initial_concepts:
for concept in initial_concepts:
return_values.append(self.ret(context.who, True, concept))
# evaluation contexts are contexts that may modify the behaviour of the execution
# They first need to be transformed into return values
if evaluation_context is None:
evaluation_return_values = []
else:
evaluation_return_values = [self.ret(execution_context.who, True, c) for c in evaluation_context]

# add the current step as part of the evaluation context
evaluation_return_values.append(self.ret(execution_context.who, True, self.new(process_step)))

# the pool of return values is the mix of both
return_values.extend(evaluation_return_values)

# group the evaluators by priority and sort them
# The first one to be applied will be the one with the highest priority
grouped_evaluators = {}
all_evaluators = [e() for e in self.evaluators]
for evaluator in [e for e in all_evaluators if e.enabled]:
for evaluator in [e() for e in self.evaluators if e.enabled]:
grouped_evaluators.setdefault(evaluator.priority, []).append(evaluator)
sorted_priorities = sorted(grouped_evaluators.keys(), reverse=True)
@@ -264,19 +258,18 @@ class Sheerka(Concept):
simple_digest = return_values[:]

for priority in sorted_priorities:
# log.debug("Processing priority " + str(priority))
# for item in return_values:
# log.debug(item)

original_items = return_values[:]
evaluated_items = []
to_delete = []
for evaluator in grouped_evaluators[priority]:

# process evaluators that work on a single return value
from evaluators.BaseEvaluator import OneReturnValueEvaluator
if isinstance(evaluator, OneReturnValueEvaluator):
for item in original_items:
if evaluator.matches(context, item):
result = evaluator.eval(context, item)
if evaluator.matches(execution_context, item):
result = evaluator.eval(execution_context, item)
if result is None:
continue
elif isinstance(result, list):
@@ -292,8 +285,8 @@ class Sheerka(Concept):
to_delete.append(item)
# process evaluators that work on all return values
else:
if evaluator.matches(context, original_items):
results = evaluator.eval(context, original_items)
if evaluator.matches(execution_context, original_items):
results = evaluator.eval(execution_context, original_items)
if results is None:
continue
if not isinstance(results, list):
@@ -306,27 +299,36 @@ class Sheerka(Concept):
return_values.extend([item for item in original_items if item not in to_delete])

# have we done something?
to_compare = return_values[:]  # set(id(r) for r in return_values)
to_compare = return_values[:]
if simple_digest == to_compare:
break

# inc the iteration and continue
execution_context = execution_context.push(iteration=execution_context.iteration + 1)

# remove all evaluation contexts that were not reduced
return_values = core.utils.remove_list_from_list(return_values, evaluation_return_values)
return return_values

def chain_process(self, context, return_values, initial_concepts):
def execute(self, execution_context, return_values, execution_steps, logger=None):
"""
Executes process for all initial contexts
:param context:
:param return_values:
:param initial_concepts:
:return:
:param execution_context:
:param return_values:
:param execution_steps:
:param logger: logger to use (if not directly called by sheerka)
:return:
"""
for concept in initial_concepts:
if isinstance(concept, BuiltinConcepts):
concept = self.new(BuiltinConcepts)

init = [self.ret(context.who, True, concept)]
return_values = self.process(context, return_values, [init])
return_values = core.utils.remove_from_list(return_values, lambda x: x.value == init)
for step in execution_steps:
sub_context = execution_context.push(step=step)
sub_context.log(logger or self.log, f"{step=}, context='{sub_context}'")
if step == BuiltinConcepts.PARSING:
return_values = self._call_parsers(sub_context, return_values, logger)
else:
return_values = self._call_evaluators(sub_context, return_values, step)

sub_context.log_result(logger or self.log, return_values)

return return_values
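As an aside, here is a minimal usage sketch (assumed values, not part of the diff) of the refactored flow: evaluate_user_input wraps the raw text in a USER_INPUT return value and delegates to execute, which walks the step list, sending the PARSING step to _call_parsers and every other step to _call_evaluators.

    # hypothetical driver; assumes the project's parsers, evaluators and data provider are available
    sheerka = Sheerka(debug=True, loggers=["init"])
    sheerka.initialize()
    results = sheerka.evaluate_user_input("2 + 2", user_name="kodjo")
    for return_value in results:
        print(return_value.status, return_value.value)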
@@ -340,7 +342,7 @@ class Sheerka(Concept):
if obj.metadata.id is not None:
return
obj.metadata.id = self.sdp.get_next_key(self.BUILTIN_CONCEPTS_KEYS if is_builtin else self.USER_CONCEPTS_KEYS)
log.debug(f"Setting id '{obj.metadata.id}' to concept '{obj.metadata.name}'.")
self.log.debug(f"Setting id '{obj.metadata.id}' to concept '{obj.metadata.name}'.")

def create_new_concept(self, context, concept: Concept):
"""
@@ -369,8 +371,8 @@ class Sheerka(Concept):

# check if it's a valid BNF or whether it breaks the known rules
concept_lexer_parser = self.parsers[CONCEPT_LEXER_PARSER_CLASS](grammars=self.concepts_grammars.copy())
sub_context = context.push(self.name, "Initializing concept definition")
sub_context.concepts_cache[concept.key] = concept  # the concept is not in the real cache yet
sub_context = context.push(self.name, desc="Initializing concept definition")
sub_context.concepts[concept.key] = concept  # the concept is not in the real cache yet
init_ret_value = concept_lexer_parser.initialize(sub_context, concepts_definitions)
if not init_ret_value.status:
return self.ret(self.create_new_concept.__name__, False, ErrorConcept(init_ret_value.value))
@@ -402,6 +404,8 @@ class Sheerka(Concept):
:param context:
:return:
"""
# steps = [BuiltinConcepts.BEFORE_PARSING, BuiltinConcepts.PARSING, BuiltinConcepts.AFTER_PARSING]
steps = [BuiltinConcepts.PARSING]
for part_key in ConceptParts:
source = getattr(concept.metadata, part_key.value)
if source is None or not isinstance(source, str) or source == "":
@@ -409,10 +413,12 @@ class Sheerka(Concept):
# I refuse empty strings for performance, I don't want to handle useless NOPConcepts
continue
else:
concept.cached_asts[part_key] = self.parse(context, source)
to_parse = self.ret(context.who, True, self.new(BuiltinConcepts.USER_INPUT, body=source))
concept.cached_asts[part_key] = self.execute(context, to_parse, steps)

for prop in concept.props:
concept.cached_asts[prop] = self.parse(context, concept.props[prop].value)
to_parse = self.ret(context.who, True, self.new(BuiltinConcepts.USER_INPUT, body=concept.props[prop].value))
concept.cached_asts[prop] = self.execute(context, to_parse, steps)

# updates the code of the reference when possible
if concept.key in self.concepts_cache:
@@ -446,7 +452,7 @@ class Sheerka(Concept):
part_key = ConceptParts(prop)
if concept.cached_asts[part_key] is None:
continue
res = self.chain_process(context, concept.cached_asts[part_key], concept_evaluation_steps)
res = self.execute(context, concept.cached_asts[part_key], concept_evaluation_steps)
res = core.builtin_helpers.expect_one(context, res)
setattr(concept.metadata, prop, res.value)
@@ -564,6 +570,11 @@ class Sheerka(Concept):
if obj is None:
return None

if self.isinstance(obj, BuiltinConcepts.RETURN_VALUE) and \
obj.status and \
self.isinstance(obj.value, BuiltinConcepts.USER_INPUT):
return obj.value.text

if not isinstance(obj, Concept):
return obj
@@ -664,6 +675,19 @@ class Sheerka(Concept):
def test(self):
return f"I have access to Sheerka !"

def test_error(self):
raise Exception("I can raise an error")

def dump_concepts(self):
lst = self.sdp.list(self.CONCEPTS_ENTRY)
for item in lst:
if hasattr(item, "__iter__"):
for i in item:
self.log.info(i)
else:
self.log.info(item)


@staticmethod
def get_builtins_classes_as_dict():
res = {}
@@ -673,18 +697,123 @@ class Sheerka(Concept):

return res

@staticmethod
def init_logging(debug, loggers):
core.sheerka_logger.set_enabled(loggers)
if debug:
# log_format = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
log_format = "%(asctime)s [%(levelname)s] %(message)s"
log_level = logging.DEBUG
else:
log_format = "%(message)s"
log_level = logging.INFO

# logging.root.setLevel(log_level)
# fmt = logging.Formatter(log_format, None, "%")
# console_handler.setFormatter(fmt)

logging.basicConfig(format=log_format, level=log_level, handlers=[console_handler])
@dataclass
class ExecutionContext:
"""
To keep track of the execution of a request
"""
who: object  # who is asking
event_digest: str  # what was the (original) trigger
sheerka: Sheerka  # sheerka
desc: str = None  # human description of what is going on
obj: Concept = None  # what is the subject of the execution context (if known)
concepts_cache: dict = field(default_factory=dict)

def push(self, who, desc=None, obj=None):
return ExecutionContext(who, self.event_digest, self.sheerka, desc=desc, obj=obj)
def __init__(self,
who,
event_digest: str,
sheerka: Sheerka,
/,
desc: str = None,
obj: Concept = None,
step: BuiltinConcepts = None,
iteration: int = 0,
concepts: dict = None):

self.who = who  # who is asking
self.event_digest = event_digest  # what was the (original) trigger
self.sheerka = sheerka  # sheerka

self.step = step
self.iteration = iteration

self.desc = desc  # human description of what is going on
self.obj = obj  # what is the subject of the execution context (if known)

self.concepts = concepts or {}

self._id = ExecutionContextIdManager.get_id(event_digest)
self._tab = ""

@property
def id(self):
return self._id

def push(self, who=None, /, **kwargs):
who = who or self.who
desc = kwargs.get("desc", "")
obj = kwargs.get("obj", self.obj)
concepts = kwargs.get("concepts", self.concepts)
step = kwargs.get("step", self.step)
iteration = kwargs.get("iteration", self.iteration)
new = ExecutionContext(
who,
self.event_digest,
self.sheerka,
desc=desc,
obj=obj,
concepts=concepts,
step=step,
iteration=iteration,
)
new._tab = self._tab + " " * DEBUG_TAB_SIZE
return new

def log_new(self, logger):
logger.debug(f"[{self._id:2}]" + self._tab + str(self))

def log(self, logger, message, who=None):
logger.debug(f"[{self._id:2}]" + self._tab + (f"[{who}] " if who else "") + str(message))

def log_error(self, logger, message, who=None):
logger.exception(f"[{self._id:2}]" + self._tab + (f"[{who}] " if who else "") + str(message))

def log_result(self, logger, return_values):
if not logger.isEnabledFor(logging.DEBUG):
return

if len(return_values) == 0:
logger.debug(self._tab + "No return value")

for r in return_values:
to_str = self.return_value_to_str(r)
logger.debug(f"[{self._id:2}]" + self._tab + "-> " + to_str)

@staticmethod
def return_value_to_str(r):
value = str(r.value)
if len(value) > 50:
value = value[:47] + "..."
to_str = f"ReturnValue(who={r.who}, status={r.status}, value={value})"
return to_str

def __repr__(self):
msg = f"ExecutionContext(who={self.who}, id={self._id}"
if self.desc:
msg += f", desc='{self.desc}'"
msg += ")"
return msg


class ExecutionContextIdManager:
ids = {}

@staticmethod
def get_id(event_digest):
if event_digest in ExecutionContextIdManager.ids:
ExecutionContextIdManager.ids[event_digest] += 1
else:
ExecutionContextIdManager.ids[event_digest] = 0
return ExecutionContextIdManager.ids[event_digest]
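To illustrate only (class and attribute names are from the diff, the digest value is made up): each push() keeps the same event digest, copies the current state, and widens the indentation used by the log helpers, while ExecutionContextIdManager hands out per-digest ids.

    # hypothetical nesting of execution contexts; 'sheerka' is an existing Sheerka instance
    root = ExecutionContext("sheerka", "digest-123", sheerka)
    parsing = root.push(step=BuiltinConcepts.PARSING, desc="parsing user input")
    parsing.log(logging.getLogger("core"), "starting parsers")   # printed one indent level deeper than root
    retry = parsing.push(iteration=parsing.iteration + 1)        # one level deeper again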
@@ -0,0 +1,46 @@
import logging

enabled = []
disabled = ["init", "sdp", "parsers", "evaluators", "verbose"]

console_handler = logging.StreamHandler()

all_loggers = {}


def set_enabled(to_enable):
if to_enable is None:
return

if not hasattr(to_enable, "__iter__"):
to_enable = [to_enable]

enabled.extend(to_enable)


def to_discard(logger_class):
if logger_class in enabled:
return False

if logger_class not in disabled:
return False

return True


def get_logger(logger_name):
if logger_name in all_loggers:
return all_loggers[logger_name]

logger = logging.getLogger(logger_name)
all_loggers[logger_name] = logger

for d in disabled:
if logger_name.startswith(d + ".") and to_discard(d):
logger.disabled = True

for e in enabled:
if logger_name.startswith("verbose." + e):
logger.disabled = False

return logger
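A short illustrative use of the new logging helpers (module path and logger names are assumptions): categories in disabled are muted by default, set_enabled lifts the mute for the requested categories, and get_logger then creates and caches named loggers with that policy applied.

    import core.sheerka_logger as sheerka_logger

    sheerka_logger.set_enabled(["init"])                        # 'init' loggers will stay active
    init_log = sheerka_logger.get_logger("init.core.Sheerka")   # enabled: 'init' was re-enabled above
    sdp_log = sheerka_logger.get_logger("sdp.store")            # disabled: 'sdp' is in the disabled list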
+18 -2
@@ -113,16 +113,21 @@ def get_classes_from_package(package_name):
yield c


def get_sub_classes(package_name, base_class):
def init_package_import(package_name):
pkg = __import__(package_name)
prefix = pkg.__name__ + "."
for (module_loader, name, ispkg) in pkgutil.iter_modules(pkg.__path__, prefix):
importlib.import_module(name)


def get_sub_classes(package_name, base_class):
base_class = get_class(base_class) if isinstance(base_class, str) else base_class
return set(base_class.__subclasses__()).union(
all_class = set(base_class.__subclasses__()).union(
[s for c in base_class.__subclasses__() for s in get_sub_classes(package_name, c)])

# limit to the classes of the package
return [c for c in all_class if c.__module__.startswith(package_name)]
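To show how the two helpers are meant to pair up (a sketch with an assumed package name): init_package_import imports every module of a package so that __subclasses__() can actually see the classes defined there, after which get_sub_classes collects only the subclasses that live inside that package.

    import core.utils

    # hypothetical discovery of every parser shipped in the 'parsers' package
    core.utils.init_package_import("parsers")
    for cls in core.utils.get_sub_classes("parsers", "parsers.BaseParser.BaseParser"):
        print(core.utils.get_full_qualified_name(cls))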
def remove_from_list(lst, to_remove_predicate):
"""
@@ -143,6 +148,17 @@ def remove_from_list(lst, to_remove_predicate):
return lst


def remove_list_from_list(lst, to_remove):
# https://stackoverflow.com/questions/2514961/remove-all-values-within-one-list-from-another-list/30353802
# explains that list comprehension is not the best approach
for item in to_remove:
try:
lst.remove(item)
except ValueError:
pass
return lst


def product(a, b):
"""
Kind of cartesian product between lists a and b