Fixed #18 : Parsing and evaluating Python
This commit is contained in:
+19
-3
@@ -7,7 +7,7 @@ from os import path
|
|||||||
|
|
||||||
import prompt_toolkit
|
import prompt_toolkit
|
||||||
import requests
|
import requests
|
||||||
from prompt_toolkit import HTML, print_formatted_text, prompt
|
from prompt_toolkit import ANSI, print_formatted_text, prompt
|
||||||
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
|
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
|
||||||
from prompt_toolkit.history import FileHistory
|
from prompt_toolkit.history import FileHistory
|
||||||
from requests import ConnectionError, HTTPError
|
from requests import ConnectionError, HTTPError
|
||||||
@@ -16,6 +16,18 @@ from constants import CLIENT_OPERATION_QUIT, EXIT_COMMANDS
|
|||||||
|
|
||||||
connect_regex = re.compile("connect\(['\"](.*?)['\"]\s*,\s*['\"](.*?)['\"]\)")
|
connect_regex = re.compile("connect\(['\"](.*?)['\"]\s*,\s*['\"](.*?)['\"]\)")
|
||||||
|
|
||||||
|
CONSOLE_COLORS_MAP = {
|
||||||
|
"reset": "\u001b[0m",
|
||||||
|
"black": "\u001b[30m",
|
||||||
|
"red": "\u001b[31m",
|
||||||
|
"green": "\u001b[32m",
|
||||||
|
"yellow": "\u001b[33m",
|
||||||
|
"blue": "\u001b[34m",
|
||||||
|
"magenta": "\u001b[35m",
|
||||||
|
"cyan": "\u001b[36m",
|
||||||
|
"white": "\u001b[37m",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class TestResponse:
|
class TestResponse:
|
||||||
@@ -116,7 +128,11 @@ class SheerkaClient:
|
|||||||
as_json = response.json()
|
as_json = response.json()
|
||||||
|
|
||||||
# Print the response and loop
|
# Print the response and loop
|
||||||
self.print_info(as_json['response'])
|
if as_json['status']:
|
||||||
|
self.print_info(as_json['response'])
|
||||||
|
else:
|
||||||
|
self.print_error(as_json['response'])
|
||||||
|
|
||||||
if as_json['command'] == CLIENT_OPERATION_QUIT:
|
if as_json['command'] == CLIENT_OPERATION_QUIT:
|
||||||
break
|
break
|
||||||
else:
|
else:
|
||||||
@@ -142,7 +158,7 @@ class SheerkaClient:
|
|||||||
:return:
|
:return:
|
||||||
:rtype:
|
:rtype:
|
||||||
"""
|
"""
|
||||||
print_formatted_text(HTML(f'<ansired>{message}</ansired>'))
|
print_formatted_text(ANSI(f'{CONSOLE_COLORS_MAP["red"]}{message}{CONSOLE_COLORS_MAP["reset"]}'))
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def print_info(message: str):
|
def print_info(message: str):
|
||||||
|
|||||||
@@ -0,0 +1,106 @@
|
|||||||
|
import ast
|
||||||
|
|
||||||
|
from caching.FastCache import FastCache
|
||||||
|
from common.global_symbols import NotFound
|
||||||
|
|
||||||
|
|
||||||
|
class UnreferencedNamesVisitor(ast.NodeVisitor):
|
||||||
|
"""
|
||||||
|
Try to find symbols that will be requested by the ast
|
||||||
|
It can be variable names, but also function names
|
||||||
|
"""
|
||||||
|
|
||||||
|
cache = FastCache()
|
||||||
|
|
||||||
|
def __init__(self, context):
|
||||||
|
self.context = context
|
||||||
|
self.names = set()
|
||||||
|
|
||||||
|
def get_names(self, node):
|
||||||
|
key = self.__class__.__name__, node
|
||||||
|
names = UnreferencedNamesVisitor.cache.get(key)
|
||||||
|
if names is NotFound:
|
||||||
|
self.visit(node)
|
||||||
|
UnreferencedNamesVisitor.cache.put(key, self.names)
|
||||||
|
return self.names
|
||||||
|
|
||||||
|
return names
|
||||||
|
|
||||||
|
def visit_Name(self, node):
|
||||||
|
self.names.add(node.id)
|
||||||
|
|
||||||
|
def visit_For(self, node: ast.For):
|
||||||
|
self.visit_selected(node, ["body", "orelse"])
|
||||||
|
|
||||||
|
def visit_selected(self, node, to_visit):
|
||||||
|
"""Called if no explicit visitor function exists for a node."""
|
||||||
|
for field in to_visit:
|
||||||
|
value = getattr(node, field)
|
||||||
|
if isinstance(value, list):
|
||||||
|
for item in value:
|
||||||
|
if isinstance(item, ast.AST):
|
||||||
|
self.visit(item)
|
||||||
|
elif isinstance(value, ast.AST):
|
||||||
|
self.visit(value)
|
||||||
|
|
||||||
|
|
||||||
|
class UnreferencedVariablesVisitor(UnreferencedNamesVisitor):
|
||||||
|
"""
|
||||||
|
Try to find variables names that will be requested by the ast
|
||||||
|
This visitor do not yield function names
|
||||||
|
"""
|
||||||
|
|
||||||
|
def visit_Call(self, node: ast.Call):
|
||||||
|
self.visit_selected(node, ["args", "keywords"])
|
||||||
|
|
||||||
|
def visit_keyword(self, node: ast.keyword):
|
||||||
|
self.names.add(node.arg)
|
||||||
|
self.visit_selected(node, ["value"])
|
||||||
|
|
||||||
|
|
||||||
|
class NamesWithAttributesVisitor(ast.NodeVisitor):
|
||||||
|
"""
|
||||||
|
Looks for all attributes for a given name
|
||||||
|
>>> ast_ = ast.parse("foo.bar.baz", "<src>", mode="exec")
|
||||||
|
>>> assert NamesWithAttributesVisitor().get_sequences(ast_, "foo") == [["foo", "bar", "baz"]]
|
||||||
|
|
||||||
|
It parses all expressions / statements
|
||||||
|
>>> ast_ = ast.parse("foo.bar.baz; one.two.three; foo.bar", "<src>", mode="exec")
|
||||||
|
>>> assert NamesWithAttributesVisitor().get_sequences(ast_, "foo") == [["foo", "bar", "baz"], ["foo", "bar"]]
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
self.sequences = []
|
||||||
|
self.temp = []
|
||||||
|
self.to_lookup = None
|
||||||
|
|
||||||
|
def get_sequences(self, ast_, to_lookup):
|
||||||
|
self.to_lookup = to_lookup
|
||||||
|
self.visit(ast_)
|
||||||
|
return self.sequences
|
||||||
|
|
||||||
|
def visit_Attribute(self, node: ast.Attribute):
|
||||||
|
self.temp.append(node.attr)
|
||||||
|
if isinstance(node.value, ast.Attribute):
|
||||||
|
self.visit_Attribute(node.value)
|
||||||
|
if isinstance(node.value, ast.Subscript):
|
||||||
|
self.visit_Subscript(node.value)
|
||||||
|
elif isinstance(node.value, ast.Name):
|
||||||
|
self.visit_Name(node.value)
|
||||||
|
|
||||||
|
def visit_Subscript(self, node: ast.Subscript):
|
||||||
|
# TODO manage the index when it will be needed
|
||||||
|
# using node.slice
|
||||||
|
if isinstance(node.value, ast.Attribute):
|
||||||
|
self.visit_Attribute(node.value)
|
||||||
|
if isinstance(node.value, ast.Subscript):
|
||||||
|
self.visit_Subscript(node.value)
|
||||||
|
elif isinstance(node.value, ast.Name):
|
||||||
|
self.visit_Name(node.value)
|
||||||
|
|
||||||
|
def visit_Name(self, node: ast.Name):
|
||||||
|
if node.id == self.to_lookup:
|
||||||
|
self.temp.append(node.id)
|
||||||
|
self.temp.reverse()
|
||||||
|
self.sequences.append(self.temp.copy())
|
||||||
|
self.temp.clear()
|
||||||
@@ -1,6 +1,7 @@
|
|||||||
import importlib
|
import importlib
|
||||||
import pkgutil
|
import pkgutil
|
||||||
from copy import deepcopy
|
from copy import deepcopy
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
from common.global_symbols import CustomType
|
from common.global_symbols import CustomType
|
||||||
|
|
||||||
@@ -227,6 +228,25 @@ def unstr_concept(concept_repr, prefix='c:'):
|
|||||||
return key if key != "" else None, c_id if c_id != "" else None
|
return key if key != "" else None, c_id if c_id != "" else None
|
||||||
|
|
||||||
|
|
||||||
|
def encode_concept(t: tuple | Any, wrapper="C"):
|
||||||
|
"""
|
||||||
|
Given a tuple of concept id, concept id
|
||||||
|
Create a valid Python identifier that can be parsed back
|
||||||
|
|
||||||
|
>>> assert encode_concept(("key", "id")) == "__C__KEY_key__ID_id__C__"
|
||||||
|
>>> assert encode_concept((None, "id")) == "__C__KEY_00None00__ID_id__C__"
|
||||||
|
>>> assert encode_concept(("key", None)) == "__C__KEY_key__ID_00None00__C__"
|
||||||
|
|
||||||
|
:param t:
|
||||||
|
:param wrapper:
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
|
||||||
|
key, id_ = (t[0], t[1]) if isinstance(t, tuple) else (t.key, t.id)
|
||||||
|
sanitized_key = "".join(c if c.isalnum() else "0" for c in key) if key else "00None00"
|
||||||
|
return f"__{wrapper}__KEY_{sanitized_key}__ID_{id_ or '00None00'}__{wrapper}__"
|
||||||
|
|
||||||
|
|
||||||
def compute_hash(obj):
|
def compute_hash(obj):
|
||||||
"""
|
"""
|
||||||
Helper to get the hash from collection
|
Helper to get the hash from collection
|
||||||
@@ -291,3 +311,55 @@ def to_dict(items, get_attr):
|
|||||||
res.setdefault(get_attr(item), []).append(item)
|
res.setdefault(get_attr(item), []).append(item)
|
||||||
|
|
||||||
return res
|
return res
|
||||||
|
|
||||||
|
|
||||||
|
def get_text_from_tokens(tokens, custom_switcher=None, tracker=None):
|
||||||
|
"""
|
||||||
|
Create the source code, from the list of token
|
||||||
|
:param tokens: list of tokens
|
||||||
|
:param custom_switcher: to override the behaviour (the return value) of some token
|
||||||
|
:param tracker: keep track of the original token value when custom switched
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
if tokens is None:
|
||||||
|
return ""
|
||||||
|
|
||||||
|
if not hasattr(tokens, "__iter__"):
|
||||||
|
tokens = [tokens]
|
||||||
|
|
||||||
|
switcher = custom_switcher or {}
|
||||||
|
|
||||||
|
res = ""
|
||||||
|
for token in tokens:
|
||||||
|
value = switcher.get(token.type, lambda t: t.str_value)(token)
|
||||||
|
res += value
|
||||||
|
if tracker is not None and token.type in custom_switcher:
|
||||||
|
tracker[value] = token
|
||||||
|
return res
|
||||||
|
|
||||||
|
|
||||||
|
def dict_product(a, b):
|
||||||
|
"""
|
||||||
|
Cartesian product like where a and b are list of dictionaries
|
||||||
|
>>> a = [{"a": "a", "b":"b", "c":"c"}]
|
||||||
|
>>> b = [{"d":"d1"}, {"d":"d2"}]
|
||||||
|
>>>
|
||||||
|
>>> assert dict_product(a, b) == [{"a": "a", "b":"b", "c":"c", "d":"d1"}, {"a": "a", "b":"b", "c":"c", "d":"d2"}]
|
||||||
|
|
||||||
|
:param a:
|
||||||
|
:param b:
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
if a is None or len(a) == 0:
|
||||||
|
return b
|
||||||
|
if b is None or len(b) == 0:
|
||||||
|
return a
|
||||||
|
|
||||||
|
res = []
|
||||||
|
for item_a in a:
|
||||||
|
for item_b in b:
|
||||||
|
items = item_a.copy()
|
||||||
|
items.update(item_b)
|
||||||
|
res.append(items)
|
||||||
|
|
||||||
|
return res
|
||||||
|
|||||||
@@ -5,3 +5,4 @@ class BuiltinConcepts:
|
|||||||
UNKNOWN_CONCEPT = "__UNKNOWN_CONCEPT"
|
UNKNOWN_CONCEPT = "__UNKNOWN_CONCEPT"
|
||||||
USER_INPUT = "__USER_INPUT"
|
USER_INPUT = "__USER_INPUT"
|
||||||
PARSER_INPUT = "__PARSER_INPUT"
|
PARSER_INPUT = "__PARSER_INPUT"
|
||||||
|
PYTHON_CODE = "__PYTHON_CODE"
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ import time
|
|||||||
from core.Event import Event
|
from core.Event import Event
|
||||||
|
|
||||||
|
|
||||||
class ExecutionContextActions:
|
class ContextActions:
|
||||||
TESTING = "Testing"
|
TESTING = "Testing"
|
||||||
INIT_SHEERKA = "Init Sheerka"
|
INIT_SHEERKA = "Init Sheerka"
|
||||||
EVALUATE_USER_INPUT = "Evaluate user input"
|
EVALUATE_USER_INPUT = "Evaluate user input"
|
||||||
@@ -18,9 +18,16 @@ class ExecutionContextActions:
|
|||||||
EVALUATION = "Evaluation"
|
EVALUATION = "Evaluation"
|
||||||
AFTER_EVALUATION = "After Evaluation"
|
AFTER_EVALUATION = "After Evaluation"
|
||||||
|
|
||||||
|
EVALUATING_CONCEPT = "Evaluating concept"
|
||||||
|
BUILD_CONCEPT = "Building all attributes"
|
||||||
|
BUILD_CONCEPT_ATTR = "Building one attribute"
|
||||||
|
EVAL_CONCEPT = "Evaluating all attributes"
|
||||||
|
EVAL_CONCEPT_ATTR = "Evaluating one attribute"
|
||||||
|
|
||||||
|
|
||||||
class ContextHint:
|
class ContextHint:
|
||||||
REDUCE_CONCEPTS = "Reduce Concepts" # to tell the process to only keep the meaningful results
|
REDUCE_CONCEPTS = "Reduce Concepts" # to tell the process to only keep the meaningful results
|
||||||
|
EXPRESSION_ONLY_REQUESTED = "Expression Only"
|
||||||
|
|
||||||
|
|
||||||
ids = {} # keep track of the next execution context id, for a given event id
|
ids = {} # keep track of the next execution context id, for a given event id
|
||||||
@@ -51,7 +58,7 @@ class ExecutionContext:
|
|||||||
who: str,
|
who: str,
|
||||||
event: Event,
|
event: Event,
|
||||||
sheerka,
|
sheerka,
|
||||||
action: ExecutionContextActions,
|
action: ContextActions,
|
||||||
action_context: object,
|
action_context: object,
|
||||||
desc: str = None,
|
desc: str = None,
|
||||||
logger=None,
|
logger=None,
|
||||||
@@ -102,6 +109,10 @@ class ExecutionContext:
|
|||||||
def long_id(self):
|
def long_id(self):
|
||||||
return f"{self.event.get_digest()}:{self._id}"
|
return f"{self.event.get_digest()}:{self._id}"
|
||||||
|
|
||||||
|
@property
|
||||||
|
def medium_id(self):
|
||||||
|
return f"{self.event.get_digest()[:8]}:{self._id}"
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def id(self):
|
def id(self):
|
||||||
return self._id
|
return self._id
|
||||||
@@ -143,7 +154,7 @@ class ExecutionContext:
|
|||||||
|
|
||||||
def push(self,
|
def push(self,
|
||||||
who: str,
|
who: str,
|
||||||
action: ExecutionContextActions,
|
action: ContextActions,
|
||||||
action_context: object,
|
action_context: object,
|
||||||
desc: str = None,
|
desc: str = None,
|
||||||
logger=None):
|
logger=None):
|
||||||
@@ -162,6 +173,9 @@ class ExecutionContext:
|
|||||||
self._children.append(child)
|
self._children.append(child)
|
||||||
return child
|
return child
|
||||||
|
|
||||||
|
def get_parent(self):
|
||||||
|
return self._parent
|
||||||
|
|
||||||
def get_children(self, level=-1):
|
def get_children(self, level=-1):
|
||||||
"""
|
"""
|
||||||
recursively look for children
|
recursively look for children
|
||||||
@@ -173,6 +187,30 @@ class ExecutionContext:
|
|||||||
if level != 1:
|
if level != 1:
|
||||||
yield from child.get_children(level - 1)
|
yield from child.get_children(level - 1)
|
||||||
|
|
||||||
|
def get_parents(self, level=-1):
|
||||||
|
"""
|
||||||
|
recursively look for parent
|
||||||
|
:return:
|
||||||
|
:rtype:
|
||||||
|
"""
|
||||||
|
if level == 0 or self._parent is None:
|
||||||
|
return
|
||||||
|
|
||||||
|
yield self._parent
|
||||||
|
yield from self._parent.get_parents(level - 1)
|
||||||
|
|
||||||
|
def in_context(self, hint: ContextHint):
|
||||||
|
return hint in self.protected_hints or \
|
||||||
|
hint in self.global_hints or \
|
||||||
|
hint in self.private_hints
|
||||||
|
|
||||||
|
def get_from_short_term_memory(self, key):
|
||||||
|
return self.sheerka.get_from_short_term_memory(self, key)
|
||||||
|
|
||||||
|
def log(self, message: str, who: str = None):
|
||||||
|
"""Send debug information to logger"""
|
||||||
|
pass
|
||||||
|
|
||||||
def __enter__(self):
|
def __enter__(self):
|
||||||
self._start = time.time_ns()
|
self._start = time.time_ns()
|
||||||
return self
|
return self
|
||||||
|
|||||||
+33
-13
@@ -10,21 +10,21 @@ from caching.Cache import Cache
|
|||||||
from caching.IncCache import IncCache
|
from caching.IncCache import IncCache
|
||||||
from common.utils import get_logger_name, get_sub_classes, import_module_and_sub_module
|
from common.utils import get_logger_name, get_sub_classes, import_module_and_sub_module
|
||||||
from core.BuiltinConcepts import BuiltinConcepts
|
from core.BuiltinConcepts import BuiltinConcepts
|
||||||
from core.ErrorContext import ErrorContext
|
|
||||||
from core.Event import Event
|
from core.Event import Event
|
||||||
from core.ExecutionContext import ContextHint, ExecutionContext, ExecutionContextActions
|
from core.ExecutionContext import ContextHint, ExecutionContext, ContextActions
|
||||||
from core.ReturnValue import ReturnValue
|
from core.ReturnValue import ReturnValue
|
||||||
from core.concept import Concept, ConceptMetadata
|
from core.concept import Concept, ConceptMetadata
|
||||||
|
from core.error import ErrorContext
|
||||||
from ontologies.SheerkaOntologyManager import SheerkaOntologyManager
|
from ontologies.SheerkaOntologyManager import SheerkaOntologyManager
|
||||||
from server.authentication import User
|
from server.authentication import User
|
||||||
|
|
||||||
EXECUTE_STEPS = [
|
EXECUTE_STEPS = [
|
||||||
ExecutionContextActions.BEFORE_PARSING,
|
ContextActions.BEFORE_PARSING,
|
||||||
ExecutionContextActions.PARSING,
|
ContextActions.PARSING,
|
||||||
ExecutionContextActions.AFTER_PARSING,
|
ContextActions.AFTER_PARSING,
|
||||||
ExecutionContextActions.BEFORE_EVALUATION,
|
ContextActions.BEFORE_EVALUATION,
|
||||||
ExecutionContextActions.EVALUATION,
|
ContextActions.EVALUATION,
|
||||||
ExecutionContextActions.AFTER_EVALUATION
|
ContextActions.AFTER_EVALUATION
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
@@ -122,6 +122,7 @@ class Sheerka:
|
|||||||
self.om = SheerkaOntologyManager(self, root_folder)
|
self.om = SheerkaOntologyManager(self, root_folder)
|
||||||
# self.builtin_cache, self.builtin_cache_by_class_name = self.get_builtins_classes_as_dict()
|
# self.builtin_cache, self.builtin_cache_by_class_name = self.get_builtins_classes_as_dict()
|
||||||
|
|
||||||
|
self.initialize_bind_methods()
|
||||||
self.initialize_caching()
|
self.initialize_caching()
|
||||||
self.initialize_evaluators()
|
self.initialize_evaluators()
|
||||||
self.initialize_services()
|
self.initialize_services()
|
||||||
@@ -133,7 +134,7 @@ class Sheerka:
|
|||||||
with ExecutionContext(self.name,
|
with ExecutionContext(self.name,
|
||||||
event,
|
event,
|
||||||
self,
|
self,
|
||||||
ExecutionContextActions.INIT_SHEERKA,
|
ContextActions.INIT_SHEERKA,
|
||||||
None,
|
None,
|
||||||
desc="Initializing Sheerka.") as exec_context:
|
desc="Initializing Sheerka.") as exec_context:
|
||||||
if self.om.current_sdp().first_time:
|
if self.om.current_sdp().first_time:
|
||||||
@@ -165,6 +166,14 @@ class Sheerka:
|
|||||||
|
|
||||||
return res
|
return res
|
||||||
|
|
||||||
|
def initialize_bind_methods(self):
|
||||||
|
"""
|
||||||
|
Add some methods to the list of available methods
|
||||||
|
:return:
|
||||||
|
:rtype:
|
||||||
|
"""
|
||||||
|
self.bind_service_method(self.name, self.echo, False)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def initialize_logging(is_debug, root_folder):
|
def initialize_logging(is_debug, root_folder):
|
||||||
if is_debug:
|
if is_debug:
|
||||||
@@ -206,9 +215,9 @@ class Sheerka:
|
|||||||
"""
|
"""
|
||||||
self.init_log.info("Initializing services")
|
self.init_log.info("Initializing services")
|
||||||
|
|
||||||
import_module_and_sub_module('core.services')
|
import_module_and_sub_module('services')
|
||||||
base_class = "core.services.BaseService.BaseService"
|
base_class = "services.BaseService.BaseService"
|
||||||
services = [service(self) for service in get_sub_classes("core.services", base_class)]
|
services = [service(self) for service in get_sub_classes("services", base_class)]
|
||||||
services.sort(key=attrgetter("order"))
|
services.sort(key=attrgetter("order"))
|
||||||
for service in services:
|
for service in services:
|
||||||
if hasattr(service, "initialize"):
|
if hasattr(service, "initialize"):
|
||||||
@@ -282,7 +291,7 @@ class Sheerka:
|
|||||||
with ExecutionContext(user.email,
|
with ExecutionContext(user.email,
|
||||||
event,
|
event,
|
||||||
self,
|
self,
|
||||||
ExecutionContextActions.EVALUATE_USER_INPUT,
|
ContextActions.EVALUATE_USER_INPUT,
|
||||||
command,
|
command,
|
||||||
desc=f"Evaluating '{command}'",
|
desc=f"Evaluating '{command}'",
|
||||||
global_hints=self.global_context_hints.copy()) as exec_context:
|
global_hints=self.global_context_hints.copy()) as exec_context:
|
||||||
@@ -322,3 +331,14 @@ class Sheerka:
|
|||||||
return a.id == b[3:-1]
|
return a.id == b[3:-1]
|
||||||
|
|
||||||
return a.key == b
|
return a.key == b
|
||||||
|
|
||||||
|
def echo(self, msg):
|
||||||
|
"""
|
||||||
|
test function
|
||||||
|
:param msg:
|
||||||
|
:type msg:
|
||||||
|
:return:
|
||||||
|
:rtype:
|
||||||
|
"""
|
||||||
|
|
||||||
|
return msg
|
||||||
|
|||||||
+5
-2
@@ -14,7 +14,7 @@ class ConceptDefaultProps:
|
|||||||
RET = "#ret#"
|
RET = "#ret#"
|
||||||
|
|
||||||
|
|
||||||
DefaultProps = [v for k, v in ConceptDefaultProps.__dict__.items() if not k.startswith("_")]
|
ConceptDefaultPropsAttrs = [v for k, v in ConceptDefaultProps.__dict__.items() if not k.startswith("_")]
|
||||||
|
|
||||||
|
|
||||||
class DefinitionType:
|
class DefinitionType:
|
||||||
@@ -49,6 +49,9 @@ class ConceptMetadata:
|
|||||||
digest: str = None
|
digest: str = None
|
||||||
all_attrs: tuple = None
|
all_attrs: tuple = None
|
||||||
|
|
||||||
|
def get_metadata(self):
|
||||||
|
return self
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class ConceptRuntimeInfo:
|
class ConceptRuntimeInfo:
|
||||||
@@ -57,7 +60,7 @@ class ConceptRuntimeInfo:
|
|||||||
They are related to the instance of the concept
|
They are related to the instance of the concept
|
||||||
"""
|
"""
|
||||||
is_evaluated: bool = False # True is the concept is evaluated by sheerka.eval_concept()
|
is_evaluated: bool = False # True is the concept is evaluated by sheerka.eval_concept()
|
||||||
need_validation: bool = False # True if the properties of the concept need to be validated
|
need_validation: bool = True # True if the properties of the concept need to be validated
|
||||||
recognized_by: str = None # RECOGNIZED_BY_ID, RECOGNIZED_BY_NAME, RECOGNIZED_BY_KEY (from Sheerka.py)
|
recognized_by: str = None # RECOGNIZED_BY_ID, RECOGNIZED_BY_NAME, RECOGNIZED_BY_KEY (from Sheerka.py)
|
||||||
|
|
||||||
def copy(self):
|
def copy(self):
|
||||||
|
|||||||
@@ -1,9 +1,26 @@
|
|||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
from common.utils import compute_hash
|
from common.utils import compute_hash
|
||||||
from core.ExecutionContext import ExecutionContext
|
from core.ExecutionContext import ExecutionContext
|
||||||
|
|
||||||
|
|
||||||
class SheerkaException(Exception):
|
class SheerkaException(Exception):
|
||||||
pass
|
def get_error_msg(self) -> str:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class MethodAccessError(SheerkaException):
|
||||||
|
def __init__(self, method_name):
|
||||||
|
self.method_name = method_name
|
||||||
|
|
||||||
|
def get_error_msg(self) -> str:
|
||||||
|
return f"Cannot access method '{self.method_name}'"
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ErrorObj:
|
||||||
|
def get_error_msg(self) -> str:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class ErrorContext:
|
class ErrorContext:
|
||||||
@@ -18,7 +35,7 @@ class ErrorContext:
|
|||||||
self.parents = None
|
self.parents = None
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
return f"Error(who={self.who}, context_id={self.context.long_id}, value={self.value})"
|
return f"Error(who={self.who}, context_id={self.context.medium_id}, value={self.value})"
|
||||||
|
|
||||||
def __eq__(self, other):
|
def __eq__(self, other):
|
||||||
if id(self) == id(other):
|
if id(self) == id(other):
|
||||||
@@ -33,3 +50,16 @@ class ErrorContext:
|
|||||||
|
|
||||||
def __hash__(self):
|
def __hash__(self):
|
||||||
return hash((self.who, self.context.id, compute_hash(self.value)))
|
return hash((self.who, self.context.id, compute_hash(self.value)))
|
||||||
|
|
||||||
|
def get_error_msg(self):
|
||||||
|
value_as_list = self.value if isinstance(self.value, list) else [self.value]
|
||||||
|
temp = []
|
||||||
|
for value in value_as_list:
|
||||||
|
if isinstance(value, str):
|
||||||
|
temp.append(value)
|
||||||
|
elif isinstance(value, (SheerkaException, ErrorObj)):
|
||||||
|
temp.append(value.get_error_msg())
|
||||||
|
else:
|
||||||
|
temp.append(repr(value))
|
||||||
|
|
||||||
|
return ", ".join(temp)
|
||||||
@@ -0,0 +1,59 @@
|
|||||||
|
import ast
|
||||||
|
import copy
|
||||||
|
|
||||||
|
|
||||||
|
class PythonFragment:
|
||||||
|
|
||||||
|
def __init__(self, source_code, ast_tree=None, original_source=None, namespace=None):
|
||||||
|
self.source_code = source_code # what was parsed
|
||||||
|
self.original_source = original_source or source_code # to remember source before concepts id replacements
|
||||||
|
self.ast_tree = ast_tree # if ast_ else ast.parse(source, mode="eval") if source else None
|
||||||
|
self.namespace = namespace or {} # when objects (mainly concepts or rules) are recognized in the expression
|
||||||
|
self._compiled = None
|
||||||
|
self.ast_str = self.get_dump()
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
ast_type = "expr" if isinstance(self.ast_tree, ast.Expression) else "module"
|
||||||
|
return "PythonNode(" + ast_type + "='" + self.source_code + "')"
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
if not isinstance(other, PythonFragment):
|
||||||
|
return False
|
||||||
|
|
||||||
|
return self.source_code == other.source_code and self.original_source == other.original_source
|
||||||
|
|
||||||
|
def __hash__(self):
|
||||||
|
return hash((self.source_code, self.original_source))
|
||||||
|
|
||||||
|
def get_dump(self):
|
||||||
|
if not self.ast_tree:
|
||||||
|
return None
|
||||||
|
|
||||||
|
dump = ast.dump(self.ast_tree)
|
||||||
|
for to_remove in [", ctx=Load()", ", kind=None", ", type_ignores=[]"]:
|
||||||
|
dump = dump.replace(to_remove, "")
|
||||||
|
return dump
|
||||||
|
|
||||||
|
def get_compiled(self):
|
||||||
|
if self._compiled is None:
|
||||||
|
if isinstance(self.ast_tree, ast.Expression):
|
||||||
|
self._compiled = compile(self.ast_tree, "<string>", "eval")
|
||||||
|
else:
|
||||||
|
# in case of module, if the last expr is an expression, we want to be able to return its value
|
||||||
|
if isinstance(self.ast_tree.body[-1], ast.Expr):
|
||||||
|
init_ast = copy.deepcopy(self.ast_tree)
|
||||||
|
init_ast.body = self.ast_tree.body[:-1]
|
||||||
|
last_ast = copy.deepcopy(self.ast_tree)
|
||||||
|
last_ast_as_expression = self.expr_to_expression(last_ast.body[0])
|
||||||
|
self._compiled = [compile(init_ast, "<ast>", "exec"),
|
||||||
|
compile(last_ast_as_expression, "<ast>", "eval")]
|
||||||
|
else:
|
||||||
|
self._compiled = compile(self.ast_tree, "<string>", "exec")
|
||||||
|
|
||||||
|
return self._compiled
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def expr_to_expression(expr):
|
||||||
|
expr.lineno = 0
|
||||||
|
expr.col_offset = 0
|
||||||
|
return ast.Expression(expr.value, lineno=0, col_offset=0)
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
from core.BuiltinConcepts import BuiltinConcepts
|
from core.BuiltinConcepts import BuiltinConcepts
|
||||||
from core.ErrorContext import ErrorContext
|
from core.error import ErrorContext
|
||||||
from core.ExecutionContext import ExecutionContext, ExecutionContextActions
|
from core.ExecutionContext import ExecutionContext, ContextActions
|
||||||
from core.ReturnValue import ReturnValue
|
from core.ReturnValue import ReturnValue
|
||||||
from evaluators.base_evaluator import EvaluatorEvalResult, EvaluatorMatchResult, OneReturnValueEvaluator
|
from evaluators.base_evaluator import EvaluatorEvalResult, EvaluatorMatchResult, OneReturnValueEvaluator
|
||||||
from parsers.ParserInput import ParserInput
|
from parsers.ParserInput import ParserInput
|
||||||
@@ -10,7 +10,7 @@ class CreateParserInput(OneReturnValueEvaluator):
|
|||||||
NAME = "CreateParserInput"
|
NAME = "CreateParserInput"
|
||||||
|
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
super().__init__(self.NAME, ExecutionContextActions.BEFORE_EVALUATION, 50)
|
super().__init__(self.NAME, ContextActions.BEFORE_PARSING, 50)
|
||||||
|
|
||||||
def matches(self, context: ExecutionContext, return_value: ReturnValue) -> EvaluatorMatchResult:
|
def matches(self, context: ExecutionContext, return_value: ReturnValue) -> EvaluatorMatchResult:
|
||||||
if return_value.status and \
|
if return_value.status and \
|
||||||
@@ -26,5 +26,5 @@ class CreateParserInput(OneReturnValueEvaluator):
|
|||||||
return EvaluatorEvalResult([new_ret_val], [return_value])
|
return EvaluatorEvalResult([new_ret_val], [return_value])
|
||||||
else:
|
else:
|
||||||
error = ErrorContext(self.NAME, context, parser_input)
|
error = ErrorContext(self.NAME, context, parser_input)
|
||||||
new_ret_val = ReturnValue(self.NAME, False, error, parents=[return_value])
|
error_ret_val = ReturnValue(self.NAME, False, error, parents=[return_value])
|
||||||
return EvaluatorEvalResult([new_ret_val], [return_value])
|
return EvaluatorEvalResult([error_ret_val], [return_value])
|
||||||
|
|||||||
@@ -0,0 +1,31 @@
|
|||||||
|
from core.BuiltinConcepts import BuiltinConcepts
|
||||||
|
from core.ExecutionContext import ExecutionContext, ContextActions
|
||||||
|
from core.ReturnValue import ReturnValue
|
||||||
|
from core.error import ErrorContext
|
||||||
|
from evaluators.base_evaluator import EvaluatorEvalResult, EvaluatorMatchResult, OneReturnValueEvaluator
|
||||||
|
|
||||||
|
|
||||||
|
class PythonEvaluator(OneReturnValueEvaluator):
|
||||||
|
NAME = "PythonEvaluator"
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(self.NAME, ContextActions.EVALUATION, 50)
|
||||||
|
|
||||||
|
def matches(self, context: ExecutionContext, return_value: ReturnValue) -> EvaluatorMatchResult:
|
||||||
|
return EvaluatorMatchResult(return_value.status and
|
||||||
|
context.sheerka.isinstance(return_value.value, BuiltinConcepts.PYTHON_CODE))
|
||||||
|
|
||||||
|
def eval(self, context: ExecutionContext,
|
||||||
|
evaluation_context: object,
|
||||||
|
return_value: ReturnValue) -> EvaluatorEvalResult:
|
||||||
|
|
||||||
|
sheerka = context.sheerka
|
||||||
|
fragment = return_value.value.pf
|
||||||
|
|
||||||
|
evaluated = sheerka.evaluate_python(context, fragment)
|
||||||
|
if isinstance(evaluated, ErrorContext):
|
||||||
|
return EvaluatorEvalResult([ReturnValue(self.name, False, evaluated, parents=[return_value])],
|
||||||
|
[])
|
||||||
|
else:
|
||||||
|
return EvaluatorEvalResult([ReturnValue(self.name, True, evaluated, parents=[return_value])],
|
||||||
|
[return_value])
|
||||||
@@ -0,0 +1,69 @@
|
|||||||
|
import ast
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
from common.utils import encode_concept
|
||||||
|
from core.BuiltinConcepts import BuiltinConcepts
|
||||||
|
from core.ExecutionContext import ExecutionContext, ContextActions
|
||||||
|
from core.ReturnValue import ReturnValue
|
||||||
|
from core.error import ErrorContext, ErrorObj
|
||||||
|
from core.python_fragment import PythonFragment
|
||||||
|
from evaluators.base_evaluator import EvaluatorEvalResult, EvaluatorMatchResult, OneReturnValueEvaluator
|
||||||
|
from parsers.tokenizer import TokenKind
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass()
|
||||||
|
class PythonErrorNode(ErrorObj):
|
||||||
|
source: str
|
||||||
|
exception: Exception
|
||||||
|
|
||||||
|
def get_error_msg(self) -> str:
|
||||||
|
return repr(self.exception)
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
if not isinstance(other, PythonErrorNode):
|
||||||
|
return False
|
||||||
|
|
||||||
|
return self.source == other.source and self.exception == other.exception
|
||||||
|
|
||||||
|
def __hash__(self):
|
||||||
|
return hash((self.source, self.exception))
|
||||||
|
|
||||||
|
|
||||||
|
class PythonParser(OneReturnValueEvaluator):
|
||||||
|
NAME = "PythonParser"
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(self.NAME, ContextActions.PARSING, 80)
|
||||||
|
|
||||||
|
def matches(self, context: ExecutionContext, return_value: ReturnValue) -> EvaluatorMatchResult:
|
||||||
|
return EvaluatorMatchResult(return_value.status and
|
||||||
|
context.sheerka.isinstance(return_value.value, BuiltinConcepts.PARSER_INPUT))
|
||||||
|
|
||||||
|
def eval(self, context: ExecutionContext,
|
||||||
|
evaluation_context: object,
|
||||||
|
return_value: ReturnValue) -> EvaluatorEvalResult:
|
||||||
|
parser_input = return_value.value.body
|
||||||
|
|
||||||
|
tracker = {} # to keep track of concept tokens (c:xxx:)
|
||||||
|
python_switcher = {TokenKind.CONCEPT: lambda t: encode_concept(t.value),
|
||||||
|
TokenKind.RULE: lambda t: encode_concept(t.value, "R")}
|
||||||
|
source_code = parser_input.as_text(python_switcher, tracker).lstrip() # right side spaces must be kept
|
||||||
|
|
||||||
|
try:
|
||||||
|
ast_tree = ast.parse(source_code, f"<user input>", 'eval')
|
||||||
|
except:
|
||||||
|
try:
|
||||||
|
ast_tree = ast.parse(source_code, f"<user input>", 'exec')
|
||||||
|
except Exception as error:
|
||||||
|
error_context = ErrorContext(self.NAME, context, PythonErrorNode(parser_input.as_text(), error))
|
||||||
|
error_ret_val = ReturnValue(self.NAME, False, error_context, [return_value])
|
||||||
|
return EvaluatorEvalResult([error_ret_val], [])
|
||||||
|
|
||||||
|
# Successfully parsed some python code
|
||||||
|
python_code = context.sheerka.newn(BuiltinConcepts.PYTHON_CODE,
|
||||||
|
pf=PythonFragment(source_code,
|
||||||
|
ast_tree,
|
||||||
|
parser_input.original_text,
|
||||||
|
tracker))
|
||||||
|
new = ReturnValue(self.NAME, True, python_code, parents=[return_value])
|
||||||
|
return EvaluatorEvalResult([new], [return_value])
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
|
|
||||||
from core.ExecutionContext import ExecutionContext, ExecutionContextActions
|
from core.ExecutionContext import ExecutionContext, ContextActions
|
||||||
from core.ReturnValue import ReturnValue
|
from core.ReturnValue import ReturnValue
|
||||||
|
|
||||||
|
|
||||||
@@ -21,7 +21,7 @@ class BaseEvaluator:
|
|||||||
Base class to evaluate ReturnValues
|
Base class to evaluate ReturnValues
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, name, step: ExecutionContextActions, priority: int, enabled=True):
|
def __init__(self, name, step: ContextActions, priority: int, enabled=True):
|
||||||
self.name = name
|
self.name = name
|
||||||
self.step = step
|
self.step = step
|
||||||
self.priority = priority
|
self.priority = priority
|
||||||
|
|||||||
@@ -1,3 +1,4 @@
|
|||||||
|
from common.utils import get_text_from_tokens
|
||||||
from parsers.tokenizer import Tokenizer
|
from parsers.tokenizer import Tokenizer
|
||||||
|
|
||||||
|
|
||||||
@@ -5,15 +6,24 @@ class ParserInput:
|
|||||||
def __init__(self, text, yield_oef=True):
|
def __init__(self, text, yield_oef=True):
|
||||||
self.original_text = text
|
self.original_text = text
|
||||||
self.yield_oef = yield_oef
|
self.yield_oef = yield_oef
|
||||||
self.tokens = None
|
self.all_tokens = None
|
||||||
self.exception = None
|
self.exception = None
|
||||||
|
|
||||||
def init(self) -> bool:
|
def init(self) -> bool:
|
||||||
try:
|
try:
|
||||||
# the eof if forced, but will not be yield if not set to.
|
# the eof if forced, but will not be yield if not set to.
|
||||||
self.tokens = list(Tokenizer(self.original_text, yield_eof=True))
|
self.all_tokens = list(Tokenizer(self.original_text, yield_eof=True))
|
||||||
return True
|
return True
|
||||||
except Exception as ex:
|
except Exception as ex:
|
||||||
self.tokens = None
|
self.all_tokens = None
|
||||||
self.exception = ex
|
self.exception = ex
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
def as_text(self, custom_switcher=None, tracker=None):
|
||||||
|
if self.all_tokens is None:
|
||||||
|
raise Exception("You must call init() first !")
|
||||||
|
|
||||||
|
return get_text_from_tokens(self.all_tokens, custom_switcher, tracker)
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return f"ParserInput('{self.original_text}', len={len(self.all_tokens)})"
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ from dataclasses import dataclass, field
|
|||||||
from enum import Enum
|
from enum import Enum
|
||||||
|
|
||||||
from common.global_symbols import VARIABLE_PREFIX
|
from common.global_symbols import VARIABLE_PREFIX
|
||||||
|
from common.utils import str_concept
|
||||||
|
|
||||||
|
|
||||||
class TokenKind(Enum):
|
class TokenKind(Enum):
|
||||||
@@ -11,7 +12,6 @@ class TokenKind(Enum):
|
|||||||
IDENTIFIER = "identifier"
|
IDENTIFIER = "identifier"
|
||||||
CONCEPT = "concept"
|
CONCEPT = "concept"
|
||||||
RULE = "rule"
|
RULE = "rule"
|
||||||
EXPR = "expression"
|
|
||||||
STRING = "string"
|
STRING = "string"
|
||||||
NUMBER = "number"
|
NUMBER = "number"
|
||||||
LPAR = "lpar"
|
LPAR = "lpar"
|
||||||
|
|||||||
+10
-1
@@ -8,6 +8,8 @@ from starlette.middleware.cors import CORSMiddleware
|
|||||||
|
|
||||||
from constants import CLIENT_OPERATION_QUIT, EXIT_COMMANDS, SHEERKA_PORT
|
from constants import CLIENT_OPERATION_QUIT, EXIT_COMMANDS, SHEERKA_PORT
|
||||||
from core.Sheerka import Sheerka
|
from core.Sheerka import Sheerka
|
||||||
|
from core.concept import Concept
|
||||||
|
from core.error import ErrorContext
|
||||||
from server.authentication import ACCESS_TOKEN_EXPIRE_MINUTES, User, authenticate_user, create_access_token, \
|
from server.authentication import ACCESS_TOKEN_EXPIRE_MINUTES, User, authenticate_user, create_access_token, \
|
||||||
fake_users_db, get_current_active_user
|
fake_users_db, get_current_active_user
|
||||||
|
|
||||||
@@ -93,10 +95,17 @@ async def command(message: str, current_user: User = Depends(get_current_active_
|
|||||||
"response": "Take care.",
|
"response": "Take care.",
|
||||||
"command": CLIENT_OPERATION_QUIT
|
"command": CLIENT_OPERATION_QUIT
|
||||||
}
|
}
|
||||||
|
|
||||||
res = sheerka.evaluate_user_input(message, current_user)
|
res = sheerka.evaluate_user_input(message, current_user)
|
||||||
|
value = res[0].value
|
||||||
|
if isinstance(value, Concept) and value.get_runtime_info().is_evaluated:
|
||||||
|
value = value.body
|
||||||
|
if isinstance(value, ErrorContext):
|
||||||
|
value = value.get_error_msg()
|
||||||
|
|
||||||
return {
|
return {
|
||||||
"status": res[0].status,
|
"status": res[0].status,
|
||||||
"response": res[0].value.body,
|
"response": value,
|
||||||
"command": None
|
"command": None
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,27 @@
|
|||||||
|
from services.BaseService import BaseService
|
||||||
|
|
||||||
|
|
||||||
|
class SheerkaAdmin(BaseService):
|
||||||
|
"""
|
||||||
|
Service for admin function, when using the CLI
|
||||||
|
"""
|
||||||
|
NAME = "Admin"
|
||||||
|
|
||||||
|
def __init__(self, sheerka):
|
||||||
|
super().__init__(sheerka)
|
||||||
|
|
||||||
|
def initialize(self):
|
||||||
|
self.sheerka.bind_service_method(self.NAME, self.extended_isinstance, False)
|
||||||
|
|
||||||
|
def extended_isinstance(self, a, b):
|
||||||
|
"""
|
||||||
|
switch between sheerka.isinstance and builtin.isinstance
|
||||||
|
:param a:
|
||||||
|
:param b:
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
|
||||||
|
if isinstance(b, (type, tuple)):
|
||||||
|
return isinstance(a, b)
|
||||||
|
|
||||||
|
return self.sheerka.isinstance(a, b)
|
||||||
@@ -0,0 +1,140 @@
|
|||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
from caching.FastCache import FastCache
|
||||||
|
from core.BuiltinConcepts import BuiltinConcepts
|
||||||
|
from core.ExecutionContext import ContextActions, ExecutionContext
|
||||||
|
from core.ReturnValue import ReturnValue
|
||||||
|
from core.concept import Concept, ConceptDefaultProps, ConceptDefaultPropsAttrs, ConceptMetadata
|
||||||
|
from core.error import ErrorObj, SheerkaException
|
||||||
|
from core.python_fragment import PythonFragment
|
||||||
|
from services.BaseService import BaseService
|
||||||
|
from services.SheerkaPython import EvaluationRef
|
||||||
|
|
||||||
|
PARSING_STEPS = [
|
||||||
|
ContextActions.BEFORE_PARSING,
|
||||||
|
ContextActions.PARSING,
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class ConceptCompiled:
|
||||||
|
"""
|
||||||
|
Container for all PythonFragment
|
||||||
|
attribute will be accessed by setattr() and getattr()
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ConceptEvaluationHints:
|
||||||
|
force_evaluation: bool = False
|
||||||
|
|
||||||
|
|
||||||
|
class ConceptEvaluator(BaseService):
|
||||||
|
"""
|
||||||
|
The service is used to evaluate a concept
|
||||||
|
"""
|
||||||
|
|
||||||
|
NAME = "ConceptEvaluator"
|
||||||
|
|
||||||
|
def __init__(self, sheerka):
|
||||||
|
super().__init__(sheerka)
|
||||||
|
self.compiled_cache = FastCache()
|
||||||
|
|
||||||
|
def initialize(self):
|
||||||
|
self.sheerka.bind_service_method(self.NAME, self.evaluate_concept, True)
|
||||||
|
|
||||||
|
def evaluate_concept(self, context: ExecutionContext,
|
||||||
|
concept: Concept,
|
||||||
|
hints: ConceptEvaluationHints = None) -> Concept:
|
||||||
|
hints = hints or ConceptEvaluationHints()
|
||||||
|
|
||||||
|
with context.push(self.NAME, ContextActions.EVALUATING_CONCEPT, {"concept": concept}) as sub_context:
|
||||||
|
|
||||||
|
# if the concept is already evaluated, no need to do it again
|
||||||
|
if not hints.force_evaluation and concept.get_runtime_info().is_evaluated:
|
||||||
|
return concept
|
||||||
|
|
||||||
|
if concept.get_definition_digest() not in self.compiled_cache:
|
||||||
|
compiled = self.build(sub_context, concept.get_metadata())
|
||||||
|
self.compiled_cache.put(concept.get_definition_digest(), compiled)
|
||||||
|
|
||||||
|
self.inner_eval_concept(context, concept)
|
||||||
|
return concept
|
||||||
|
|
||||||
|
def build(self, context: ExecutionContext, metadata: ConceptMetadata):
|
||||||
|
sheerka = context.sheerka
|
||||||
|
action_context = {ConceptDefaultProps.WHERE: metadata.where,
|
||||||
|
ConceptDefaultProps.PRE: metadata.pre,
|
||||||
|
ConceptDefaultProps.BODY: metadata.body,
|
||||||
|
ConceptDefaultProps.POST: metadata.post,
|
||||||
|
ConceptDefaultProps.RET: metadata.ret}
|
||||||
|
for k, v in metadata.variables:
|
||||||
|
action_context[k] = v
|
||||||
|
|
||||||
|
compiled = ConceptCompiled()
|
||||||
|
with context.push(self.NAME, ContextActions.BUILD_CONCEPT, {"metadata": action_context}) as sub_context:
|
||||||
|
|
||||||
|
for attr, source_code in action_context.items():
|
||||||
|
if source_code is None or source_code == "":
|
||||||
|
setattr(compiled, attr, None)
|
||||||
|
continue
|
||||||
|
|
||||||
|
with sub_context.push(self.NAME, ContextActions.BUILD_CONCEPT_ATTR, {"attr": attr}) as attr_context:
|
||||||
|
start = ReturnValue(self.NAME,
|
||||||
|
True,
|
||||||
|
sheerka.newn(BuiltinConcepts.USER_INPUT, command=source_code))
|
||||||
|
|
||||||
|
# parse the code to get the python fragment
|
||||||
|
attr_context.add_inputs(start=start)
|
||||||
|
ret = sheerka.execute(attr_context, [start], PARSING_STEPS)
|
||||||
|
attr_context.add_values(return_values=ret)
|
||||||
|
|
||||||
|
# TODO : manage when the parsing fails
|
||||||
|
|
||||||
|
# Add reference to internal variables
|
||||||
|
python_fragment = ret[0].value.pf
|
||||||
|
for k, v in metadata.variables:
|
||||||
|
python_fragment.namespace[k] = EvaluationRef("self", k)
|
||||||
|
|
||||||
|
setattr(compiled, attr, python_fragment)
|
||||||
|
|
||||||
|
return compiled
|
||||||
|
|
||||||
|
def inner_eval_concept(self, context, concept):
|
||||||
|
sheerka = context.sheerka
|
||||||
|
compiled = self.compiled_cache.get(concept.get_definition_digest())
|
||||||
|
compiled_debug = self._get_compiled_debug(compiled)
|
||||||
|
|
||||||
|
attributes = self._get_attributes_to_eval(context, concept)
|
||||||
|
|
||||||
|
with context.push(self.NAME, ContextActions.EVAL_CONCEPT, {"compiled": compiled_debug}) as sub_context:
|
||||||
|
# first evaluate the variables
|
||||||
|
for attr in attributes:
|
||||||
|
with context.push(self.NAME, ContextActions.EVAL_CONCEPT_ATTR, {"attr": attr}) as attr_context:
|
||||||
|
res = sheerka.evaluate_python(sub_context,
|
||||||
|
getattr(compiled, attr),
|
||||||
|
{"self": concept})
|
||||||
|
# TODO : manage errors
|
||||||
|
concept.set_value(attr, res)
|
||||||
|
|
||||||
|
return concept
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _get_attributes_to_eval(context, concept):
|
||||||
|
res = [v[0] for v in concept.get_metadata().variables]
|
||||||
|
res += ConceptDefaultPropsAttrs
|
||||||
|
return res
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _get_compiled_debug(compiled):
|
||||||
|
ret = {}
|
||||||
|
for attr, value in vars(compiled).items():
|
||||||
|
if value is None:
|
||||||
|
ret[attr] = None
|
||||||
|
elif isinstance(value, (ErrorObj, SheerkaException)):
|
||||||
|
ret[attr] = value.get_error_msg()
|
||||||
|
elif isinstance(value, PythonFragment):
|
||||||
|
ret[attr] = value.original_source
|
||||||
|
else:
|
||||||
|
ret[attr] = repr(value)
|
||||||
|
return ret
|
||||||
@@ -6,14 +6,14 @@ from caching.Cache import Cache
|
|||||||
from caching.FastCache import FastCache
|
from caching.FastCache import FastCache
|
||||||
from caching.ListIfNeededCache import ListIfNeededCache
|
from caching.ListIfNeededCache import ListIfNeededCache
|
||||||
from common.global_symbols import NotFound, NotInit, VARIABLE_PREFIX
|
from common.global_symbols import NotFound, NotInit, VARIABLE_PREFIX
|
||||||
from common.utils import get_logger_name
|
from common.utils import get_logger_name, unstr_concept
|
||||||
from core.BuiltinConcepts import BuiltinConcepts
|
from core.BuiltinConcepts import BuiltinConcepts
|
||||||
from core.ErrorContext import ErrorContext, SheerkaException
|
|
||||||
from core.ExecutionContext import ExecutionContext
|
from core.ExecutionContext import ExecutionContext
|
||||||
from core.ReturnValue import ReturnValue
|
from core.ReturnValue import ReturnValue
|
||||||
from core.concept import Concept, ConceptMetadata, DefaultProps, DefinitionType
|
from core.concept import Concept, ConceptMetadata, ConceptDefaultPropsAttrs, DefinitionType
|
||||||
from core.services.BaseService import BaseService
|
from core.error import ErrorContext, SheerkaException
|
||||||
from parsers.tokenizer import TokenKind, Tokenizer, strip_tokens
|
from parsers.tokenizer import TokenKind, Tokenizer, strip_tokens
|
||||||
|
from services.BaseService import BaseService
|
||||||
|
|
||||||
PROPERTIES_FOR_DIGEST = ("name", "key",
|
PROPERTIES_FOR_DIGEST = ("name", "key",
|
||||||
"definition", "definition_type",
|
"definition", "definition_type",
|
||||||
@@ -22,15 +22,22 @@ PROPERTIES_FOR_DIGEST = ("name", "key",
|
|||||||
"desc", "bound_body", "autouse", "props", "variables", "parameters")
|
"desc", "bound_body", "autouse", "props", "variables", "parameters")
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class ConceptAlreadyDefined(SheerkaException):
|
class ConceptAlreadyDefined(SheerkaException):
|
||||||
concept: ConceptMetadata
|
def __init__(self, concept: ConceptMetadata, already_defined_id: str):
|
||||||
already_defined_id: str
|
self.concept = concept
|
||||||
|
self.already_defined_id = already_defined_id
|
||||||
|
|
||||||
|
def get_error_msg(self) -> str:
|
||||||
|
return f"Concept {self.concept.name}, is already defined (id={self.already_defined_id})"
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class InvalidBnf(SheerkaException):
|
class InvalidBnf(SheerkaException):
|
||||||
bnf: str
|
def __init__(self, bnf: str):
|
||||||
|
self.bnf = bnf
|
||||||
|
|
||||||
|
def get_error_msg(self) -> str:
|
||||||
|
return f"Invalid bnf '{self.bnf}'"
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
@@ -65,8 +72,12 @@ class ConceptManager(BaseService):
|
|||||||
def initialize(self):
|
def initialize(self):
|
||||||
self.init_log.debug(f"Initializing ConceptManager, order={self.order}")
|
self.init_log.debug(f"Initializing ConceptManager, order={self.order}")
|
||||||
self.sheerka.bind_service_method(self.NAME, self.define_new_concept, True)
|
self.sheerka.bind_service_method(self.NAME, self.define_new_concept, True)
|
||||||
|
self.sheerka.bind_service_method(self.NAME, self.new, True)
|
||||||
self.sheerka.bind_service_method(self.NAME, self.newn, True)
|
self.sheerka.bind_service_method(self.NAME, self.newn, True)
|
||||||
self.sheerka.bind_service_method(self.NAME, self.newi, True)
|
self.sheerka.bind_service_method(self.NAME, self.newi, True)
|
||||||
|
self.sheerka.bind_service_method(self.NAME, self.get_by_name, False)
|
||||||
|
self.sheerka.bind_service_method(self.NAME, self.get_by_id, False)
|
||||||
|
self.sheerka.bind_service_method(self.NAME, self.get_by_key, False)
|
||||||
|
|
||||||
register_concept_cache = self.sheerka.om.register_concept_cache
|
register_concept_cache = self.sheerka.om.register_concept_cache
|
||||||
|
|
||||||
@@ -93,6 +104,7 @@ class ConceptManager(BaseService):
|
|||||||
_(3, BuiltinConcepts.UNKNOWN_CONCEPT, desc="Unknown concept", variables=("requested_name", "requested_id"))
|
_(3, BuiltinConcepts.UNKNOWN_CONCEPT, desc="Unknown concept", variables=("requested_name", "requested_id"))
|
||||||
_(4, BuiltinConcepts.USER_INPUT, desc="Any external input", variables=("command",))
|
_(4, BuiltinConcepts.USER_INPUT, desc="Any external input", variables=("command",))
|
||||||
_(5, BuiltinConcepts.PARSER_INPUT, desc="tokenized input", variables=("pi",))
|
_(5, BuiltinConcepts.PARSER_INPUT, desc="tokenized input", variables=("pi",))
|
||||||
|
_(6, BuiltinConcepts.PYTHON_CODE, desc="python code", variables=("pf",)) # pf for PythonFragment
|
||||||
|
|
||||||
self.init_log.debug('%s builtin concepts created',
|
self.init_log.debug('%s builtin concepts created',
|
||||||
len(self.sheerka.om.current_cache_manager().concept_caches))
|
len(self.sheerka.om.current_cache_manager().concept_caches))
|
||||||
@@ -215,6 +227,31 @@ class ConceptManager(BaseService):
|
|||||||
return self._inner_new(self.get_by_name(BuiltinConcepts.UNKNOWN_CONCEPT), requested_id=concept_id)
|
return self._inner_new(self.get_by_name(BuiltinConcepts.UNKNOWN_CONCEPT), requested_id=concept_id)
|
||||||
return self._inner_new(metadata, **kwargs)
|
return self._inner_new(metadata, **kwargs)
|
||||||
|
|
||||||
|
def new(self, identifier, **kwargs):
|
||||||
|
"""
|
||||||
|
Try to resolve the instantiation of a concept
|
||||||
|
:param identifier:
|
||||||
|
:type identifier:
|
||||||
|
:param kwargs:
|
||||||
|
:type kwargs:
|
||||||
|
:return:
|
||||||
|
:rtype:
|
||||||
|
"""
|
||||||
|
if isinstance(identifier, ConceptMetadata):
|
||||||
|
return self._inner_new(identifier, **kwargs)
|
||||||
|
|
||||||
|
if (tmp := unstr_concept(identifier)) != (None, None):
|
||||||
|
# manage c:name#id:
|
||||||
|
identifier = tmp
|
||||||
|
|
||||||
|
if isinstance(identifier, tuple):
|
||||||
|
return self.newi(identifier[1], **kwargs) if identifier[1] else self.newn(identifier[0], **kwargs)
|
||||||
|
|
||||||
|
if isinstance(identifier, str):
|
||||||
|
return self.newn(identifier, **kwargs)
|
||||||
|
|
||||||
|
return self._inner_new(self.get_by_name(BuiltinConcepts.UNKNOWN_CONCEPT), requested_name=identifier)
|
||||||
|
|
||||||
def get_by_name(self, key: str):
|
def get_by_name(self, key: str):
|
||||||
"""
|
"""
|
||||||
Returns a concept metadata, using its name
|
Returns a concept metadata, using its name
|
||||||
@@ -245,6 +282,9 @@ class ConceptManager(BaseService):
|
|||||||
"""
|
"""
|
||||||
return self.sheerka.om.get(self.CONCEPTS_BY_KEY_ENTRY, key)
|
return self.sheerka.om.get(self.CONCEPTS_BY_KEY_ENTRY, key)
|
||||||
|
|
||||||
|
def get_all_concepts(self):
|
||||||
|
return list(sorted(self.sheerka.om.list(self.CONCEPTS_BY_ID_ENTRY), key=lambda item: int(item.id)))
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def compute_metadata_digest(metadata: ConceptMetadata):
|
def compute_metadata_digest(metadata: ConceptMetadata):
|
||||||
"""
|
"""
|
||||||
@@ -265,7 +305,7 @@ class ConceptManager(BaseService):
|
|||||||
:return:
|
:return:
|
||||||
:rtype:
|
:rtype:
|
||||||
"""
|
"""
|
||||||
all_attrs = DefaultProps.copy()
|
all_attrs = ConceptDefaultPropsAttrs.copy()
|
||||||
if variables:
|
if variables:
|
||||||
all_attrs += [k for k, v in variables]
|
all_attrs += [k for k, v in variables]
|
||||||
|
|
||||||
@@ -1,10 +1,11 @@
|
|||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
|
from itertools import chain
|
||||||
|
|
||||||
from common.utils import to_dict
|
from common.utils import to_dict
|
||||||
from core.ExecutionContext import ExecutionContext, ExecutionContextActions
|
from core.ExecutionContext import ExecutionContext, ContextActions
|
||||||
from core.ReturnValue import ReturnValue
|
from core.ReturnValue import ReturnValue
|
||||||
from core.services.BaseService import BaseService
|
|
||||||
from evaluators.base_evaluator import AllReturnValuesEvaluator, BaseEvaluator, OneReturnValueEvaluator
|
from evaluators.base_evaluator import AllReturnValuesEvaluator, BaseEvaluator, OneReturnValueEvaluator
|
||||||
|
from services.BaseService import BaseService
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
@@ -16,8 +17,7 @@ class EvaluationPlan:
|
|||||||
class SheerkaEngine(BaseService):
|
class SheerkaEngine(BaseService):
|
||||||
"""
|
"""
|
||||||
This service is used to process user input
|
This service is used to process user input
|
||||||
It is responsible to parse and evaluate the information
|
It is responsible for parsing and evaluating the commands
|
||||||
It also holds the rule engine
|
|
||||||
"""
|
"""
|
||||||
NAME = "Engine"
|
NAME = "Engine"
|
||||||
|
|
||||||
@@ -33,7 +33,7 @@ class SheerkaEngine(BaseService):
|
|||||||
def call_evaluators(self,
|
def call_evaluators(self,
|
||||||
context: ExecutionContext,
|
context: ExecutionContext,
|
||||||
return_values: list[ReturnValue],
|
return_values: list[ReturnValue],
|
||||||
step: ExecutionContextActions):
|
step: ContextActions):
|
||||||
"""
|
"""
|
||||||
Calls all evaluators defined for a given step
|
Calls all evaluators defined for a given step
|
||||||
:param context:
|
:param context:
|
||||||
@@ -50,7 +50,7 @@ class SheerkaEngine(BaseService):
|
|||||||
iteration = 0
|
iteration = 0
|
||||||
while True:
|
while True:
|
||||||
with context.push(self.NAME,
|
with context.push(self.NAME,
|
||||||
ExecutionContextActions.EVALUATING_ITERATION,
|
ContextActions.EVALUATING_ITERATION,
|
||||||
{"step": step, "iteration": iteration},
|
{"step": step, "iteration": iteration},
|
||||||
desc=f"iteration #{iteration}") as iteration_context:
|
desc=f"iteration #{iteration}") as iteration_context:
|
||||||
simple_digest = return_values.copy()
|
simple_digest = return_values.copy()
|
||||||
@@ -99,18 +99,24 @@ class SheerkaEngine(BaseService):
|
|||||||
|
|
||||||
iteration_context.add_values(return_values=return_values.copy())
|
iteration_context.add_values(return_values=return_values.copy())
|
||||||
|
|
||||||
iteration += 1
|
# to avoid infinite loop
|
||||||
|
# plus already evaluated ret_val must not be evaluated a second time
|
||||||
|
already_evaluated = set(chain.from_iterable(r.parents for r in return_values if r.parents))
|
||||||
|
return_values = list(filter(lambda ret_val: ret_val not in already_evaluated, return_values))
|
||||||
|
|
||||||
if simple_digest == return_values:
|
if simple_digest == return_values:
|
||||||
# I can use a variable like 'has_changed', but I think that this comparison is explicit
|
# I can use a variable like 'has_changed', but I think that this comparison is explicit
|
||||||
# It explains that I stay in the loop if something was modified
|
# It explains that I stay in the loop if something was modified
|
||||||
break
|
break
|
||||||
|
|
||||||
|
iteration += 1
|
||||||
|
|
||||||
return return_values
|
return return_values
|
||||||
|
|
||||||
def execute(self,
|
def execute(self,
|
||||||
context: ExecutionContext,
|
context: ExecutionContext,
|
||||||
return_values: list[ReturnValue],
|
return_values: list[ReturnValue],
|
||||||
steps: list[ExecutionContextActions]):
|
steps: list[ContextActions]):
|
||||||
"""
|
"""
|
||||||
Runs the processing engine on the return_values
|
Runs the processing engine on the return_values
|
||||||
:param context:
|
:param context:
|
||||||
@@ -124,7 +130,7 @@ class SheerkaEngine(BaseService):
|
|||||||
"""
|
"""
|
||||||
for step in steps:
|
for step in steps:
|
||||||
copy = return_values.copy()
|
copy = return_values.copy()
|
||||||
with context.push(self.NAME, ExecutionContextActions.EVALUATING_STEP, {"step": step}) as sub_context:
|
with context.push(self.NAME, ContextActions.EVALUATING_STEP, {"step": step}) as sub_context:
|
||||||
sub_context.add_inputs(return_values=copy)
|
sub_context.add_inputs(return_values=copy)
|
||||||
|
|
||||||
return_values = self.call_evaluators(sub_context, return_values, step)
|
return_values = self.call_evaluators(sub_context, return_values, step)
|
||||||
@@ -134,7 +140,7 @@ class SheerkaEngine(BaseService):
|
|||||||
|
|
||||||
return return_values
|
return return_values
|
||||||
|
|
||||||
def get_evaluation_plan(self, context: ExecutionContext, step: ExecutionContextActions) -> EvaluationPlan:
|
def get_evaluation_plan(self, context: ExecutionContext, step: ContextActions) -> EvaluationPlan:
|
||||||
if step not in self.execution_plan:
|
if step not in self.execution_plan:
|
||||||
return self.no_evaluation_plan
|
return self.no_evaluation_plan
|
||||||
|
|
||||||
@@ -0,0 +1,73 @@
|
|||||||
|
from typing import Any
|
||||||
|
|
||||||
|
from caching.FastCache import FastCache
|
||||||
|
from common.global_symbols import NotFound
|
||||||
|
from core.ExecutionContext import ExecutionContext
|
||||||
|
from services.BaseService import BaseService
|
||||||
|
|
||||||
|
|
||||||
|
class SheerkaMemory(BaseService):
|
||||||
|
"""
|
||||||
|
The purpose of this service is to remember things
|
||||||
|
There are two types of memory
|
||||||
|
* short term memory : that are not persisted
|
||||||
|
* long term memory : that are sent to sdp (through the OntologyManager)
|
||||||
|
Short term memory is also use to store PythonEvaluator results
|
||||||
|
"""
|
||||||
|
NAME = "Memory"
|
||||||
|
GLOBAL = "global" # for short term memory with no context (global variable across user inputs)
|
||||||
|
|
||||||
|
OBJECTS_ENTRY = "Memory:Objects"
|
||||||
|
|
||||||
|
def __init__(self, sheerka):
|
||||||
|
super().__init__(sheerka, order=13)
|
||||||
|
self.short_term_objects = FastCache()
|
||||||
|
|
||||||
|
def initialize(self):
|
||||||
|
self.sheerka.bind_service_method(self.NAME, self.get_from_short_term_memory, False, visible=False)
|
||||||
|
self.sheerka.bind_service_method(self.NAME, self.add_to_short_term_memory, True, visible=False)
|
||||||
|
self.sheerka.bind_service_method(self.NAME, self.list_short_term_memory, False, visible=False)
|
||||||
|
|
||||||
|
def get_from_short_term_memory(self, context: ExecutionContext | None, key: str) -> Any:
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
id_to_use = context.id if context else self.GLOBAL
|
||||||
|
return self.short_term_objects.cache[id_to_use][key]
|
||||||
|
except KeyError:
|
||||||
|
if context is None:
|
||||||
|
return NotFound
|
||||||
|
|
||||||
|
context = context.get_parent()
|
||||||
|
|
||||||
|
def add_to_short_term_memory(self, context: ExecutionContext | None, key: str, value: Any):
|
||||||
|
if context:
|
||||||
|
context.stm = True
|
||||||
|
id_to_use = context.id
|
||||||
|
else:
|
||||||
|
id_to_use = SheerkaMemory.GLOBAL
|
||||||
|
|
||||||
|
if id_to_use in self.short_term_objects.cache:
|
||||||
|
self.short_term_objects.cache[id_to_use][key] = value
|
||||||
|
else:
|
||||||
|
self.short_term_objects.put(id_to_use, {key: value})
|
||||||
|
|
||||||
|
def list_short_term_memory(self, context: ExecutionContext | None):
|
||||||
|
"""
|
||||||
|
list all short term memory data (stm data)
|
||||||
|
:param context:
|
||||||
|
:type context:
|
||||||
|
:return:
|
||||||
|
:rtype:
|
||||||
|
"""
|
||||||
|
res = self.short_term_objects.cache[self.GLOBAL].copy() if self.GLOBAL in self.short_term_objects.cache else {}
|
||||||
|
if context is None:
|
||||||
|
return res
|
||||||
|
|
||||||
|
contexts = [context] + list(context.get_parents())
|
||||||
|
for ec in reversed(contexts):
|
||||||
|
try:
|
||||||
|
res.update(self.short_term_objects.cache[ec.id])
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
return res
|
||||||
@@ -0,0 +1,384 @@
|
|||||||
|
import ast
|
||||||
|
import functools
|
||||||
|
import traceback
|
||||||
|
from dataclasses import dataclass, field
|
||||||
|
|
||||||
|
from common.ast_utils import NamesWithAttributesVisitor, UnreferencedNamesVisitor
|
||||||
|
from common.global_symbols import NoFirstToken, NotFound, NotInit, Removed
|
||||||
|
from common.utils import dict_product
|
||||||
|
from core.BuiltinConcepts import BuiltinConcepts
|
||||||
|
from core.ExecutionContext import ContextHint, ExecutionContext
|
||||||
|
from core.concept import Concept
|
||||||
|
from core.error import ErrorContext, ErrorObj, MethodAccessError
|
||||||
|
from core.python_fragment import PythonFragment
|
||||||
|
from services.BaseService import BaseService
|
||||||
|
|
||||||
|
TO_DISABLED = ["breakpoint", "callable", "compile", "delattr", "eval", "exec", "exit", "input", "locals", "open",
|
||||||
|
"print", "quit", "setattr"]
|
||||||
|
|
||||||
|
|
||||||
|
class ReservedNotInitClass:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
ReservedNotInit = ReservedNotInitClass()
|
||||||
|
|
||||||
|
sheerka_globals = {
|
||||||
|
"Concept": Concept,
|
||||||
|
"BuiltinConcepts": BuiltinConcepts,
|
||||||
|
"NotInit": NotInit,
|
||||||
|
"NotFound": NotFound,
|
||||||
|
"Removed": Removed,
|
||||||
|
"NoFirstToken": NoFirstToken,
|
||||||
|
}
|
||||||
|
sheerka_globals.update(dict(__builtins__))
|
||||||
|
|
||||||
|
|
||||||
|
class Expando:
|
||||||
|
def __init__(self, name, bag):
|
||||||
|
self.__name = name
|
||||||
|
for k, v in bag.items():
|
||||||
|
setattr(self, k, v)
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return f"{vars(self)}"
|
||||||
|
|
||||||
|
def get_name(self):
|
||||||
|
return self.__name
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
if id(other) == id(self):
|
||||||
|
return True
|
||||||
|
|
||||||
|
if not isinstance(other, Expando):
|
||||||
|
return False
|
||||||
|
|
||||||
|
if other.get_name() != self.get_name():
|
||||||
|
return False
|
||||||
|
|
||||||
|
for k, v in vars(self).items():
|
||||||
|
if getattr(other, k) != v:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
def __hash__(self):
|
||||||
|
hash_content = [self.__name] + list(vars(self).keys())
|
||||||
|
return hash(tuple(hash_content))
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class PythonEvalError(ErrorObj):
|
||||||
|
error: Exception
|
||||||
|
source: str
|
||||||
|
traceback: str = field(repr=False)
|
||||||
|
concepts: dict | None = field(repr=False)
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
if id(self) == id(other):
|
||||||
|
return True
|
||||||
|
|
||||||
|
if not isinstance(other, PythonEvalError):
|
||||||
|
return False
|
||||||
|
|
||||||
|
return isinstance(self.error, type(other.error)) and \
|
||||||
|
self.source == other.source and \
|
||||||
|
self.traceback == other.traceback and \
|
||||||
|
self.concepts == other.concepts
|
||||||
|
|
||||||
|
def __hash__(self):
|
||||||
|
return hash(self.error)
|
||||||
|
|
||||||
|
def get_error(self):
|
||||||
|
return self.error
|
||||||
|
|
||||||
|
def get_error_msg(self):
|
||||||
|
return ", ".join(self.error.args)
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class EvaluationRef:
|
||||||
|
root: str
|
||||||
|
attr: str
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
if not isinstance(other, EvaluationRef):
|
||||||
|
return False
|
||||||
|
|
||||||
|
return self.root == other.root and self.attr == other.attr
|
||||||
|
|
||||||
|
def __hash__(self):
|
||||||
|
return hash((self.root, self.attr))
|
||||||
|
|
||||||
|
|
||||||
|
class SheerkaPython(BaseService):
    """
    This service manages the evaluation of python fragments.

    A fragment's unreferenced names are resolved into a globals dict (from
    sheerka methods, short-term memory, local/global namespaces or freshly
    instantiated concepts), then the fragment's compiled AST is exec/eval'd
    against every candidate globals combination until one succeeds.
    """
    NAME = "PythonEvaluator"

    def __init__(self, sheerka):
        """Store the owning sheerka instance (done by the base class)."""
        super().__init__(sheerka)

    def initialize(self):
        """Register `evaluate_python` as a (hidden) sheerka service method."""
        self.sheerka.bind_service_method(self.NAME, self.evaluate_python, False, visible=False)

    def evaluate_python(self, context: ExecutionContext, fragment: PythonFragment, global_namespace=None):
        """Evaluate a python fragment and return its value or an ErrorContext.

        Builds the globals for the fragment, then tries every candidate
        globals combination (see `get_all_possible_globals`) until an
        evaluation succeeds. Locals produced by a successful evaluation are
        pushed into the short-term memory.

        :param context: current execution context
        :param fragment: the parsed python fragment to evaluate
        :param global_namespace: optional extra globals (defaults to {})
        :return: the evaluated value, or an ErrorContext wrapping the
                 collected PythonEvalError(s) when every attempt failed
        """
        sheerka = context.sheerka
        expression_only = False
        global_namespace = global_namespace or {}

        try:
            my_globals = self.get_globals(context, fragment, global_namespace, expression_only)
        except MethodAccessError as ex:
            if context.in_context(ContextHint.EXPRESSION_ONLY_REQUESTED):
                # Quick and dirty,
                # When expression_only, it's normal to have some NameError exceptions
                error = ErrorContext(self.NAME, context, ex)
            else:
                eval_error = PythonEvalError(ex, fragment.source_code, traceback.format_exc(), None)
                error = ErrorContext(self.NAME, context, eval_error)

            return error

        all_possible_globals = self.get_all_possible_globals(context, my_globals)
        expect_success = True
        concepts_entries = None
        errors = []
        # sentinel: lets us distinguish "never evaluated" from a falsy result
        evaluated = ReservedNotInit
        my_locals = None

        for globals_ in all_possible_globals:
            try:
                # eval
                tmp_locals = {}
                evaluated = self.evaluate_ast(fragment, globals_, tmp_locals)
                my_locals = tmp_locals

                if not expect_success or evaluated:
                    # in this first version, we stop once a success is found
                    # it may not be the best result !
                    break

            except Exception as ex:
                if concepts_entries is None:
                    # I don't want to init it if no error is raised
                    concepts_entries = self.get_concepts_entries_from_globals(my_globals)
                eval_error = PythonEvalError(ex,
                                             fragment.source_code,
                                             traceback.format_exc(),
                                             self.get_concepts_values_from_globals(globals_, concepts_entries))
                errors.append(eval_error)

        # add local namespace to stm
        if my_locals:
            for k, v in my_locals.items():
                sheerka.add_to_short_term_memory(context, k, v)

        return ErrorContext(self.NAME, context, errors) if evaluated == ReservedNotInit else evaluated

    def get_globals(self, context, fragment, global_namespace, expression_only):
        """
        Creates the globals variables for a fragment.

        Collects the unreferenced names of the fragment's AST; when the
        fragment references `sheerka`, also collects the attribute names
        used on it (e.g. `sheerka.test` -> "test") so only those methods
        are exposed.

        :param context: current execution context
        :param fragment: the parsed python fragment
        :param global_namespace: caller-provided globals
        :param expression_only: if true, side-effecting methods are rejected
        :return: the namespace dict built by `create_namespace`
        """
        unreferenced_names_visitor = UnreferencedNamesVisitor(context)
        names = unreferenced_names_visitor.get_names(fragment.ast_tree)
        if "sheerka" in names:
            sheerka_names = set()
            visitor = NamesWithAttributesVisitor()
            for sequence in visitor.get_sequences(fragment.ast_tree, "sheerka"):
                if len(sequence) > 1:
                    # keep only the first attribute after "sheerka"
                    sheerka_names.add(sequence[1])
        else:
            sheerka_names = None

        return self.create_namespace(context,
                                     names,  # names to look for
                                     sheerka_names,  # sheerka methods
                                     fragment.namespace,  # objects from python fragment => local namespace
                                     global_namespace,  # global namespace
                                     expression_only)

    def get_sheerka_method(self, context, who, name, expression_only):
        """Return the callable for a registered sheerka method, or None.

        Raises MethodAccessError when `expression_only` is set and the
        method has side effects. Methods registered as needing the context
        are wrapped so the current context is injected as first argument.
        """
        try:
            method = context.sheerka.sheerka_methods[name]
            if expression_only and method.has_side_effect:
                raise MethodAccessError(name)
            else:
                method_to_use = self.inject_context(context)(method.method) \
                    if name in context.sheerka.methods_with_context \
                    else method.method

            return method_to_use
        except KeyError:
            # unknown method name: not an error at this level
            return None

    def create_namespace(self, context,
                         names: list,
                         sheerka_objects: dict | None,
                         local_namespace: dict,
                         global_namespace: dict,
                         expression_only: bool):
        """
        Create a namespace for the requested names.

        Resolution order for each name: special names ("in_context",
        "isinstance"), sheerka built-in globals, the "sheerka" facade
        object, short-term memory, sheerka methods, the local namespace
        (references are resolved), the global namespace, and finally a
        freshly instantiated concept.

        :param context: current execution context
        :param names: requested names
        :param sheerka_objects: requested sheerka names (ex sheerka.isinstance)
        :param local_namespace: objects coming from the python fragment
        :param global_namespace: caller-provided globals, returned as is
        :param expression_only: if true, discard method that can alter the global state
        :return: dict mapping each resolved name to its object
        """
        result = {}

        for name in names:
            if name == "in_context":
                result[name] = context.in_context
                continue

            # need to add it manually to avoid conflict with sheerka.isinstance
            if name == "isinstance":
                result["isinstance"] = context.sheerka.extended_isinstance
                continue

            if not (expression_only and name in TO_DISABLED) and name in sheerka_globals:
                result[name] = sheerka_globals[name]
                continue

            # support reference to sheerka
            if name.lower() == "sheerka":
                # expose only the requested methods through an Expando facade
                bag = {}
                for sheerka_name in sheerka_objects:
                    if (method := self.get_sheerka_method(context,
                                                          context.who,
                                                          sheerka_name,
                                                          expression_only)) is not None:
                        bag[sheerka_name] = method
                result[name] = Expando("sheerka", bag)
                continue

            # search in short term memory
            if (obj := context.get_from_short_term_memory(name)) is not NotFound:
                context.log(f"Resolving '{name}'. Using value found in STM.")
                result[name] = obj
                continue
            #
            # # search in memory
            # if (obj := context.sheerka.get_last_from_memory(context, name)) is not NotFound:
            #     context.log(f"Resolving '{name}'. Using value found in Long Term Memory.", who)
            #     result[name] = obj.obj
            #     continue

            # search in sheerka methods
            if (method := self.get_sheerka_method(context, context.who, name, expression_only)) is not None:
                result[name] = method
                continue

            # search in current node (if the name was found during the parsing)
            # Local namespace references must be evaluated
            if name in local_namespace:
                context.log(f"Resolving '{name}'. Using value from local namespace.")
                result[name] = self.resolve_object(context, name, local_namespace[name], global_namespace)
                continue

            # global namespace references are returned as is
            if name in global_namespace:
                result[name] = global_namespace[name]
                continue

            # at last, try to instantiate a new concept
            if (metadata := context.sheerka.get_by_name(name)) != NotFound:
                context.log(f"Resolving '{name}'. Instantiating new concept.")
                result[name] = context.sheerka.new(metadata)

            # NOTE(review): this log also fires right after a concept was
            # successfully instantiated above — a `continue` in that branch
            # looks intended; confirm before changing.
            context.log(f"...'{name}' is not found or cannot be instantiated. Skipping.")

        return result

    @staticmethod
    def resolve_object(context, attr_name, to_resolve, global_namespace):
        """Resolve a local-namespace entry; only EvaluationRef is supported.

        :raises AttributeError: when `to_resolve` is not an EvaluationRef
        """
        if isinstance(to_resolve, EvaluationRef):
            return getattr(global_namespace[to_resolve.root], to_resolve.attr)

        raise AttributeError(attr_name)

    @staticmethod
    def evaluate_ast(fragment, my_globals, my_locals):
        """Execute a fragment's compiled code and return its value, if any.

        When the compiled form is a list, the first element is exec'd and
        the second eval'd (presumably statements followed by a final
        expression — TODO confirm against the fragment compiler). A pure
        expression is eval'd; anything else is exec'd and returns None.
        """
        compiled = fragment.get_compiled()

        if isinstance(compiled, list):
            exec(compiled[0], my_globals, my_locals)
            return eval(compiled[1], my_globals, my_locals)
        elif isinstance(fragment.ast_tree, ast.Expression):
            return eval(compiled, my_globals, my_locals)
        else:
            exec(compiled, my_globals, my_locals)

    @staticmethod
    def get_all_possible_globals(context, my_globals):
        """
        From a dictionary of globals (str, obj)
        Creates as many globals as there are combination between a concept and its body
        Example:
        if the entry 'foo': Concept("foo", body="something")
        2 globals will be created
        one with foo: Concept("foo") # we keep the concept as an object
        one with foo: 'something' # we substitute its value
        :param context: current execution context
        :param my_globals: the resolved namespace
        :return: list of candidate globals dicts
        """

        # first pass, get all the non concepts or concepts without a body
        # Note that we consider that all concepts are evaluated
        # In the future, it may be a good optimisation to defer the evaluation of the body
        # until the python evaluation fails
        fixed_values = {}
        concepts_with_body = {}
        for k, v in my_globals.items():
            if not isinstance(v, Concept) or not v.get_runtime_info().is_evaluated or v.body is NotInit:
                fixed_values[k] = v
            else:
                concepts_with_body[k] = v

        # make the product the rest as cartesian product
        res = [fixed_values]
        for k, v in concepts_with_body.items():
            # each concept doubles the candidates: object form vs value form
            res = dict_product(res, [{k: v}, {k: context.sheerka.objvalue(v)}])

        return res

    @staticmethod
    def inject_context(context):
        """
        function Decorator used to inject the context in methods that needed
        TODO : Maybe replace by 'partial' from functool
        :param context: the context to inject as first positional argument
        :return: a decorator wrapping the target function
        """

        def wrapped(func):
            @functools.wraps(func)
            def inner(*args, **kwargs):
                return func(context, *args, **kwargs)

            return inner

        return wrapped

    @staticmethod
    def get_concepts_entries_from_globals(my_globals):
        """Return the names in `my_globals` whose value is a Concept."""
        return [k for k, v in my_globals.items() if isinstance(v, Concept)]

    @staticmethod
    def get_concepts_values_from_globals(my_globals, names):
        """Return a name -> value snapshot of `names` from `my_globals`."""
        return {name: my_globals[name] for name in names}
|
||||||
@@ -8,6 +8,24 @@ from core.Sheerka import Sheerka
|
|||||||
from sdp.sheerkaDataProvider import SheerkaDataProvider
|
from sdp.sheerkaDataProvider import SheerkaDataProvider
|
||||||
|
|
||||||
|
|
||||||
|
class DummyObj:
    """Small test fixture: two string fields with value equality, hashing
    and a readable repr."""

    def __init__(self, a: str = "hello", b: str = "world"):
        self.a = a
        self.b = b

    def __eq__(self, other):
        """Value equality on (a, b); False for foreign types."""
        return isinstance(other, DummyObj) and (self.a, self.b) == (other.a, other.b)

    def __hash__(self):
        # Consistent with __eq__: hash the same (a, b) pair.
        return hash((self.a, self.b))

    def __repr__(self):
        return f"Dummy('{self.a}', '{self.b}')"
|
||||||
|
|
||||||
|
|
||||||
class BaseTest:
|
class BaseTest:
|
||||||
@pytest.fixture()
|
@pytest.fixture()
|
||||||
def sdp(self) -> SheerkaDataProvider:
|
def sdp(self) -> SheerkaDataProvider:
|
||||||
|
|||||||
@@ -46,7 +46,7 @@ def test_i_can_put_the_same_key_several_times():
|
|||||||
assert cache.lru == ["key2", "key1"]
|
assert cache.lru == ["key2", "key1"]
|
||||||
|
|
||||||
|
|
||||||
def test_none_is_returned_when_not_found():
|
def test_not_found_is_returned_when_not_found():
|
||||||
cache = FastCache()
|
cache = FastCache()
|
||||||
assert cache.get("foo") is NotFound
|
assert cache.get("foo") is NotFound
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,50 @@
|
|||||||
|
import ast
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from common.ast_utils import NamesWithAttributesVisitor, UnreferencedNamesVisitor, UnreferencedVariablesVisitor
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("source, expected", [
    ("a,b", {"a", "b"}),
    ("isinstance(a, int)", {"isinstance", "a", "int"}),
    ("date.today()", {"date"}),  # only the root of an attribute chain
    ("test()", {"test"}),
    ("sheerka.test()", {"sheerka"}),
    ("for i in range(10): pass", set()),  # loop-bound names are not "unreferenced"
    ("func(x=a, y=b)", {"func", "a", "b"}),

])
def test_i_can_get_unreferenced_names_from_simple_expressions(context, source, expected):
    """Names free in the expression (not defined by it) are collected,
    including called functions but excluding attribute accesses."""
    ast_ = ast.parse(source)
    visitor = UnreferencedNamesVisitor(context)
    visitor.visit(ast_)

    assert visitor.names == expected
|
||||||
|
|
||||||
|
|
||||||
|
def test_name_with_attribute():
    """Attribute chains rooted at a given name are returned as sequences,
    one per occurrence, in source order."""
    # Looks for all attributes for a given name
    ast_ = ast.parse("foo.bar.baz", "<src>", mode="exec")
    assert NamesWithAttributesVisitor().get_sequences(ast_, "foo") == [["foo", "bar", "baz"]]

    # It parses all expressions / statements
    ast_ = ast.parse("foo.bar.baz; one.two.three; foo.bar", "<src>", mode="exec")
    assert NamesWithAttributesVisitor().get_sequences(ast_, "foo") == [["foo", "bar", "baz"], ["foo", "bar"]]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("source, expected", [
    ("a,b", {"a", "b"}),
    ("isinstance(a, int)", {"a", "int"}),  # unlike names, the called function is excluded
    ("date.today()", set()),
    ("test()", set()),
    ("sheerka.test()", set()),
    ("for i in range(10): pass", set()),
    ("func(x=a, y=b)", {"a", "b", "x", "y"}),  # keyword names count as variables here
])
def test_i_can_get_unreferenced_variables_from_simple_expressions(context, source, expected):
    """Variables (as opposed to all names): callables and attribute roots
    are excluded, keyword-argument names are included."""
    ast_ = ast.parse(source)
    visitor = UnreferencedVariablesVisitor(context)
    visitor.visit(ast_)

    assert visitor.names == expected
|
||||||
@@ -2,9 +2,9 @@ from dataclasses import dataclass
|
|||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
from common.utils import decode_enum, get_class, to_dict, str_concept, unstr_concept
|
from common.utils import decode_enum, dict_product, get_class, get_text_from_tokens, str_concept, to_dict, unstr_concept
|
||||||
from helpers import get_concept
|
from helpers import get_concept
|
||||||
from parsers.tokenizer import Keywords, Token, TokenKind
|
from parsers.tokenizer import Keywords, Token, TokenKind, Tokenizer
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
@@ -120,3 +120,55 @@ def test_i_can_decode_enum(text, expected):
|
|||||||
])
|
])
|
||||||
def test_i_can_to_dict(items, expected):
|
def test_i_can_to_dict(items, expected):
|
||||||
assert to_dict(items, lambda obj: obj.prop1) == expected
|
assert to_dict(items, lambda obj: obj.prop1) == expected
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("text, expected_text", [
|
||||||
|
("hello world", "hello world"),
|
||||||
|
("'hello' 'world'", "'hello' 'world'"),
|
||||||
|
("def concept a from", "def concept a from"),
|
||||||
|
("()[]{}1=1.5+-/*><&é", "()[]{}1=1.5+-/*><&é"),
|
||||||
|
("execute(c:concept_name:)", "execute(c:concept_name:)")
|
||||||
|
|
||||||
|
])
|
||||||
|
def test_i_can_get_text_from_tokens(text, expected_text):
|
||||||
|
tokens = list(Tokenizer(text))
|
||||||
|
assert get_text_from_tokens(tokens) == expected_text
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("text, custom, expected_text", [
|
||||||
|
("execute(c:concept_name:)", {TokenKind.CONCEPT: lambda t: f"__C__{t.value[0]}"}, "execute(__C__concept_name)")
|
||||||
|
])
|
||||||
|
def test_i_can_get_text_from_tokens_with_custom_switcher(text, custom, expected_text):
|
||||||
|
tokens = list(Tokenizer(text))
|
||||||
|
assert get_text_from_tokens(tokens, custom) == expected_text
|
||||||
|
|
||||||
|
|
||||||
|
def test_i_can_track_tokens():
|
||||||
|
text = "execute(c:name1: if r:#id: else c:name2:)"
|
||||||
|
switcher = {TokenKind.CONCEPT: lambda t: f"__CONCEPT__{t.value[0]}",
|
||||||
|
TokenKind.RULE: lambda t: f"__RULE__{t.value[1]}"}
|
||||||
|
tracker = {}
|
||||||
|
tokens = list(Tokenizer(text))
|
||||||
|
get_text_from_tokens(tokens, switcher, tracker)
|
||||||
|
assert len(tracker) == 3
|
||||||
|
assert tracker["__CONCEPT__name1"] == tokens[2]
|
||||||
|
assert tracker["__RULE__id"] == tokens[6]
|
||||||
|
assert tracker["__CONCEPT__name2"] == tokens[10]
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("a,b,expected", [
|
||||||
|
([], [], []),
|
||||||
|
([{"a": "a", "b": "b"}], [], [{"a": "a", "b": "b"}]),
|
||||||
|
([], [{"a": "a", "b": "b"}], [{"a": "a", "b": "b"}]),
|
||||||
|
([{"a": "a", "b": "b"}], [{"d": "d1"}, {"d": "d2"}], [{"a": "a", "b": "b", "d": "d1"},
|
||||||
|
{"a": "a", "b": "b", "d": "d2"}]),
|
||||||
|
([{"d": "d1"}, {"d": "d2"}], [{"a": "a", "b": "b"}], [{"a": "a", "b": "b", "d": "d1"},
|
||||||
|
{"a": "a", "b": "b", "d": "d2"}]),
|
||||||
|
([{"a": "a", "b": "b"}], [{"d": "d", "e": "e"}], [{"a": "a", "b": "b", "d": "d", "e": "e"}]),
|
||||||
|
([{"a": "a"}, {"b": "b"}], [{"d": "d"}, {"e": "e"}], [{"a": "a", "d": "d"},
|
||||||
|
{"a": "a", "e": "e"},
|
||||||
|
{"b": "b", "d": "d"},
|
||||||
|
{"b": "b", "e": "e"}])
|
||||||
|
])
|
||||||
|
def test_dict_product(a, b, expected):
|
||||||
|
assert dict_product(a, b) == expected
|
||||||
|
|||||||
+10
-4
@@ -1,6 +1,7 @@
|
|||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
from helpers import GetNextId
|
from helpers import GetNextId
|
||||||
|
from server.authentication import User
|
||||||
|
|
||||||
|
|
||||||
@pytest.fixture(scope="session")
|
@pytest.fixture(scope="session")
|
||||||
@@ -25,12 +26,12 @@ def on_new_module(sheerka, request):
|
|||||||
:rtype:
|
:rtype:
|
||||||
"""
|
"""
|
||||||
from core.Event import Event
|
from core.Event import Event
|
||||||
from core.ExecutionContext import ExecutionContext, ExecutionContextActions
|
from core.ExecutionContext import ExecutionContext, ContextActions
|
||||||
module_name = request.module.__name__.split(".")[-1]
|
module_name = request.module.__name__.split(".")[-1]
|
||||||
context = ExecutionContext("test",
|
context = ExecutionContext("test",
|
||||||
Event(message=f"Executing module {module_name}"),
|
Event(message=f"Executing module {module_name}"),
|
||||||
sheerka,
|
sheerka,
|
||||||
ExecutionContextActions.TESTING,
|
ContextActions.TESTING,
|
||||||
None)
|
None)
|
||||||
|
|
||||||
ontology = sheerka.om.push_ontology(module_name)
|
ontology = sheerka.om.push_ontology(module_name)
|
||||||
@@ -41,12 +42,12 @@ def on_new_module(sheerka, request):
|
|||||||
@pytest.fixture(scope="function")
|
@pytest.fixture(scope="function")
|
||||||
def context(sheerka):
|
def context(sheerka):
|
||||||
from core.Event import Event
|
from core.Event import Event
|
||||||
from core.ExecutionContext import ExecutionContext, ExecutionContextActions
|
from core.ExecutionContext import ExecutionContext, ContextActions
|
||||||
|
|
||||||
return ExecutionContext("test",
|
return ExecutionContext("test",
|
||||||
Event(message=""),
|
Event(message=""),
|
||||||
sheerka,
|
sheerka,
|
||||||
ExecutionContextActions.TESTING,
|
ContextActions.TESTING,
|
||||||
None)
|
None)
|
||||||
|
|
||||||
|
|
||||||
@@ -55,6 +56,11 @@ def next_id():
|
|||||||
return GetNextId()
|
return GetNextId()
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture()
|
||||||
|
def user():
|
||||||
|
return User(username="johan doe", email="johan.doe@sheerka.com", firstname="johan", lastname="doe")
|
||||||
|
|
||||||
|
|
||||||
class TestUsingFileBasedSheerka:
|
class TestUsingFileBasedSheerka:
|
||||||
@pytest.fixture(scope="class")
|
@pytest.fixture(scope="class")
|
||||||
def sheerka(self):
|
def sheerka(self):
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ def test_i_can_retrieve_concept_properties():
|
|||||||
assert foo.id == "1001"
|
assert foo.id == "1001"
|
||||||
assert foo.str_id == "c:#1001:"
|
assert foo.str_id == "c:#1001:"
|
||||||
assert foo.all_attrs() == ('#where#', '#pre#', '#post#', '#body#', '#ret#', 'a', 'b')
|
assert foo.all_attrs() == ('#where#', '#pre#', '#post#', '#body#', '#ret#', 'a', 'b')
|
||||||
assert foo.get_definition_digest() == "3a2cfcda8ffd0d99a7f8c7d2f1ffc4a99fc96162f3be7b9875f30751d3691af6"
|
assert foo.get_definition_digest() == "13b61f45934a802b5486a1bdd60e404b32378a801408769cd584e3b3b7518cc2"
|
||||||
|
|
||||||
# sanity check to make sure that 'get_concept' works as expected
|
# sanity check to make sure that 'get_concept' works as expected
|
||||||
assert foo.get_metadata().variables == (("a", NotInit), ("b", NotInit))
|
assert foo.get_metadata().variables == (("a", NotInit), ("b", NotInit))
|
||||||
@@ -20,6 +20,7 @@ def test_i_can_set_and_get_value():
|
|||||||
foo = get_concept("foo", variables=["a"])
|
foo = get_concept("foo", variables=["a"])
|
||||||
foo.set_value("a", "some value")
|
foo.set_value("a", "some value")
|
||||||
assert foo.get_value("a") == "some value"
|
assert foo.get_value("a") == "some value"
|
||||||
|
assert foo.a == "some value"
|
||||||
|
|
||||||
|
|
||||||
def test_i_can_set_and_get_value_from_bound_attr():
|
def test_i_can_set_and_get_value_from_bound_attr():
|
||||||
|
|||||||
@@ -1,15 +1,15 @@
|
|||||||
from core.Event import Event
|
from core.Event import Event
|
||||||
from core.ExecutionContext import ExecutionContext, ExecutionContextActions
|
from core.ExecutionContext import ExecutionContext, ContextActions
|
||||||
|
|
||||||
|
|
||||||
def test_i_can_create_execution_context(sheerka):
|
def test_i_can_create_execution_context(sheerka):
|
||||||
event = Event("myEvent", "fake_userid")
|
event = Event("myEvent", "fake_userid")
|
||||||
context1 = ExecutionContext("who", event, sheerka, ExecutionContextActions.TESTING, "value1", "my desc")
|
context1 = ExecutionContext("who", event, sheerka, ContextActions.TESTING, "value1", "my desc")
|
||||||
|
|
||||||
assert context1.who == "who"
|
assert context1.who == "who"
|
||||||
assert context1.event == event
|
assert context1.event == event
|
||||||
assert context1.sheerka == sheerka
|
assert context1.sheerka == sheerka
|
||||||
assert context1.action == ExecutionContextActions.TESTING
|
assert context1.action == ContextActions.TESTING
|
||||||
assert context1.action_context == "value1"
|
assert context1.action_context == "value1"
|
||||||
assert context1.desc == "my desc"
|
assert context1.desc == "my desc"
|
||||||
assert context1.id == 0
|
assert context1.id == 0
|
||||||
@@ -18,12 +18,12 @@ def test_i_can_create_execution_context(sheerka):
|
|||||||
|
|
||||||
def test_i_can_push(sheerka):
|
def test_i_can_push(sheerka):
|
||||||
event = Event("test")
|
event = Event("test")
|
||||||
context = ExecutionContext("who", event, sheerka, ExecutionContextActions.TESTING, "value")
|
context = ExecutionContext("who", event, sheerka, ContextActions.TESTING, "value")
|
||||||
with context.push("pusher", ExecutionContextActions.PARSING, "action_context", "my desc") as sub_context:
|
with context.push("pusher", ContextActions.PARSING, "action_context", "my desc") as sub_context:
|
||||||
assert sub_context.who == "pusher"
|
assert sub_context.who == "pusher"
|
||||||
assert sub_context.event == event
|
assert sub_context.event == event
|
||||||
assert sub_context.sheerka == sheerka
|
assert sub_context.sheerka == sheerka
|
||||||
assert sub_context.action == ExecutionContextActions.PARSING
|
assert sub_context.action == ContextActions.PARSING
|
||||||
assert sub_context.action_context == "action_context"
|
assert sub_context.action_context == "action_context"
|
||||||
assert sub_context.desc == "my desc"
|
assert sub_context.desc == "my desc"
|
||||||
assert sub_context.id == context.id + 1
|
assert sub_context.id == context.id + 1
|
||||||
@@ -34,11 +34,11 @@ def test_i_can_increment_ids(sheerka):
|
|||||||
# If the event is the same, the id is incremented
|
# If the event is the same, the id is incremented
|
||||||
|
|
||||||
event = Event("TEST::myEvent", "fake_userid")
|
event = Event("TEST::myEvent", "fake_userid")
|
||||||
context1 = ExecutionContext("who", event, sheerka, ExecutionContextActions.TESTING, "value")
|
context1 = ExecutionContext("who", event, sheerka, ContextActions.TESTING, "value")
|
||||||
context2 = context1.push("who1", ExecutionContextActions.TESTING, "value1")
|
context2 = context1.push("who1", ContextActions.TESTING, "value1")
|
||||||
context3 = context2.push("who2", ExecutionContextActions.TESTING, "value2")
|
context3 = context2.push("who2", ContextActions.TESTING, "value2")
|
||||||
context4 = context1.push("who1", ExecutionContextActions.TESTING, "value3")
|
context4 = context1.push("who1", ContextActions.TESTING, "value3")
|
||||||
context5 = ExecutionContext("who", event, sheerka, ExecutionContextActions.TESTING, "value4")
|
context5 = ExecutionContext("who", event, sheerka, ContextActions.TESTING, "value4")
|
||||||
|
|
||||||
assert context1.id == 0
|
assert context1.id == 0
|
||||||
assert context2.id == 1
|
assert context2.id == 1
|
||||||
@@ -47,15 +47,15 @@ def test_i_can_increment_ids(sheerka):
|
|||||||
assert context5.id == 4
|
assert context5.id == 4
|
||||||
|
|
||||||
event2 = Event("TEST::myEvent2", "fake_userid")
|
event2 = Event("TEST::myEvent2", "fake_userid")
|
||||||
context6 = ExecutionContext("who", event2, sheerka, ExecutionContextActions.TESTING, "value")
|
context6 = ExecutionContext("who", event2, sheerka, ContextActions.TESTING, "value")
|
||||||
assert context6.id == 0
|
assert context6.id == 0
|
||||||
|
|
||||||
|
|
||||||
def test_i_can_manage_global_hints(context):
|
def test_i_can_manage_global_hints(context):
|
||||||
context2 = context.push("pusher", ExecutionContextActions.TESTING, None)
|
context2 = context.push("pusher", ContextActions.TESTING, None)
|
||||||
context3 = context2.push("pusher", ExecutionContextActions.TESTING, None)
|
context3 = context2.push("pusher", ContextActions.TESTING, None)
|
||||||
context4 = context3.push("pusher", ExecutionContextActions.TESTING, None)
|
context4 = context3.push("pusher", ContextActions.TESTING, None)
|
||||||
context5 = context.push("pusher", ExecutionContextActions.TESTING, None)
|
context5 = context.push("pusher", ContextActions.TESTING, None)
|
||||||
|
|
||||||
context.global_hints.add("new_hint")
|
context.global_hints.add("new_hint")
|
||||||
assert context.global_hints == {"new_hint"}
|
assert context.global_hints == {"new_hint"}
|
||||||
@@ -75,11 +75,11 @@ def test_i_can_manage_global_hints(context):
|
|||||||
def test_i_can_manage_protected_hint(context):
|
def test_i_can_manage_protected_hint(context):
|
||||||
# Note that protected hint only works if the hint is added BEFORE the creation of the child
|
# Note that protected hint only works if the hint is added BEFORE the creation of the child
|
||||||
context.protected_hints.add("new_hint")
|
context.protected_hints.add("new_hint")
|
||||||
context2 = context.push("pusher", ExecutionContextActions.TESTING, None)
|
context2 = context.push("pusher", ContextActions.TESTING, None)
|
||||||
context3 = context2.push("pusher", ExecutionContextActions.TESTING, None)
|
context3 = context2.push("pusher", ContextActions.TESTING, None)
|
||||||
context3.protected_hints.add("another_hint")
|
context3.protected_hints.add("another_hint")
|
||||||
context4 = context3.push("pusher", ExecutionContextActions.TESTING, None)
|
context4 = context3.push("pusher", ContextActions.TESTING, None)
|
||||||
context5 = context.push("pusher", ExecutionContextActions.TESTING, None)
|
context5 = context.push("pusher", ContextActions.TESTING, None)
|
||||||
|
|
||||||
assert context.protected_hints == {"new_hint"}
|
assert context.protected_hints == {"new_hint"}
|
||||||
assert context2.protected_hints == {"new_hint"}
|
assert context2.protected_hints == {"new_hint"}
|
||||||
@@ -90,11 +90,11 @@ def test_i_can_manage_protected_hint(context):
|
|||||||
|
|
||||||
def test_i_can_manage_private_hints(context):
|
def test_i_can_manage_private_hints(context):
|
||||||
context.private_hints.add("new_hint")
|
context.private_hints.add("new_hint")
|
||||||
context2 = context.push("pusher", ExecutionContextActions.TESTING, None)
|
context2 = context.push("pusher", ContextActions.TESTING, None)
|
||||||
context3 = context2.push("pusher", ExecutionContextActions.TESTING, None)
|
context3 = context2.push("pusher", ContextActions.TESTING, None)
|
||||||
context3.private_hints.add("another_hint")
|
context3.private_hints.add("another_hint")
|
||||||
context4 = context3.push("pusher", ExecutionContextActions.TESTING, None)
|
context4 = context3.push("pusher", ContextActions.TESTING, None)
|
||||||
context5 = context.push("pusher", ExecutionContextActions.TESTING, None)
|
context5 = context.push("pusher", ContextActions.TESTING, None)
|
||||||
|
|
||||||
assert context.private_hints == {"new_hint"}
|
assert context.private_hints == {"new_hint"}
|
||||||
assert context2.private_hints == set()
|
assert context2.private_hints == set()
|
||||||
@@ -103,10 +103,24 @@ def test_i_can_manage_private_hints(context):
|
|||||||
assert context5.private_hints == set()
|
assert context5.private_hints == set()
|
||||||
|
|
||||||
|
|
||||||
|
def test_i_can_check_if_hints_are_in_context(context):
|
||||||
|
context.private_hints.add("private_hint")
|
||||||
|
context.protected_hints.add("protected_hint")
|
||||||
|
context.global_hints.add("global_hint")
|
||||||
|
assert context.in_context("private_hint")
|
||||||
|
assert context.in_context("protected_hint")
|
||||||
|
assert context.in_context("global_hint")
|
||||||
|
|
||||||
|
context2 = context.push("pusher", ContextActions.TESTING, None)
|
||||||
|
assert not context2.in_context("private_hint")
|
||||||
|
assert context2.in_context("protected_hint")
|
||||||
|
assert context2.in_context("global_hint")
|
||||||
|
|
||||||
|
|
||||||
def test_i_can_keep_track_of_children(context):
|
def test_i_can_keep_track_of_children(context):
|
||||||
context2 = context.push("pusher", ExecutionContextActions.TESTING, None)
|
context2 = context.push("pusher", ContextActions.TESTING, None)
|
||||||
context3 = context.push("pusher", ExecutionContextActions.TESTING, None)
|
context3 = context.push("pusher", ContextActions.TESTING, None)
|
||||||
context4 = context2.push("pusher2", ExecutionContextActions.TESTING, None)
|
context4 = context2.push("pusher2", ContextActions.TESTING, None)
|
||||||
|
|
||||||
assert len(context._children) == 2
|
assert len(context._children) == 2
|
||||||
assert len(context2._children) == 1
|
assert len(context2._children) == 1
|
||||||
@@ -115,13 +129,13 @@ def test_i_can_keep_track_of_children(context):
|
|||||||
|
|
||||||
|
|
||||||
def test_i_can_get_children(context):
|
def test_i_can_get_children(context):
|
||||||
context1 = context.push("child 1", ExecutionContextActions.TESTING, None)
|
context1 = context.push("child 1", ContextActions.TESTING, None)
|
||||||
context2 = context.push("child 2", ExecutionContextActions.TESTING, None)
|
context2 = context.push("child 2", ContextActions.TESTING, None)
|
||||||
context3 = context.push("child 3", ExecutionContextActions.TESTING, None)
|
context3 = context.push("child 3", ContextActions.TESTING, None)
|
||||||
context21 = context2.push("child 21", ExecutionContextActions.TESTING, None)
|
context21 = context2.push("child 21", ContextActions.TESTING, None)
|
||||||
context22 = context2.push("child 22", ExecutionContextActions.TESTING, None)
|
context22 = context2.push("child 22", ContextActions.TESTING, None)
|
||||||
context211 = context21.push("child 211", ExecutionContextActions.TESTING, None)
|
context211 = context21.push("child 211", ContextActions.TESTING, None)
|
||||||
context31 = context3.push("child 31", ExecutionContextActions.TESTING, None)
|
context31 = context3.push("child 31", ContextActions.TESTING, None)
|
||||||
|
|
||||||
assert list(context1.get_children()) == []
|
assert list(context1.get_children()) == []
|
||||||
|
|
||||||
@@ -149,3 +163,15 @@ def test_i_can_get_children(context):
|
|||||||
context3,
|
context3,
|
||||||
context31,
|
context31,
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def test_i_can_get_parents(context):
|
||||||
|
context1 = context.push("child 1", ContextActions.TESTING, None)
|
||||||
|
context2 = context1.push("child 2", ContextActions.TESTING, None)
|
||||||
|
context3 = context2.push("child 3", ContextActions.TESTING, None)
|
||||||
|
|
||||||
|
assert list(context3.get_parents()) == [context2, context1, context]
|
||||||
|
assert list(context3.get_parents(level=1)) == [context2]
|
||||||
|
assert list(context3.get_parents(level=2)) == [context2, context1]
|
||||||
|
assert list(context3.get_parents(level=3)) == [context2, context1, context]
|
||||||
|
assert list(context3.get_parents(level=4)) == [context2, context1, context]
|
||||||
|
|||||||
@@ -0,0 +1,58 @@
|
|||||||
|
import pytest
|
||||||
|
|
||||||
|
from base import BaseTest
|
||||||
|
from conftest import NewOntology
|
||||||
|
from core.BuiltinConcepts import BuiltinConcepts
|
||||||
|
from core.error import ErrorContext
|
||||||
|
from evaluators.PythonEvaluator import PythonEvaluator
|
||||||
|
from evaluators.PythonParser import PythonParser
|
||||||
|
from helpers import _rv, _rvf, define_new_concept, get_concepts, get_metadata
|
||||||
|
from parsers.ParserInput import ParserInput
|
||||||
|
|
||||||
|
|
||||||
|
def get_parser_input_from(sheerka, context, command):
|
||||||
|
pi = ParserInput(command)
|
||||||
|
pi.init()
|
||||||
|
parser_start = _rv(sheerka.newn(BuiltinConcepts.PARSER_INPUT, pi=pi))
|
||||||
|
ret = PythonParser().eval(context, None, parser_start)
|
||||||
|
return ret.new[0]
|
||||||
|
|
||||||
|
|
||||||
|
class TestPythonEvaluator(BaseTest):
|
||||||
|
@pytest.fixture()
|
||||||
|
def evaluator(self, sheerka):
|
||||||
|
return sheerka.evaluators[PythonEvaluator.NAME]
|
||||||
|
|
||||||
|
def test_i_can_match(self, sheerka, context, evaluator):
|
||||||
|
ret_val = _rv(sheerka.newn(BuiltinConcepts.PYTHON_CODE))
|
||||||
|
assert evaluator.matches(context, ret_val).status is True
|
||||||
|
|
||||||
|
ret_val = _rv(sheerka.newn(BuiltinConcepts.UNKNOWN_CONCEPT)) # it responds to USER_INPUT only
|
||||||
|
assert evaluator.matches(context, ret_val).status is False
|
||||||
|
|
||||||
|
ret_val = _rvf(sheerka.newn(BuiltinConcepts.PYTHON_CODE)) # status should be true
|
||||||
|
assert evaluator.matches(context, ret_val).status is False
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("text, expected", [
|
||||||
|
("1 + 1", 2),
|
||||||
|
("echo('I have access to Sheerka !')", "I have access to Sheerka !"),
|
||||||
|
("sheerka.echo('I have access to Sheerka !')", "I have access to Sheerka !"),
|
||||||
|
("a=10\na", 10),
|
||||||
|
])
|
||||||
|
def test_i_can_evaluate_simple_expression(self, sheerka, context, evaluator, text, expected):
|
||||||
|
start = get_parser_input_from(sheerka, context, text)
|
||||||
|
ret = evaluator.eval(context, None, start)
|
||||||
|
assert ret.eaten == [start]
|
||||||
|
assert len(ret.new) == 1
|
||||||
|
assert ret.new[0].status is True
|
||||||
|
assert ret.new[0].value == expected
|
||||||
|
assert ret.new[0].parents == [start]
|
||||||
|
|
||||||
|
def test_i_can_detect_evaluation_error(self, sheerka, context, evaluator):
|
||||||
|
start = get_parser_input_from(sheerka, context, "a")
|
||||||
|
ret = evaluator.eval(context, None, start)
|
||||||
|
assert ret.eaten == []
|
||||||
|
assert len(ret.new) == 1
|
||||||
|
assert ret.new[0].status is False
|
||||||
|
assert isinstance(ret.new[0].value, ErrorContext)
|
||||||
|
assert ret.new[0].parents == [start]
|
||||||
@@ -0,0 +1,75 @@
|
|||||||
|
import pytest
|
||||||
|
|
||||||
|
from base import BaseTest
|
||||||
|
from core.BuiltinConcepts import BuiltinConcepts
|
||||||
|
from core.error import ErrorContext
|
||||||
|
from evaluators.PythonParser import PythonParser
|
||||||
|
from helpers import _rv, _rvf
|
||||||
|
from parsers.ParserInput import ParserInput
|
||||||
|
|
||||||
|
|
||||||
|
class TestPythonParser(BaseTest):
|
||||||
|
@pytest.fixture()
|
||||||
|
def evaluator(self, sheerka):
|
||||||
|
return sheerka.evaluators[PythonParser.NAME]
|
||||||
|
|
||||||
|
def test_i_can_match(self, sheerka, context, evaluator):
|
||||||
|
ret_val = _rv(sheerka.newn(BuiltinConcepts.PARSER_INPUT, pi=ParserInput("a command")))
|
||||||
|
assert evaluator.matches(context, ret_val).status is True
|
||||||
|
|
||||||
|
ret_val = _rv(sheerka.newn(BuiltinConcepts.UNKNOWN_CONCEPT)) # it responds to USER_INPUT only
|
||||||
|
assert evaluator.matches(context, ret_val).status is False
|
||||||
|
|
||||||
|
ret_val = _rvf(sheerka.newn(BuiltinConcepts.PARSER_INPUT, pi=ParserInput("a command"))) # status should be true
|
||||||
|
assert evaluator.matches(context, ret_val).status is False
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("text", [
|
||||||
|
"1 + 1",
|
||||||
|
"a = 20"
|
||||||
|
])
|
||||||
|
def test_i_can_parse_python(self, sheerka, context, evaluator, text):
|
||||||
|
pi = ParserInput(text)
|
||||||
|
pi.init()
|
||||||
|
start = _rv(sheerka.newn(BuiltinConcepts.PARSER_INPUT, pi=pi))
|
||||||
|
|
||||||
|
res = evaluator.eval(context, None, start)
|
||||||
|
|
||||||
|
assert res.eaten == [start]
|
||||||
|
assert len(res.new) == 1
|
||||||
|
ret_val = res.new[0]
|
||||||
|
assert ret_val.status is True
|
||||||
|
assert sheerka.isinstance(ret_val.value, BuiltinConcepts.PYTHON_CODE)
|
||||||
|
assert ret_val.parents == [start]
|
||||||
|
|
||||||
|
def test_invalid_python_are_rejected(self, sheerka, context, evaluator):
|
||||||
|
text = "1 + "
|
||||||
|
pi = ParserInput(text)
|
||||||
|
pi.init()
|
||||||
|
start = _rv(sheerka.newn(BuiltinConcepts.PARSER_INPUT, pi=pi))
|
||||||
|
|
||||||
|
res = evaluator.eval(context, None, start)
|
||||||
|
|
||||||
|
assert res.eaten == []
|
||||||
|
assert len(res.new) == 1
|
||||||
|
ret_val = res.new[0]
|
||||||
|
assert ret_val.status is False
|
||||||
|
assert isinstance(ret_val.value, ErrorContext)
|
||||||
|
assert ret_val.parents == [start]
|
||||||
|
|
||||||
|
def test_i_can_detect_concepts(self, sheerka, context, evaluator):
|
||||||
|
pi = ParserInput("c:one: + c:two:")
|
||||||
|
pi.init()
|
||||||
|
start = _rv(sheerka.newn(BuiltinConcepts.PARSER_INPUT, pi=pi))
|
||||||
|
|
||||||
|
res = evaluator.eval(context, None, start)
|
||||||
|
|
||||||
|
assert res.eaten == [start]
|
||||||
|
assert len(res.new) == 1
|
||||||
|
ret_val = res.new[0]
|
||||||
|
assert ret_val.status is True
|
||||||
|
assert sheerka.isinstance(ret_val.value, BuiltinConcepts.PYTHON_CODE)
|
||||||
|
assert ret_val.parents == [start]
|
||||||
|
assert len(ret_val.value.pf.namespace) == 2
|
||||||
|
assert ret_val.value.pf.namespace["__C__KEY_one__ID_00None00__C__"].value == ("one", None)
|
||||||
|
assert ret_val.value.pf.namespace["__C__KEY_two__ID_00None00__C__"].value == ("two", None)
|
||||||
|
|
||||||
@@ -0,0 +1,22 @@
|
|||||||
|
import pytest
|
||||||
|
|
||||||
|
from core.error import ErrorContext, ErrorObj, MethodAccessError
|
||||||
|
|
||||||
|
|
||||||
|
class DummyErrorObj(ErrorObj):
|
||||||
|
def __init__(self, msg):
|
||||||
|
self.msg = msg
|
||||||
|
|
||||||
|
def get_error_msg(self) -> str:
|
||||||
|
return self.msg
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("error_hint, expected", [
|
||||||
|
("some value", "some value"),
|
||||||
|
(["value a", "value b"], "value a, value b"),
|
||||||
|
(MethodAccessError("a"), "Cannot access method 'a'"),
|
||||||
|
(DummyErrorObj("error msg"), "error msg")
|
||||||
|
])
|
||||||
|
def test_i_can_get_error_msg(context, error_hint, expected):
|
||||||
|
error = ErrorContext("Test", context, error_hint)
|
||||||
|
assert error.get_error_msg() == expected
|
||||||
+2
-4
@@ -2,7 +2,7 @@ from common.global_symbols import NotInit
|
|||||||
from core.ExecutionContext import ExecutionContext
|
from core.ExecutionContext import ExecutionContext
|
||||||
from core.ReturnValue import ReturnValue
|
from core.ReturnValue import ReturnValue
|
||||||
from core.concept import Concept, ConceptMetadata, DefinitionType
|
from core.concept import Concept, ConceptMetadata, DefinitionType
|
||||||
from core.services.SheerkaConceptManager import ConceptManager
|
from services.SheerkaConceptManager import ConceptManager
|
||||||
|
|
||||||
|
|
||||||
class GetNextId:
|
class GetNextId:
|
||||||
@@ -294,8 +294,6 @@ def get_metadatas(*args, **kwargs):
|
|||||||
def get_concepts(context: ExecutionContext, *concepts, **kwargs) -> list[Concept]:
|
def get_concepts(context: ExecutionContext, *concepts, **kwargs) -> list[Concept]:
|
||||||
"""
|
"""
|
||||||
Simple and quick way to get initialize concepts for a test
|
Simple and quick way to get initialize concepts for a test
|
||||||
:param sheerka:
|
|
||||||
:type sheerka:
|
|
||||||
:param context:
|
:param context:
|
||||||
:type context:
|
:type context:
|
||||||
:param concepts:
|
:param concepts:
|
||||||
@@ -322,7 +320,7 @@ def get_concepts(context: ExecutionContext, *concepts, **kwargs) -> list[Concept
|
|||||||
return res
|
return res
|
||||||
|
|
||||||
|
|
||||||
def define_new_concept(context: ExecutionContext, c: str | Concept) -> Concept:
|
def define_new_concept(context: ExecutionContext, c: str | Concept | ConceptMetadata) -> Concept:
|
||||||
sheerka = context.sheerka
|
sheerka = context.sheerka
|
||||||
if isinstance(c, str):
|
if isinstance(c, str):
|
||||||
retval = sheerka.define_new_concept(context, c)
|
retval = sheerka.define_new_concept(context, c)
|
||||||
|
|||||||
@@ -0,0 +1,18 @@
|
|||||||
|
from base import BaseTest
|
||||||
|
|
||||||
|
|
||||||
|
class TestNonReg1(BaseTest):
|
||||||
|
|
||||||
|
def test_i_can_evaluate_python(self, sheerka, user):
|
||||||
|
res = sheerka.evaluate_user_input("1 + 1", user)
|
||||||
|
assert len(res) == 1
|
||||||
|
ret_val = res[0]
|
||||||
|
assert ret_val.status is True
|
||||||
|
assert ret_val.value == 2
|
||||||
|
|
||||||
|
def test_i_can_evaluate_variable_that_is_not_defined(self, sheerka, user):
|
||||||
|
res = sheerka.evaluate_user_input("a", user)
|
||||||
|
assert len(res) == 1
|
||||||
|
ret_val = res[0]
|
||||||
|
|
||||||
|
assert ret_val.status is False
|
||||||
@@ -1,5 +1,7 @@
|
|||||||
|
import pytest
|
||||||
|
|
||||||
from parsers.ParserInput import ParserInput
|
from parsers.ParserInput import ParserInput
|
||||||
from parsers.tokenizer import LexerError
|
from parsers.tokenizer import LexerError, TokenKind
|
||||||
|
|
||||||
|
|
||||||
def test_i_can_parser_input():
|
def test_i_can_parser_input():
|
||||||
@@ -12,3 +14,27 @@ def test_i_can_detect_errors():
|
|||||||
parser_input = ParserInput('def concept "a')
|
parser_input = ParserInput('def concept "a')
|
||||||
assert parser_input.init() is False
|
assert parser_input.init() is False
|
||||||
assert isinstance(parser_input.exception, LexerError)
|
assert isinstance(parser_input.exception, LexerError)
|
||||||
|
|
||||||
|
|
||||||
|
def test_can_as_text_and_track_tokens():
|
||||||
|
parser_input = ParserInput("execute(c:name1: if r:#id: else c:name2:)")
|
||||||
|
parser_input.init()
|
||||||
|
|
||||||
|
switcher = {TokenKind.CONCEPT: lambda t: f"__CONCEPT__{t.value[0]}",
|
||||||
|
TokenKind.RULE: lambda t: f"__RULE__{t.value[1]}"}
|
||||||
|
tracker = {}
|
||||||
|
text = parser_input.as_text(switcher, tracker)
|
||||||
|
|
||||||
|
assert text == "execute(__CONCEPT__name1 if __RULE__id else __CONCEPT__name2)"
|
||||||
|
assert len(tracker) == 3
|
||||||
|
assert tracker["__CONCEPT__name1"] == parser_input.all_tokens[2]
|
||||||
|
assert tracker["__RULE__id"] == parser_input.all_tokens[6]
|
||||||
|
assert tracker["__CONCEPT__name2"] == parser_input.all_tokens[10]
|
||||||
|
|
||||||
|
|
||||||
|
def test_i_must_call_init_before_call_as_text():
|
||||||
|
parser_input = ParserInput("execute(c:name1: if r:#id: else c:name2:)")
|
||||||
|
with pytest.raises(Exception) as ex:
|
||||||
|
parser_input.as_text()
|
||||||
|
|
||||||
|
assert ex.value.args[0] == "You must call init() first !"
|
||||||
|
|||||||
@@ -0,0 +1,101 @@
|
|||||||
|
import pytest
|
||||||
|
|
||||||
|
from base import BaseTest
|
||||||
|
from common.global_symbols import NotInit
|
||||||
|
from conftest import NewOntology
|
||||||
|
from core.concept import ConceptDefaultProps
|
||||||
|
from core.python_fragment import PythonFragment
|
||||||
|
from helpers import define_new_concept, get_metadata
|
||||||
|
from services.SheerkaConceptEvaluator import ConceptEvaluator
|
||||||
|
from services.SheerkaPython import EvaluationRef
|
||||||
|
|
||||||
|
|
||||||
|
class TestConceptManager(BaseTest):
|
||||||
|
|
||||||
|
@pytest.fixture()
|
||||||
|
def service(self, sheerka) -> ConceptEvaluator:
|
||||||
|
return sheerka.services[ConceptEvaluator.NAME]
|
||||||
|
|
||||||
|
def test_i_can_build_concept(self, context, service):
|
||||||
|
metadata = get_metadata(
|
||||||
|
name="foo",
|
||||||
|
where="isinstance(x, Concept)",
|
||||||
|
pre="in_context(IS_QUESTION)",
|
||||||
|
body="one + a",
|
||||||
|
post="'post parameter'",
|
||||||
|
ret="self",
|
||||||
|
variables=(("a", "1"), ("b", "NotInit"))
|
||||||
|
)
|
||||||
|
|
||||||
|
compiled = service.build(context, metadata)
|
||||||
|
pf = getattr(compiled, ConceptDefaultProps.WHERE)
|
||||||
|
assert isinstance(pf, PythonFragment)
|
||||||
|
assert pf.source_code == metadata.where
|
||||||
|
|
||||||
|
pf = getattr(compiled, ConceptDefaultProps.PRE)
|
||||||
|
assert isinstance(pf, PythonFragment)
|
||||||
|
assert pf.source_code == metadata.pre
|
||||||
|
|
||||||
|
pf = getattr(compiled, ConceptDefaultProps.BODY)
|
||||||
|
assert isinstance(pf, PythonFragment)
|
||||||
|
assert pf.source_code == metadata.body
|
||||||
|
|
||||||
|
pf = getattr(compiled, ConceptDefaultProps.POST)
|
||||||
|
assert isinstance(pf, PythonFragment)
|
||||||
|
assert pf.source_code == metadata.post
|
||||||
|
|
||||||
|
pf = getattr(compiled, ConceptDefaultProps.RET)
|
||||||
|
assert isinstance(pf, PythonFragment)
|
||||||
|
assert pf.source_code == metadata.ret
|
||||||
|
|
||||||
|
pf = getattr(compiled, "a")
|
||||||
|
assert isinstance(pf, PythonFragment)
|
||||||
|
assert pf.source_code == metadata.variables[0][1]
|
||||||
|
|
||||||
|
pf = getattr(compiled, "b")
|
||||||
|
assert isinstance(pf, PythonFragment)
|
||||||
|
assert pf.source_code == metadata.variables[1][1]
|
||||||
|
|
||||||
|
def test_i_can_manage_when_no_source_code(self, context, service):
|
||||||
|
metadata = get_metadata(name="foo")
|
||||||
|
|
||||||
|
compiled = service.build(context, metadata)
|
||||||
|
assert getattr(compiled, ConceptDefaultProps.WHERE) is None
|
||||||
|
assert getattr(compiled, ConceptDefaultProps.PRE) is None
|
||||||
|
assert getattr(compiled, ConceptDefaultProps.BODY) is None
|
||||||
|
assert getattr(compiled, ConceptDefaultProps.POST) is None
|
||||||
|
assert getattr(compiled, ConceptDefaultProps.RET) is None
|
||||||
|
|
||||||
|
def test_i_can_detect_when_requested_names_are_concept_variables(self, context, service):
|
||||||
|
metadata = get_metadata(
|
||||||
|
name="foo",
|
||||||
|
body="one + a",
|
||||||
|
variables=(("a", "1"), ("b", "NotInit")))
|
||||||
|
|
||||||
|
compiled = service.build(context, metadata)
|
||||||
|
pf = getattr(compiled, ConceptDefaultProps.BODY)
|
||||||
|
assert isinstance(pf, PythonFragment)
|
||||||
|
assert pf.namespace == {"a": EvaluationRef("self", "a"),
|
||||||
|
"b": EvaluationRef("self", "b")}
|
||||||
|
|
||||||
|
def test_i_can_eval_concept_attributes(self, context, service):
|
||||||
|
with NewOntology(context, "test_i_can_eval_concept_attributes"):
|
||||||
|
foo_metadata = get_metadata(name="foo",
|
||||||
|
where="isinstance(a, int)",
|
||||||
|
pre="True",
|
||||||
|
body="2 + a",
|
||||||
|
post="'post parameter'",
|
||||||
|
ret="self",
|
||||||
|
variables=(("a", "1"), ("b", "NotInit")))
|
||||||
|
foo = define_new_concept(context, foo_metadata)
|
||||||
|
|
||||||
|
res = service.evaluate_concept(context, foo)
|
||||||
|
|
||||||
|
assert context.sheerka.isinstance(res, foo)
|
||||||
|
assert res.get_value("a") == 1
|
||||||
|
assert res.get_value("b") == NotInit
|
||||||
|
assert res.get_value(ConceptDefaultProps.WHERE) is True
|
||||||
|
assert res.get_value(ConceptDefaultProps.PRE) is True
|
||||||
|
assert res.get_value(ConceptDefaultProps.BODY) == 3
|
||||||
|
assert res.get_value(ConceptDefaultProps.POST) == "post parameter"
|
||||||
|
assert res.get_value(ConceptDefaultProps.RET) == res
|
||||||
@@ -4,9 +4,9 @@ from base import BaseTest
|
|||||||
from common.global_symbols import NotFound, NotInit
|
from common.global_symbols import NotFound, NotInit
|
||||||
from conftest import NewOntology
|
from conftest import NewOntology
|
||||||
from core.BuiltinConcepts import BuiltinConcepts
|
from core.BuiltinConcepts import BuiltinConcepts
|
||||||
from core.ErrorContext import ErrorContext
|
|
||||||
from core.concept import ConceptMetadata
|
from core.concept import ConceptMetadata
|
||||||
from core.services.SheerkaConceptManager import ConceptAlreadyDefined, ConceptManager
|
from core.error import ErrorContext
|
||||||
|
from services.SheerkaConceptManager import ConceptAlreadyDefined, ConceptManager
|
||||||
from helpers import get_metadata
|
from helpers import get_metadata
|
||||||
|
|
||||||
|
|
||||||
@@ -24,7 +24,7 @@ class TestConceptManager(BaseTest):
|
|||||||
"""
|
"""
|
||||||
metadata = get_metadata("foo", "body")
|
metadata = get_metadata("foo", "body")
|
||||||
digest = service.compute_metadata_digest(metadata)
|
digest = service.compute_metadata_digest(metadata)
|
||||||
assert digest == "21a1c2f420da62f4dc60f600c95b19dd9527b19dd28fd38e17f5c0e28963d176"
|
assert digest == "7c0f1708968e0312be622950d3f21d588f718f7ba568054ece64d077052a6476"
|
||||||
|
|
||||||
another_metadata = get_metadata("foo", "body")
|
another_metadata = get_metadata("foo", "body")
|
||||||
other_digest = service.compute_metadata_digest(another_metadata)
|
other_digest = service.compute_metadata_digest(another_metadata)
|
||||||
@@ -86,7 +86,7 @@ class TestConceptManager(BaseTest):
|
|||||||
assert metadata.name == "name"
|
assert metadata.name == "name"
|
||||||
assert metadata.key == "name"
|
assert metadata.key == "name"
|
||||||
assert metadata.body == "body"
|
assert metadata.body == "body"
|
||||||
assert metadata.digest == "eb0620bd4a317af8a403c0ae1e185a528f9b58f8b0878d990e62278f89cf10d5"
|
assert metadata.digest == "c75faa4efbc9ef9dbc5174c52786d5b066e2ece41486b81c27336e292917fecb"
|
||||||
assert metadata.all_attrs == ('#where#', '#pre#', '#post#', '#body#', '#ret#')
|
assert metadata.all_attrs == ('#where#', '#pre#', '#post#', '#body#', '#ret#')
|
||||||
|
|
||||||
# is sorted in db
|
# is sorted in db
|
||||||
@@ -117,6 +117,11 @@ class TestConceptManager(BaseTest):
|
|||||||
res = service.define_new_concept(context, "name", body="body")
|
res = service.define_new_concept(context, "name", body="body")
|
||||||
assert res.status is True
|
assert res.status is True
|
||||||
|
|
||||||
|
def test_i_cannot_get_by_if_concept_does_not_exist(self, service):
|
||||||
|
assert service.get_by_id("unresolved_id") == NotFound
|
||||||
|
assert service.get_by_name("unresolved name") == NotFound
|
||||||
|
assert service.get_by_key("unresolved_hash") == NotFound
|
||||||
|
|
||||||
def test_i_can_get_a_newly_created_concept(self, context, service):
|
def test_i_can_get_a_newly_created_concept(self, context, service):
|
||||||
with NewOntology(context, "test_i_can_get_a_newly_created_concept"):
|
with NewOntology(context, "test_i_can_get_a_newly_created_concept"):
|
||||||
res = service.define_new_concept(context, "name", body="body")
|
res = service.define_new_concept(context, "name", body="body")
|
||||||
@@ -141,6 +146,19 @@ class TestConceptManager(BaseTest):
|
|||||||
assert foo.var1 == "value1"
|
assert foo.var1 == "value1"
|
||||||
assert foo.var2 == "value2"
|
assert foo.var2 == "value2"
|
||||||
|
|
||||||
|
def test_i_can_manage_when_concepts_with_the_same_name(self, context, service):
|
||||||
|
with NewOntology(context, "test_i_can_manage_when_concepts_with_the_same_name"):
|
||||||
|
service.define_new_concept(context, "foo", body="body1")
|
||||||
|
service.define_new_concept(context, "foo", body="body2")
|
||||||
|
|
||||||
|
concepts = service.newn("foo")
|
||||||
|
|
||||||
|
assert len(concepts) == 2
|
||||||
|
assert concepts[0].name == "foo"
|
||||||
|
assert concepts[0].get_metadata().body == "body1"
|
||||||
|
assert concepts[1].name == "foo"
|
||||||
|
assert concepts[1].get_metadata().body == "body2"
|
||||||
|
|
||||||
def test_i_can_instantiate_a_new_concept_by_its_id(self, context, service):
|
def test_i_can_instantiate_a_new_concept_by_its_id(self, context, service):
|
||||||
with NewOntology(context, "test_i_can_instantiate_a_new_concept_by_its_id"):
|
with NewOntology(context, "test_i_can_instantiate_a_new_concept_by_its_id"):
|
||||||
res = service.define_new_concept(context, "foo", variables=[("var1", None), ("var2", None)])
|
res = service.define_new_concept(context, "foo", variables=[("var1", None), ("var2", None)])
|
||||||
@@ -184,3 +202,64 @@ class TestConceptManager(BaseTest):
|
|||||||
|
|
||||||
context.sheerka.om.pop_ontology(context)
|
context.sheerka.om.pop_ontology(context)
|
||||||
assert service.get_by_id(res.value.metadata.id) is NotFound
|
assert service.get_by_id(res.value.metadata.id) is NotFound
|
||||||
|
|
||||||
|
def test_i_can_new(self, context, service):
|
||||||
|
with NewOntology(context, "test_i_can_new"):
|
||||||
|
res = service.define_new_concept(context, "name", body="body", variables=[("my_var", None)])
|
||||||
|
assert res.status
|
||||||
|
metadata = res.value.metadata
|
||||||
|
|
||||||
|
# I can create a new concept
|
||||||
|
res = service.new(metadata, my_var="my_var_value")
|
||||||
|
assert res.id == metadata.id
|
||||||
|
assert res.my_var == "my_var_value"
|
||||||
|
|
||||||
|
res = service.new((metadata.name, None), my_var="my_var_value")
|
||||||
|
assert res.id == metadata.id
|
||||||
|
assert res.my_var == "my_var_value"
|
||||||
|
|
||||||
|
res = service.new((None, metadata.id), my_var="my_var_value")
|
||||||
|
assert res.id == metadata.id
|
||||||
|
assert res.my_var == "my_var_value"
|
||||||
|
|
||||||
|
res = service.new("c:name:", my_var="my_var_value")
|
||||||
|
assert res.id == metadata.id
|
||||||
|
assert res.my_var == "my_var_value"
|
||||||
|
|
||||||
|
res = service.new("c:#1001:", my_var="my_var_value")
|
||||||
|
assert res.id == metadata.id
|
||||||
|
assert res.my_var == "my_var_value"
|
||||||
|
|
||||||
|
res = service.new("c:name#1001:", my_var="my_var_value")
|
||||||
|
assert res.id == metadata.id
|
||||||
|
assert res.my_var == "my_var_value"
|
||||||
|
|
||||||
|
# cannot new using id
|
||||||
|
assert service.new(f"1001").name == BuiltinConcepts.UNKNOWN_CONCEPT
|
||||||
|
|
||||||
|
def test_id_is_used_when_name_and_id_are_provided(self, context, service):
|
||||||
|
with NewOntology(context, "test_id_is_used_when_name_and_id_are_provided"):
|
||||||
|
res = service.define_new_concept(context, "name", body="body1")
|
||||||
|
metadata = res.value.metadata
|
||||||
|
service.define_new_concept(context, "name", body="body2")
|
||||||
|
|
||||||
|
assert service.new((metadata.name, metadata.id)).id == metadata.id
|
||||||
|
|
||||||
|
def test_unknown_concept_is_return_if_the_identifier_is_not_found(self, service):
|
||||||
|
assert service.new("unknown").name == BuiltinConcepts.UNKNOWN_CONCEPT
|
||||||
|
|
||||||
|
def test_can_get_all_concepts(self, context, service):
|
||||||
|
with NewOntology(context, "test_i_can_new"):
|
||||||
|
service.define_new_concept(context, "foo")
|
||||||
|
service.define_new_concept(context, "bar")
|
||||||
|
context.sheerka.om.push_ontology("another ontology")
|
||||||
|
service.define_new_concept(context, "baz")
|
||||||
|
service.define_new_concept(context, "qux")
|
||||||
|
|
||||||
|
all_concepts = service.get_all_concepts()
|
||||||
|
assert [c.name for c in all_concepts if not c.is_builtin] == ["foo", "bar", "baz", "qux"]
|
||||||
|
|
||||||
|
# sanity check. Concepts are discarded when ontology is popped
|
||||||
|
context.sheerka.om.pop_ontology(context)
|
||||||
|
all_concepts = service.get_all_concepts()
|
||||||
|
assert [c.name for c in all_concepts if not c.is_builtin] == ["foo", "bar"]
|
||||||
|
|||||||
@@ -0,0 +1,29 @@
|
|||||||
|
import pytest
|
||||||
|
|
||||||
|
from base import BaseTest
|
||||||
|
from services.SheerkaAdmin import SheerkaAdmin
|
||||||
|
from helpers import get_concepts
|
||||||
|
|
||||||
|
|
||||||
|
class TestConceptManager(BaseTest):
|
||||||
|
|
||||||
|
@pytest.fixture()
|
||||||
|
def service(self, sheerka):
|
||||||
|
return sheerka.services[SheerkaAdmin.NAME]
|
||||||
|
|
||||||
|
def test_i_can_test_extended_is_admin(self, context, service):
|
||||||
|
foo, bar = get_concepts(context, "foo", "bar", use_sheerka=True)
|
||||||
|
|
||||||
|
foo1 = context.sheerka.newn("foo")
|
||||||
|
|
||||||
|
assert service.extended_isinstance(1, int)
|
||||||
|
assert service.extended_isinstance(foo, "foo")
|
||||||
|
assert service.extended_isinstance(foo, foo1)
|
||||||
|
assert service.extended_isinstance(foo, foo1.get_metadata())
|
||||||
|
assert service.extended_isinstance(foo, "c:#1001:")
|
||||||
|
|
||||||
|
assert not service.extended_isinstance("1", int)
|
||||||
|
assert not service.extended_isinstance(foo, "bar")
|
||||||
|
assert not service.extended_isinstance(foo, bar)
|
||||||
|
assert not service.extended_isinstance(foo, bar.get_metadata())
|
||||||
|
assert not service.extended_isinstance(foo, "c:#1002:")
|
||||||
@@ -4,28 +4,27 @@ import pytest
|
|||||||
|
|
||||||
from base import BaseTest
|
from base import BaseTest
|
||||||
from core.BuiltinConcepts import BuiltinConcepts
|
from core.BuiltinConcepts import BuiltinConcepts
|
||||||
from core.ExecutionContext import ExecutionContext, ExecutionContextActions
|
from core.ExecutionContext import ExecutionContext, ContextActions
|
||||||
from core.ReturnValue import ReturnValue
|
from core.ReturnValue import ReturnValue
|
||||||
from core.services.SheerkaEngine import SheerkaEngine
|
|
||||||
from evaluators.CreateParserInput import CreateParserInput
|
from evaluators.CreateParserInput import CreateParserInput
|
||||||
from evaluators.base_evaluator import AllReturnValuesEvaluator, BaseEvaluator, EvaluatorEvalResult, \
|
from evaluators.base_evaluator import AllReturnValuesEvaluator, BaseEvaluator, EvaluatorEvalResult, \
|
||||||
EvaluatorMatchResult, \
|
EvaluatorMatchResult, OneReturnValueEvaluator
|
||||||
OneReturnValueEvaluator
|
|
||||||
from helpers import _rvc
|
from helpers import _rvc
|
||||||
|
from services.SheerkaEngine import SheerkaEngine
|
||||||
|
|
||||||
ALL_STEPS = [
|
ALL_STEPS = [
|
||||||
ExecutionContextActions.BEFORE_PARSING,
|
ContextActions.BEFORE_PARSING,
|
||||||
ExecutionContextActions.PARSING,
|
ContextActions.PARSING,
|
||||||
ExecutionContextActions.AFTER_PARSING,
|
ContextActions.AFTER_PARSING,
|
||||||
ExecutionContextActions.BEFORE_EVALUATION,
|
ContextActions.BEFORE_EVALUATION,
|
||||||
ExecutionContextActions.EVALUATION,
|
ContextActions.EVALUATION,
|
||||||
ExecutionContextActions.AFTER_EVALUATION
|
ContextActions.AFTER_EVALUATION
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
class OneReturnValueEvaluatorForTesting(OneReturnValueEvaluator):
|
class OneReturnValueEvaluatorForTesting(OneReturnValueEvaluator):
|
||||||
def __init__(self, name,
|
def __init__(self, name,
|
||||||
step: ExecutionContextActions,
|
step: ContextActions,
|
||||||
priority: int,
|
priority: int,
|
||||||
enabled=True,
|
enabled=True,
|
||||||
match: bool | Callable = True,
|
match: bool | Callable = True,
|
||||||
@@ -56,12 +55,12 @@ class OneReturnValueEvaluatorForTesting(OneReturnValueEvaluator):
|
|||||||
if ret_val != return_value:
|
if ret_val != return_value:
|
||||||
ret_val.parents = [return_value]
|
ret_val.parents = [return_value]
|
||||||
|
|
||||||
return EvaluatorEvalResult(self.eval_result, self.eval_eaten or [return_value])
|
return EvaluatorEvalResult(self.eval_result, [return_value] if self.eval_eaten is None else self.eval_eaten)
|
||||||
|
|
||||||
|
|
||||||
class AllReturnValuesEvaluatorForTesting(AllReturnValuesEvaluator):
|
class AllReturnValuesEvaluatorForTesting(AllReturnValuesEvaluator):
|
||||||
def __init__(self, name,
|
def __init__(self, name,
|
||||||
step: ExecutionContextActions,
|
step: ContextActions,
|
||||||
priority: int,
|
priority: int,
|
||||||
enabled=True,
|
enabled=True,
|
||||||
match: bool | Callable = True,
|
match: bool | Callable = True,
|
||||||
@@ -91,31 +90,31 @@ class AllReturnValuesEvaluatorForTesting(AllReturnValuesEvaluator):
|
|||||||
for ret_val in self.eval_result:
|
for ret_val in self.eval_result:
|
||||||
ret_val.parents = return_values
|
ret_val.parents = return_values
|
||||||
|
|
||||||
return EvaluatorEvalResult(self.eval_result, self.eval_eaten or return_values)
|
return EvaluatorEvalResult(self.eval_result, return_values if self.eval_eaten is None else self.eval_eaten)
|
||||||
|
|
||||||
|
|
||||||
class TestSheerkaEngine(BaseTest):
|
class TestSheerkaEngine(BaseTest):
|
||||||
@pytest.fixture()
|
@pytest.fixture()
|
||||||
def service(self, sheerka):
|
def service(self, sheerka):
|
||||||
return SheerkaEngine(sheerka)
|
return SheerkaEngine(sheerka) # I want a new instance to keep Sheerka clean (when a change execution_plan)
|
||||||
|
|
||||||
def test_i_can_compute_execution_plan(self, service):
|
def test_i_can_compute_execution_plan(self, service):
|
||||||
assert service.compute_execution_plan([]) == {}
|
assert service.compute_execution_plan([]) == {}
|
||||||
|
|
||||||
e1 = BaseEvaluator("eval1", ExecutionContextActions.BEFORE_EVALUATION, 5)
|
e1 = BaseEvaluator("eval1", ContextActions.BEFORE_EVALUATION, 5)
|
||||||
e2 = BaseEvaluator("eval2", ExecutionContextActions.BEFORE_EVALUATION, 5)
|
e2 = BaseEvaluator("eval2", ContextActions.BEFORE_EVALUATION, 5)
|
||||||
e3 = BaseEvaluator("eval3", ExecutionContextActions.BEFORE_EVALUATION, 10)
|
e3 = BaseEvaluator("eval3", ContextActions.BEFORE_EVALUATION, 10)
|
||||||
e4 = BaseEvaluator("eval4", ExecutionContextActions.EVALUATION, 10)
|
e4 = BaseEvaluator("eval4", ContextActions.EVALUATION, 10)
|
||||||
e5 = BaseEvaluator("eval5", ExecutionContextActions.AFTER_EVALUATION, 10, enabled=False)
|
e5 = BaseEvaluator("eval5", ContextActions.AFTER_EVALUATION, 10, enabled=False)
|
||||||
res = service.compute_execution_plan([e1, e2, e3, e4, e5])
|
res = service.compute_execution_plan([e1, e2, e3, e4, e5])
|
||||||
assert res == {ExecutionContextActions.BEFORE_EVALUATION: {5: [e1, e2], 10: [e3]},
|
assert res == {ContextActions.BEFORE_EVALUATION: {5: [e1, e2], 10: [e3]},
|
||||||
ExecutionContextActions.EVALUATION: {10: [e4]}}
|
ContextActions.EVALUATION: {10: [e4]}}
|
||||||
|
|
||||||
def test_i_can_call_execute(self, sheerka, context, service):
|
def test_i_can_call_execute(self, sheerka, context, service):
|
||||||
service.execution_plan = {ExecutionContextActions.BEFORE_EVALUATION: {50: [CreateParserInput()]}}
|
service.execution_plan = {ContextActions.BEFORE_EVALUATION: {50: [CreateParserInput()]}}
|
||||||
start = [ReturnValue("TestSheerkaEngine", True, sheerka.newn(BuiltinConcepts.USER_INPUT, command="1 + 1"))]
|
start = [ReturnValue("TestSheerkaEngine", True, sheerka.newn(BuiltinConcepts.USER_INPUT, command="1 + 1"))]
|
||||||
|
|
||||||
ret = service.execute(context, start, [ExecutionContextActions.BEFORE_EVALUATION])
|
ret = service.execute(context, start, [ContextActions.BEFORE_EVALUATION])
|
||||||
assert len(ret) == 1
|
assert len(ret) == 1
|
||||||
ret = ret[0]
|
ret = ret[0]
|
||||||
assert isinstance(ret, ReturnValue)
|
assert isinstance(ret, ReturnValue)
|
||||||
@@ -127,7 +126,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
service.execution_plan = {}
|
service.execution_plan = {}
|
||||||
start = [_rvc("foo")]
|
start = [_rvc("foo")]
|
||||||
|
|
||||||
ret = service.execute(context, start, [ExecutionContextActions.EVALUATION])
|
ret = service.execute(context, start, [ContextActions.EVALUATION])
|
||||||
|
|
||||||
assert ret == start
|
assert ret == start
|
||||||
|
|
||||||
@@ -135,12 +134,12 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
# properly init the service
|
# properly init the service
|
||||||
_ = OneReturnValueEvaluatorForTesting
|
_ = OneReturnValueEvaluatorForTesting
|
||||||
evaluators = [
|
evaluators = [
|
||||||
_("eval1", ExecutionContextActions.AFTER_PARSING, 21, match=False),
|
_("eval1", ContextActions.AFTER_PARSING, 21, match=False),
|
||||||
_("eval2", ExecutionContextActions.BEFORE_EVALUATION, 5, match=False),
|
_("eval2", ContextActions.BEFORE_EVALUATION, 5, match=False),
|
||||||
_("eval3", ExecutionContextActions.AFTER_EVALUATION, 12, match=False),
|
_("eval3", ContextActions.AFTER_EVALUATION, 12, match=False),
|
||||||
_("eval4", ExecutionContextActions.EVALUATION, 99, match=False),
|
_("eval4", ContextActions.EVALUATION, 99, match=False),
|
||||||
_("eval5", ExecutionContextActions.BEFORE_PARSING, 5, match=False),
|
_("eval5", ContextActions.BEFORE_PARSING, 5, match=False),
|
||||||
_("eval6", ExecutionContextActions.PARSING, 25, match=False),
|
_("eval6", ContextActions.PARSING, 25, match=False),
|
||||||
]
|
]
|
||||||
service.execution_plan = service.compute_execution_plan(evaluators)
|
service.execution_plan = service.compute_execution_plan(evaluators)
|
||||||
|
|
||||||
@@ -155,15 +154,15 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
# properly init the service
|
# properly init the service
|
||||||
_ = OneReturnValueEvaluatorForTesting
|
_ = OneReturnValueEvaluatorForTesting
|
||||||
evaluators = [
|
evaluators = [
|
||||||
_("eval1", ExecutionContextActions.EVALUATION, 20, match=False),
|
_("eval1", ContextActions.EVALUATION, 20, match=False),
|
||||||
_("eval2", ExecutionContextActions.EVALUATION, 5, match=False),
|
_("eval2", ContextActions.EVALUATION, 5, match=False),
|
||||||
_("eval3", ExecutionContextActions.EVALUATION, 20, match=False),
|
_("eval3", ContextActions.EVALUATION, 20, match=False),
|
||||||
_("eval4", ExecutionContextActions.EVALUATION, 99, match=False),
|
_("eval4", ContextActions.EVALUATION, 99, match=False),
|
||||||
]
|
]
|
||||||
service.execution_plan = service.compute_execution_plan(evaluators)
|
service.execution_plan = service.compute_execution_plan(evaluators)
|
||||||
|
|
||||||
start = [_rvc("foo")]
|
start = [_rvc("foo")]
|
||||||
service.execute(context, start, [ExecutionContextActions.EVALUATION])
|
service.execute(context, start, [ContextActions.EVALUATION])
|
||||||
|
|
||||||
# to check what happened, look at the execution context children
|
# to check what happened, look at the execution context children
|
||||||
evaluators_executed = [ec.action_context["evaluator"] for ec in context.get_children() if
|
evaluators_executed = [ec.action_context["evaluator"] for ec in context.get_children() if
|
||||||
@@ -176,7 +175,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
_ = OneReturnValueEvaluatorForTesting
|
_ = OneReturnValueEvaluatorForTesting
|
||||||
evaluators = [
|
evaluators = [
|
||||||
_("eval1",
|
_("eval1",
|
||||||
ExecutionContextActions.EVALUATION,
|
ContextActions.EVALUATION,
|
||||||
20,
|
20,
|
||||||
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
||||||
eval_result=[rv_bar])
|
eval_result=[rv_bar])
|
||||||
@@ -184,8 +183,8 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
service.execution_plan = service.compute_execution_plan(evaluators)
|
service.execution_plan = service.compute_execution_plan(evaluators)
|
||||||
|
|
||||||
start = [rv_foo]
|
start = [rv_foo]
|
||||||
service.execute(context, start, [ExecutionContextActions.EVALUATION])
|
service.execute(context, start, [ContextActions.EVALUATION])
|
||||||
children = [ec for ec in context.get_children() if ec.action == ExecutionContextActions.EVALUATING_ITERATION]
|
children = [ec for ec in context.get_children() if ec.action == ContextActions.EVALUATING_ITERATION]
|
||||||
assert len(children) == 2
|
assert len(children) == 2
|
||||||
|
|
||||||
def test_eval_is_not_called_if_match_fails_for_one_return(self, context, service):
|
def test_eval_is_not_called_if_match_fails_for_one_return(self, context, service):
|
||||||
@@ -193,7 +192,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
_ = OneReturnValueEvaluatorForTesting
|
_ = OneReturnValueEvaluatorForTesting
|
||||||
evaluators = [
|
evaluators = [
|
||||||
_("eval1",
|
_("eval1",
|
||||||
ExecutionContextActions.EVALUATION,
|
ContextActions.EVALUATION,
|
||||||
20,
|
20,
|
||||||
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
||||||
eval_result=[_rvc("bar")])
|
eval_result=[_rvc("bar")])
|
||||||
@@ -201,7 +200,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
service.execution_plan = service.compute_execution_plan(evaluators)
|
service.execution_plan = service.compute_execution_plan(evaluators)
|
||||||
|
|
||||||
start = [_rvc("baz")]
|
start = [_rvc("baz")]
|
||||||
res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
|
res = service.execute(context, start, [ContextActions.EVALUATION])
|
||||||
assert res == start
|
assert res == start
|
||||||
|
|
||||||
# check what happen in details
|
# check what happen in details
|
||||||
@@ -214,7 +213,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
_ = OneReturnValueEvaluatorForTesting
|
_ = OneReturnValueEvaluatorForTesting
|
||||||
evaluators = [
|
evaluators = [
|
||||||
_("eval1",
|
_("eval1",
|
||||||
ExecutionContextActions.EVALUATION,
|
ContextActions.EVALUATION,
|
||||||
20,
|
20,
|
||||||
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
||||||
eval_result=[_rvc("bar")])
|
eval_result=[_rvc("bar")])
|
||||||
@@ -222,7 +221,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
service.execution_plan = service.compute_execution_plan(evaluators)
|
service.execution_plan = service.compute_execution_plan(evaluators)
|
||||||
|
|
||||||
start = [_rvc("foo")]
|
start = [_rvc("foo")]
|
||||||
res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
|
res = service.execute(context, start, [ContextActions.EVALUATION])
|
||||||
assert res == [_rvc("bar")]
|
assert res == [_rvc("bar")]
|
||||||
assert res[0].parents == start
|
assert res[0].parents == start
|
||||||
|
|
||||||
@@ -238,7 +237,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
_ = OneReturnValueEvaluatorForTesting
|
_ = OneReturnValueEvaluatorForTesting
|
||||||
evaluators = [
|
evaluators = [
|
||||||
_("eval1",
|
_("eval1",
|
||||||
ExecutionContextActions.EVALUATION,
|
ContextActions.EVALUATION,
|
||||||
20,
|
20,
|
||||||
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
||||||
eval_result=[rv_qux])
|
eval_result=[rv_qux])
|
||||||
@@ -246,7 +245,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
service.execution_plan = service.compute_execution_plan(evaluators)
|
service.execution_plan = service.compute_execution_plan(evaluators)
|
||||||
|
|
||||||
start = [rv_bar, rv_foo, rv_baz]
|
start = [rv_bar, rv_foo, rv_baz]
|
||||||
res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
|
res = service.execute(context, start, [ContextActions.EVALUATION])
|
||||||
assert res == [rv_bar, rv_qux, rv_baz] # We must keep the order ! rv_qux replaces rv_foo
|
assert res == [rv_bar, rv_qux, rv_baz] # We must keep the order ! rv_qux replaces rv_foo
|
||||||
assert res[0].parents is None
|
assert res[0].parents is None
|
||||||
assert res[1].parents == [rv_foo]
|
assert res[1].parents == [rv_foo]
|
||||||
@@ -267,12 +266,12 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
_ = OneReturnValueEvaluatorForTesting
|
_ = OneReturnValueEvaluatorForTesting
|
||||||
evaluators = [
|
evaluators = [
|
||||||
_("eval1",
|
_("eval1",
|
||||||
ExecutionContextActions.EVALUATION,
|
ContextActions.EVALUATION,
|
||||||
20,
|
20,
|
||||||
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
||||||
eval_result=[rv_bar]),
|
eval_result=[rv_bar]),
|
||||||
_("eval2",
|
_("eval2",
|
||||||
ExecutionContextActions.EVALUATION,
|
ContextActions.EVALUATION,
|
||||||
20,
|
20,
|
||||||
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
||||||
eval_result=[rv_baz])
|
eval_result=[rv_baz])
|
||||||
@@ -280,7 +279,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
service.execution_plan = service.compute_execution_plan(evaluators)
|
service.execution_plan = service.compute_execution_plan(evaluators)
|
||||||
|
|
||||||
start = [rv_qux, rv_foo, rv_qux]
|
start = [rv_qux, rv_foo, rv_qux]
|
||||||
res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
|
res = service.execute(context, start, [ContextActions.EVALUATION])
|
||||||
assert res == [rv_qux, rv_bar, rv_baz, rv_qux] # they both eat it !
|
assert res == [rv_qux, rv_bar, rv_baz, rv_qux] # they both eat it !
|
||||||
assert res[1].parents == [rv_foo]
|
assert res[1].parents == [rv_foo]
|
||||||
assert res[2].parents == [rv_foo]
|
assert res[2].parents == [rv_foo]
|
||||||
@@ -293,12 +292,12 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
_ = OneReturnValueEvaluatorForTesting
|
_ = OneReturnValueEvaluatorForTesting
|
||||||
evaluators = [
|
evaluators = [
|
||||||
_("eval1",
|
_("eval1",
|
||||||
ExecutionContextActions.EVALUATION,
|
ContextActions.EVALUATION,
|
||||||
20,
|
20,
|
||||||
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
||||||
eval_result=[rv_bar]),
|
eval_result=[rv_bar]),
|
||||||
_("eval2",
|
_("eval2",
|
||||||
ExecutionContextActions.EVALUATION,
|
ContextActions.EVALUATION,
|
||||||
30,
|
30,
|
||||||
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
||||||
eval_result=[rv_baz])
|
eval_result=[rv_baz])
|
||||||
@@ -306,7 +305,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
service.execution_plan = service.compute_execution_plan(evaluators)
|
service.execution_plan = service.compute_execution_plan(evaluators)
|
||||||
|
|
||||||
start = [rv_foo]
|
start = [rv_foo]
|
||||||
res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
|
res = service.execute(context, start, [ContextActions.EVALUATION])
|
||||||
assert res == [rv_baz]
|
assert res == [rv_baz]
|
||||||
assert res[0].parents == start
|
assert res[0].parents == start
|
||||||
|
|
||||||
@@ -315,7 +314,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
_ = AllReturnValuesEvaluatorForTesting
|
_ = AllReturnValuesEvaluatorForTesting
|
||||||
evaluators = [
|
evaluators = [
|
||||||
_("eval1",
|
_("eval1",
|
||||||
ExecutionContextActions.EVALUATION,
|
ContextActions.EVALUATION,
|
||||||
20,
|
20,
|
||||||
match=lambda r: context.sheerka.isinstance(r[0].value, "foo"),
|
match=lambda r: context.sheerka.isinstance(r[0].value, "foo"),
|
||||||
eval_result=[_rvc("bar")])
|
eval_result=[_rvc("bar")])
|
||||||
@@ -323,11 +322,11 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
service.execution_plan = service.compute_execution_plan(evaluators)
|
service.execution_plan = service.compute_execution_plan(evaluators)
|
||||||
|
|
||||||
start = [_rvc("baz")]
|
start = [_rvc("baz")]
|
||||||
res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
|
res = service.execute(context, start, [ContextActions.EVALUATION])
|
||||||
assert res == start
|
assert res == start
|
||||||
|
|
||||||
start = [_rvc("foo")]
|
start = [_rvc("foo")]
|
||||||
res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
|
res = service.execute(context, start, [ContextActions.EVALUATION])
|
||||||
assert res == [_rvc("bar")]
|
assert res == [_rvc("bar")]
|
||||||
assert res[0].parents == start
|
assert res[0].parents == start
|
||||||
|
|
||||||
@@ -338,7 +337,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
_ = AllReturnValuesEvaluatorForTesting
|
_ = AllReturnValuesEvaluatorForTesting
|
||||||
evaluators = [
|
evaluators = [
|
||||||
_("eval1",
|
_("eval1",
|
||||||
ExecutionContextActions.EVALUATION,
|
ContextActions.EVALUATION,
|
||||||
20,
|
20,
|
||||||
match=lambda lst: context.sheerka.isinstance(lst[0].value, "foo"),
|
match=lambda lst: context.sheerka.isinstance(lst[0].value, "foo"),
|
||||||
eval_result=[rv_bar])
|
eval_result=[rv_bar])
|
||||||
@@ -346,7 +345,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
service.execution_plan = service.compute_execution_plan(evaluators)
|
service.execution_plan = service.compute_execution_plan(evaluators)
|
||||||
|
|
||||||
start = [rv_baz, rv_foo] # foo is not the first in the list
|
start = [rv_baz, rv_foo] # foo is not the first in the list
|
||||||
res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
|
res = service.execute(context, start, [ContextActions.EVALUATION])
|
||||||
assert res == start
|
assert res == start
|
||||||
|
|
||||||
# check what happen in details
|
# check what happen in details
|
||||||
@@ -360,7 +359,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
_ = AllReturnValuesEvaluatorForTesting
|
_ = AllReturnValuesEvaluatorForTesting
|
||||||
evaluators = [
|
evaluators = [
|
||||||
_("eval1",
|
_("eval1",
|
||||||
ExecutionContextActions.EVALUATION,
|
ContextActions.EVALUATION,
|
||||||
20,
|
20,
|
||||||
match=lambda lst: context.sheerka.isinstance(lst[0].value, "foo"),
|
match=lambda lst: context.sheerka.isinstance(lst[0].value, "foo"),
|
||||||
eval_result=[rv_bar])
|
eval_result=[rv_bar])
|
||||||
@@ -368,7 +367,7 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
service.execution_plan = service.compute_execution_plan(evaluators)
|
service.execution_plan = service.compute_execution_plan(evaluators)
|
||||||
|
|
||||||
start = [rv_foo, rv_baz]
|
start = [rv_foo, rv_baz]
|
||||||
res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
|
res = service.execute(context, start, [ContextActions.EVALUATION])
|
||||||
assert res == [rv_bar]
|
assert res == [rv_bar]
|
||||||
assert res[0].parents == start
|
assert res[0].parents == start
|
||||||
|
|
||||||
@@ -377,3 +376,28 @@ class TestSheerkaEngine(BaseTest):
|
|||||||
exec_context = next(filter(lambda ec: "evaluator" in ec.action_context, context.get_children()))
|
exec_context = next(filter(lambda ec: "evaluator" in ec.action_context, context.get_children()))
|
||||||
evaluation_trace = exec_context.values["evaluation"]
|
evaluation_trace = exec_context.values["evaluation"]
|
||||||
assert evaluation_trace == {"match": True, "new": res, "eaten": start}
|
assert evaluation_trace == {"match": True, "new": res, "eaten": start}
|
||||||
|
|
||||||
|
def test_ret_val_not_removed_does_not_cause_infinite_recursion(self, context, service):
|
||||||
|
rv_foo, rv_bar = _rvc("foo"), _rvc("bar") # rv => ReturnValue
|
||||||
|
|
||||||
|
# properly init the service
|
||||||
|
# both evaluator want to eat 'foo'
|
||||||
|
_ = OneReturnValueEvaluatorForTesting
|
||||||
|
evaluators = [
|
||||||
|
_("eval",
|
||||||
|
ContextActions.EVALUATION,
|
||||||
|
20,
|
||||||
|
match=lambda r: context.sheerka.isinstance(r.value, "foo"),
|
||||||
|
eval_result=[rv_bar], eval_eaten=[]),
|
||||||
|
]
|
||||||
|
service.execution_plan = service.compute_execution_plan(evaluators)
|
||||||
|
|
||||||
|
# in the test, 'foo' produces 'bar', but is not removed
|
||||||
|
# during the second iteration, 'foo' still exists, so it will produce 'bar' again
|
||||||
|
# and so on...
|
||||||
|
# This test validate that the infinite loop is broken
|
||||||
|
start = [rv_foo]
|
||||||
|
res = service.execute(context, start, [ContextActions.EVALUATION])
|
||||||
|
|
||||||
|
assert res == [rv_bar]
|
||||||
|
assert res[0].parents == [rv_foo]
|
||||||
|
|||||||
@@ -0,0 +1,75 @@
|
|||||||
|
import pytest
|
||||||
|
|
||||||
|
from base import BaseTest, DummyObj
|
||||||
|
from caching.FastCache import FastCache
|
||||||
|
from core.ExecutionContext import ContextActions
|
||||||
|
from services.SheerkaMemory import SheerkaMemory
|
||||||
|
|
||||||
|
|
||||||
|
class TestSheerkaEngine(BaseTest):
|
||||||
|
@pytest.fixture()
|
||||||
|
def service(self, sheerka):
|
||||||
|
return SheerkaMemory(sheerka) # I want a new instance to keep Sheerka clean (when I update stm)
|
||||||
|
|
||||||
|
def test_i_can_add_to_global_short_term_memory(self, service):
|
||||||
|
dummy = DummyObj()
|
||||||
|
service.add_to_short_term_memory(None, "a", dummy)
|
||||||
|
|
||||||
|
assert service.short_term_objects.copy() == {'global': {'a': dummy}}
|
||||||
|
|
||||||
|
def test_i_can_add_and_get_stm_data(self, context, service):
|
||||||
|
sub_context = context.push("TestSheerkaEngine", ContextActions.TESTING, None)
|
||||||
|
|
||||||
|
service.add_to_short_term_memory(None, "a", "global level")
|
||||||
|
service.add_to_short_term_memory(context, "a", "context level")
|
||||||
|
service.add_to_short_term_memory(sub_context, "a", "sub context level")
|
||||||
|
|
||||||
|
assert service.get_from_short_term_memory(sub_context, "a") == "sub context level"
|
||||||
|
assert service.get_from_short_term_memory(context, "a") == "context level"
|
||||||
|
assert service.get_from_short_term_memory(None, "a") == "global level"
|
||||||
|
|
||||||
|
def test_i_can_list_stm_data(self, context, service):
|
||||||
|
sub_context = context.push("TestSheerkaEngine", ContextActions.TESTING, None)
|
||||||
|
|
||||||
|
service.add_to_short_term_memory(None, "a", "global a")
|
||||||
|
service.add_to_short_term_memory(None, "b", "global b")
|
||||||
|
service.add_to_short_term_memory(context, "a", "context a")
|
||||||
|
service.add_to_short_term_memory(context, "c", "context c")
|
||||||
|
service.add_to_short_term_memory(sub_context, "d", "sub context d")
|
||||||
|
service.add_to_short_term_memory(sub_context, "a", "sub context a")
|
||||||
|
|
||||||
|
assert service.list_short_term_memory(sub_context) == {"a": "sub context a",
|
||||||
|
"b": "global b",
|
||||||
|
"c": "context c",
|
||||||
|
"d": "sub context d"}
|
||||||
|
|
||||||
|
assert service.list_short_term_memory(context) == {"a": "context a",
|
||||||
|
"b": "global b",
|
||||||
|
"c": "context c"}
|
||||||
|
|
||||||
|
assert service.list_short_term_memory(None) == {"a": "global a",
|
||||||
|
"b": "global b"}
|
||||||
|
|
||||||
|
def test_i_can_list_stm_data_when_context_have_no_entry(self, context, service):
|
||||||
|
sub_context = context.push("TestSheerkaEngine", ContextActions.TESTING, None)
|
||||||
|
|
||||||
|
service.add_to_short_term_memory(sub_context, "d", "sub context d")
|
||||||
|
service.add_to_short_term_memory(sub_context, "a", "sub context a")
|
||||||
|
|
||||||
|
assert service.list_short_term_memory(sub_context) == {"a": "sub context a", "d": "sub context d"}
|
||||||
|
assert service.list_short_term_memory(context) == {}
|
||||||
|
assert service.list_short_term_memory(None) == {}
|
||||||
|
|
||||||
|
def test_i_value_are_removed_when_cache_is_full(self, context, service):
|
||||||
|
service.short_term_objects = FastCache(3)
|
||||||
|
context1 = context.push("TestSheerkaEngine", ContextActions.TESTING, None)
|
||||||
|
context2 = context.push("TestSheerkaEngine", ContextActions.TESTING, None)
|
||||||
|
context3 = context.push("TestSheerkaEngine", ContextActions.TESTING, None)
|
||||||
|
|
||||||
|
service.add_to_short_term_memory(context, "a", "context")
|
||||||
|
service.add_to_short_term_memory(context1, "b", "context 1")
|
||||||
|
service.add_to_short_term_memory(context2, "c", "context 2")
|
||||||
|
assert context.id in service.short_term_objects
|
||||||
|
|
||||||
|
service.add_to_short_term_memory(context3, "d", "context 3")
|
||||||
|
assert context.id not in service.short_term_objects
|
||||||
@@ -0,0 +1,127 @@
|
|||||||
|
import pytest
|
||||||
|
|
||||||
|
from base import BaseTest, DummyObj
|
||||||
|
from common.global_symbols import NoFirstToken, NotFound, NotInit, Removed
|
||||||
|
from conftest import NewOntology
|
||||||
|
from core.BuiltinConcepts import BuiltinConcepts
|
||||||
|
from evaluators.PythonParser import PythonParser
|
||||||
|
from helpers import _rv, define_new_concept, get_concepts, get_metadata
|
||||||
|
from parsers.ParserInput import ParserInput
|
||||||
|
from services.SheerkaPython import EvaluationRef, SheerkaPython
|
||||||
|
|
||||||
|
|
||||||
|
def get_python_fragment(sheerka, context, command):
|
||||||
|
pi = ParserInput(command)
|
||||||
|
pi.init()
|
||||||
|
parser_start = _rv(sheerka.newn(BuiltinConcepts.PARSER_INPUT, pi=pi))
|
||||||
|
ret = PythonParser().eval(context, None, parser_start)
|
||||||
|
return ret.new[0].value.pf
|
||||||
|
|
||||||
|
|
||||||
|
class TestSheerkaPython(BaseTest):
|
||||||
|
@pytest.fixture()
|
||||||
|
def service(self, sheerka) -> SheerkaPython:
|
||||||
|
return sheerka.services[SheerkaPython.NAME]
|
||||||
|
|
||||||
|
@pytest.mark.parametrize("text, expected", [
|
||||||
|
("1 + 1", 2),
|
||||||
|
("echo('I have access to Sheerka !')", "I have access to Sheerka !"),
|
||||||
|
("sheerka.echo('I have access to Sheerka !')", "I have access to Sheerka !"),
|
||||||
|
("a=10\na", 10),
|
||||||
|
("NotInit", NotInit),
|
||||||
|
("NotFound", NotFound),
|
||||||
|
("Removed", Removed),
|
||||||
|
("NoFirstToken", NoFirstToken),
|
||||||
|
])
|
||||||
|
def test_i_can_evaluate_simple_expression(self, sheerka, context, service, text, expected):
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, text)
|
||||||
|
ret = service.evaluate_python(context, python_fragment)
|
||||||
|
assert ret == expected
|
||||||
|
|
||||||
|
def test_i_can_eval_isinstance_for_type(self, sheerka, context, service):
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "isinstance('some string', str)")
|
||||||
|
ret = service.evaluate_python(context, python_fragment)
|
||||||
|
|
||||||
|
assert ret is True
|
||||||
|
|
||||||
|
def test_i_can_eval_isinstance_for_concept(self, sheerka, context, service):
|
||||||
|
with NewOntology(context, "test_i_can_eval_isinstance_for_concept"):
|
||||||
|
get_concepts(context, "foo", use_sheerka=True)
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "isinstance(foo, 'foo')")
|
||||||
|
ret = service.evaluate_python(context, python_fragment)
|
||||||
|
assert ret is True
|
||||||
|
|
||||||
|
# 'foo' is also a Concept
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "isinstance(foo, Concept)")
|
||||||
|
ret = service.evaluate_python(context, python_fragment)
|
||||||
|
assert ret is True
|
||||||
|
|
||||||
|
def test_i_can_use_value_from_global_namespace(self, sheerka, context, service):
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "self.a")
|
||||||
|
|
||||||
|
ret = service.evaluate_python(context, python_fragment, {"self": DummyObj("my dummy value")})
|
||||||
|
assert ret == "my dummy value"
|
||||||
|
|
||||||
|
def test_i_can_eval_using_eval_ref(self, sheerka, context, service):
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "a")
|
||||||
|
python_fragment.namespace = {"a": EvaluationRef("self", "a")}
|
||||||
|
|
||||||
|
ret = service.evaluate_python(context, python_fragment, {"self": DummyObj("my dummy value")})
|
||||||
|
assert ret == "my dummy value"
|
||||||
|
|
||||||
|
@pytest.mark.skip("Concept evaluation is not implemented")
|
||||||
|
def test_i_can_eval_concept_properties(self, sheerka, context, service):
|
||||||
|
with NewOntology(context, "test_i_can_eval_concept_properties"):
|
||||||
|
foo_meta = get_metadata("foo", variables=[("a", "hello world")])
|
||||||
|
define_new_concept(context, foo_meta)
|
||||||
|
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "foo.a")
|
||||||
|
ret = service.evaluate_python(context, python_fragment)
|
||||||
|
assert ret == "hello world"
|
||||||
|
|
||||||
|
@pytest.mark.skip("Concept evaluation is not implemented")
|
||||||
|
def test_i_can_eval_python_mixed_with_concept(self, sheerka, context, service):
|
||||||
|
with NewOntology(context, "test_i_can_eval_python_mixed_with_concept"):
|
||||||
|
foo_meta = get_metadata("foo", variables=[("a", "1")])
|
||||||
|
bar_meta = get_metadata("bar", body="2")
|
||||||
|
get_concepts(context, foo_meta, bar_meta, use_sheerka=True)
|
||||||
|
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "bar + foo.a")
|
||||||
|
ret = service.evaluate_python(context, python_fragment)
|
||||||
|
assert ret == "3"
|
||||||
|
|
||||||
|
def test_i_can_remember_previous_results(self, sheerka, context, service):
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "a=10")
|
||||||
|
ret = service.evaluate_python(context, python_fragment)
|
||||||
|
assert ret is None
|
||||||
|
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "a")
|
||||||
|
ret = service.evaluate_python(context, python_fragment)
|
||||||
|
assert ret == 10
|
||||||
|
|
||||||
|
def test_i_can_import_module(self, sheerka, context, service):
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "import math")
|
||||||
|
ret = service.evaluate_python(context, python_fragment)
|
||||||
|
assert ret is None
|
||||||
|
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "math.sqrt(4)")
|
||||||
|
ret = service.evaluate_python(context, python_fragment)
|
||||||
|
assert ret == 2
|
||||||
|
|
||||||
|
def test_i_can_import_function_from_module(self, sheerka, context, service):
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "from math import sqrt")
|
||||||
|
ret = service.evaluate_python(context, python_fragment)
|
||||||
|
assert ret is None
|
||||||
|
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "sqrt(4)")
|
||||||
|
ret = service.evaluate_python(context, python_fragment)
|
||||||
|
assert ret == 2
|
||||||
|
|
||||||
|
def test_i_can_eval_when_context_is_needed(self, sheerka, context, service):
|
||||||
|
with NewOntology(context, "test_i_can_eval_when_context_is_needed"):
|
||||||
|
python_fragment = get_python_fragment(sheerka, context, "define_new_concept('foo')")
|
||||||
|
ret = service.evaluate_python(context, python_fragment)
|
||||||
|
assert sheerka.isinstance(ret.value, BuiltinConcepts.NEW_CONCEPT)
|
||||||
|
# for info, there are two level of value
|
||||||
|
# one for PythonEvaluator return value
|
||||||
|
# one for the ConceptManager return value
|
||||||
@@ -247,7 +247,7 @@ class TestSheerkaPickleHandler(BaseTest):
|
|||||||
def test_i_can_encode_decode_execution_context(self):
|
def test_i_can_encode_decode_execution_context(self):
|
||||||
sheerka = self.get_sheerka()
|
sheerka = self.get_sheerka()
|
||||||
c = Concept("foo").def_var("a")
|
c = Concept("foo").def_var("a")
|
||||||
context = ExecutionContext("who", Event("xxx"), sheerka, BuiltinConcepts.EVALUATE_CONCEPT, c, "my desc")
|
context = ExecutionContext("who", Event("xxx"), sheerka, BuiltinConcepts.EVALUATING_CONCEPT, c, "my desc")
|
||||||
input_list = [ReturnValueConcept("who", True, 10), ReturnValueConcept("who2", False, 20)]
|
input_list = [ReturnValueConcept("who", True, 10), ReturnValueConcept("who2", False, 20)]
|
||||||
context.inputs = {"a": input_list, "b": set_full_serialization(Concept("foo"))}
|
context.inputs = {"a": input_list, "b": set_full_serialization(Concept("foo"))}
|
||||||
context.values = {"c": input_list, "d": set_full_serialization(Concept("bar"))}
|
context.values = {"c": input_list, "d": set_full_serialization(Concept("bar"))}
|
||||||
|
|||||||
@@ -110,7 +110,7 @@ def test_i_can_auto_init():
|
|||||||
assert metadata.is_unique is False
|
assert metadata.is_unique is False
|
||||||
assert metadata.is_builtin is False
|
assert metadata.is_builtin is False
|
||||||
assert metadata.definition_type is DefinitionType.DEFAULT
|
assert metadata.definition_type is DefinitionType.DEFAULT
|
||||||
assert metadata.digest == '426d88b1b928a421366c12fb283267b89610cbfb9efb470813ea8b5ba37a2013'
|
assert metadata.digest == '9e058bc1261d1e2c785889147066ce89960fd6844db5bb6f1d1d809a8eb790b7'
|
||||||
|
|
||||||
|
|
||||||
def test_sequences_are_incremented_when_multiples_call():
|
def test_sequences_are_incremented_when_multiples_call():
|
||||||
|
|||||||
Reference in New Issue
Block a user