"""Tests for SheerkaEngine: execution-plan computation and the evaluation loop.

Uses two configurable evaluator stubs (one-return-value and all-return-values
flavours) whose match/eval behaviour is injected per test.
"""
from typing import Callable

import pytest

from base import BaseTest
from core.BuiltinConcepts import BuiltinConcepts
from core.ExecutionContext import ExecutionContext, ContextActions
from core.ReturnValue import ReturnValue
from evaluators.CreateParserInput import CreateParserInput
from evaluators.base_evaluator import (
    AllReturnValuesEvaluator,
    BaseEvaluator,
    EvaluatorEvalResult,
    EvaluatorMatchResult,
    OneReturnValueEvaluator,
)
from helpers import _rvc
from services.SheerkaEngine import SheerkaEngine

# Every engine step, in the order the engine is expected to run them.
ALL_STEPS = [
    ContextActions.BEFORE_PARSING,
    ContextActions.PARSING,
    ContextActions.AFTER_PARSING,
    ContextActions.BEFORE_EVALUATION,
    ContextActions.EVALUATION,
    ContextActions.AFTER_EVALUATION,
]


class OneReturnValueEvaluatorForTesting(OneReturnValueEvaluator):
    """Configurable stub evaluator operating on a single ReturnValue.

    `match` is either a bool (constant answer) or a callable applied to the
    return value. `eval_result` is what eval() produces; `eval_eaten` is the
    list of consumed return values (defaults to the evaluated one).
    """

    def __init__(self, name, step: ContextActions, priority: int, enabled=True,
                 match: bool | Callable = True, match_context=None,
                 eval_result: list[ReturnValue] | None = None,
                 eval_eaten: list[ReturnValue] | None = None):
        super().__init__(name, step, priority, enabled)
        self.matches_delegate = match
        self.matches_context = match_context
        self.eval_result = eval_result
        self.eval_eaten = eval_eaten

    def matches(self, context: ExecutionContext,
                return_value: ReturnValue) -> EvaluatorMatchResult:
        # if status is a bool, use it
        # otherwise, it's a delegate, so apply it to return_value
        status = self.matches_delegate if \
            isinstance(self.matches_delegate, bool) else \
            self.matches_delegate(return_value)
        return EvaluatorMatchResult(status, self.matches_context)

    def eval(self, context: ExecutionContext, evaluation_context: object,
             return_value: ReturnValue) -> EvaluatorEvalResult:
        # make sure to correctly set up the parent when the return value is modified
        if self.eval_result:
            for ret_val in self.eval_result:
                if ret_val != return_value:
                    ret_val.parents = [return_value]
        return EvaluatorEvalResult(
            self.eval_result,
            [return_value] if self.eval_eaten is None else self.eval_eaten)


class AllReturnValuesEvaluatorForTesting(AllReturnValuesEvaluator):
    """Configurable stub evaluator operating on the whole list of ReturnValues.

    Same knobs as OneReturnValueEvaluatorForTesting, but match/eval receive
    the full list and `eval_eaten` defaults to that full list.
    """

    def __init__(self, name, step: ContextActions, priority: int, enabled=True,
                 match: bool | Callable = True, match_context=None,
                 eval_result: list[ReturnValue] | None = None,
                 eval_eaten: list[ReturnValue] | None = None):
        super().__init__(name, step, priority, enabled)
        self.matches_delegate = match
        self.matches_context = match_context
        self.eval_result = eval_result
        self.eval_eaten = eval_eaten

    def matches(self, context: ExecutionContext,
                return_values: list[ReturnValue]) -> EvaluatorMatchResult:
        # if status is a bool, use it
        # otherwise, it's a delegate, so apply it to return_values
        status = self.matches_delegate if \
            isinstance(self.matches_delegate, bool) else \
            self.matches_delegate(return_values)
        return EvaluatorMatchResult(status, self.matches_context)

    def eval(self, context: ExecutionContext, evaluation_context: object,
             return_values: list[ReturnValue]) -> EvaluatorEvalResult:
        # make sure to correctly set up the parent when the return value is modified
        if self.eval_result:
            for ret_val in self.eval_result:
                ret_val.parents = return_values
        return EvaluatorEvalResult(
            self.eval_result,
            return_values if self.eval_eaten is None else self.eval_eaten)


class TestSheerkaEngine(BaseTest):

    @pytest.fixture()
    def service(self, sheerka):
        # A fresh instance per test keeps Sheerka clean (tests mutate execution_plan)
        return SheerkaEngine(sheerka)

    def test_i_can_compute_execution_plan(self, service):
        assert service.compute_execution_plan([]) == {}
        e1 = BaseEvaluator("eval1", ContextActions.BEFORE_EVALUATION, 5)
        e2 = BaseEvaluator("eval2", ContextActions.BEFORE_EVALUATION, 5)
        e3 = BaseEvaluator("eval3", ContextActions.BEFORE_EVALUATION, 10)
        e4 = BaseEvaluator("eval4", ContextActions.EVALUATION, 10)
        e5 = BaseEvaluator("eval5", ContextActions.AFTER_EVALUATION, 10, enabled=False)
        res = service.compute_execution_plan([e1, e2, e3, e4, e5])
        # grouped by step then priority; disabled evaluators are excluded
        assert res == {ContextActions.BEFORE_EVALUATION: {5: [e1, e2], 10: [e3]},
                       ContextActions.EVALUATION: {10: [e4]}}

    def test_i_can_call_execute(self, sheerka, context, service):
        service.execution_plan = {
            ContextActions.BEFORE_EVALUATION: {50: [CreateParserInput()]}}
        start = [ReturnValue("TestSheerkaEngine", True,
                             sheerka.newn(BuiltinConcepts.USER_INPUT, command="1 + 1"))]
        ret = service.execute(context, start, [ContextActions.BEFORE_EVALUATION])
        assert len(ret) == 1
        ret = ret[0]
        assert isinstance(ret, ReturnValue)
        assert ret.who == CreateParserInput.NAME
        assert ret.status is True
        assert ret.parents == start

    def test_that_return_values_is_unchanged_when_no_evaluator(self, context, service):
        service.execution_plan = {}
        start = [_rvc("foo")]
        ret = service.execute(context, start, [ContextActions.EVALUATION])
        assert ret == start

    def test_steps_are_executed_in_correct_order(self, context, service):
        # properly init the service
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ContextActions.AFTER_PARSING, 21, match=False),
            _("eval2", ContextActions.BEFORE_EVALUATION, 5, match=False),
            _("eval3", ContextActions.AFTER_EVALUATION, 12, match=False),
            _("eval4", ContextActions.EVALUATION, 99, match=False),
            _("eval5", ContextActions.BEFORE_PARSING, 5, match=False),
            _("eval6", ContextActions.PARSING, 25, match=False),
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        # init test variables
        start = [_rvc("foo")]
        service.execute(context, start, ALL_STEPS)
        # to check what happened, look at the execution context children
        executed_steps = [ec.action_context["step"]
                          for ec in context.get_children(level=1)]
        assert executed_steps == ALL_STEPS

    def test_higher_priority_evaluators_are_executed_first(self, context, service):
        # properly init the service
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ContextActions.EVALUATION, 20, match=False),
            _("eval2", ContextActions.EVALUATION, 5, match=False),
            _("eval3", ContextActions.EVALUATION, 20, match=False),
            _("eval4", ContextActions.EVALUATION, 99, match=False),
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [_rvc("foo")]
        service.execute(context, start, [ContextActions.EVALUATION])
        # to check what happened, look at the execution context children
        evaluators_executed = [ec.action_context["evaluator"]
                               for ec in context.get_children()
                               if "evaluator" in ec.action_context]
        assert evaluators_executed == ["eval4", "eval1", "eval3", "eval2"]

    def test_evaluation_loop_stops_when_no_modification(self, context, service):
        rv_foo, rv_bar = _rvc("foo"), _rvc("bar")  # rv => ReturnValue
        # properly init the service
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"),
              eval_result=[rv_bar])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [rv_foo]
        service.execute(context, start, [ContextActions.EVALUATION])
        # one iteration transforms foo->bar, the second sees no change and stops
        children = [ec for ec in context.get_children()
                    if ec.action == ContextActions.EVALUATING_ITERATION]
        assert len(children) == 2

    def test_eval_is_not_called_if_match_fails_for_one_return(self, context, service):
        # properly init the service
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"),
              eval_result=[_rvc("bar")])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [_rvc("baz")]
        res = service.execute(context, start, [ContextActions.EVALUATION])
        assert res == start
        # check what happened in detail
        exec_context = next(filter(lambda ec: "evaluator" in ec.action_context,
                                   context.get_children()))
        evaluation_trace = exec_context.values["evaluation"]
        assert evaluation_trace == [{"item": start[0], "match": False}]

    def test_eval_is_called_if_match_succeed_for_one_return(self, context, service):
        # properly init the service
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"),
              eval_result=[_rvc("bar")])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [_rvc("foo")]
        res = service.execute(context, start, [ContextActions.EVALUATION])
        assert res == [_rvc("bar")]
        assert res[0].parents == start
        # check what happened in detail
        exec_context = next(filter(lambda ec: "evaluator" in ec.action_context,
                                   context.get_children()))
        evaluation_trace = exec_context.values["evaluation"]
        assert evaluation_trace == [{"item": start[0], "match": True,
                                     "new": res, "eaten": start}]

    def test_all_item_are_processed_during_one_return(self, context, service):
        rv_foo, rv_bar, rv_baz, rv_qux = \
            _rvc("foo"), _rvc("bar"), _rvc("baz"), _rvc("qux")  # rv => ReturnValue
        # properly init the service
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"),
              eval_result=[rv_qux])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [rv_bar, rv_foo, rv_baz]
        res = service.execute(context, start, [ContextActions.EVALUATION])
        # We must keep the order! rv_qux replaces rv_foo
        assert res == [rv_bar, rv_qux, rv_baz]
        assert res[0].parents is None
        assert res[1].parents == [rv_foo]
        assert res[2].parents is None
        # check what happened in detail
        exec_context = next(filter(lambda ec: "evaluator" in ec.action_context,
                                   context.get_children()))
        evaluation_trace = exec_context.values["evaluation"]
        assert evaluation_trace == [
            {"item": rv_bar, "match": False},
            {"item": rv_foo, "match": True, "new": [rv_qux], "eaten": [rv_foo]},
            {"item": rv_baz, "match": False}]

    def test_evaluators_with_the_same_priority_do_not_compete_with_each_other_one_return(
            self, context, service):
        rv_foo, rv_bar, rv_baz, rv_qux = \
            _rvc("foo"), _rvc("bar"), _rvc("baz"), _rvc("qux")  # rv => ReturnValue
        # properly init the service
        # both evaluators want to eat 'foo'
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"),
              eval_result=[rv_bar]),
            _("eval2", ContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"),
              eval_result=[rv_baz])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [rv_qux, rv_foo, rv_qux]
        res = service.execute(context, start, [ContextActions.EVALUATION])
        # they both eat it!
        assert res == [rv_qux, rv_bar, rv_baz, rv_qux]
        assert res[1].parents == [rv_foo]
        assert res[2].parents == [rv_foo]

    def test_evaluators_with_higher_priority_take_precedence_one_return(
            self, context, service):
        rv_foo, rv_bar, rv_baz = _rvc("foo"), _rvc("bar"), _rvc("baz")  # rv => ReturnValue
        # properly init the service
        # both evaluators want to eat 'foo'
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"),
              eval_result=[rv_bar]),
            _("eval2", ContextActions.EVALUATION, 30,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"),
              eval_result=[rv_baz])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [rv_foo]
        res = service.execute(context, start, [ContextActions.EVALUATION])
        assert res == [rv_baz]
        assert res[0].parents == start

    def test_evaluator_matches_is_called_before_eval_for_all_return(self, context, service):
        # properly init the service
        _ = AllReturnValuesEvaluatorForTesting
        evaluators = [
            _("eval1", ContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r[0].value, "foo"),
              eval_result=[_rvc("bar")])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [_rvc("baz")]
        res = service.execute(context, start, [ContextActions.EVALUATION])
        assert res == start
        start = [_rvc("foo")]
        res = service.execute(context, start, [ContextActions.EVALUATION])
        assert res == [_rvc("bar")]
        assert res[0].parents == start

    def test_eval_is_not_call_if_match_fails_for_all_return(self, context, service):
        rv_foo, rv_bar, rv_baz = _rvc("foo"), _rvc("bar"), _rvc("baz")  # rv => ReturnValue
        # properly init the service
        _ = AllReturnValuesEvaluatorForTesting
        evaluators = [
            _("eval1", ContextActions.EVALUATION, 20,
              match=lambda lst: context.sheerka.isinstance(lst[0].value, "foo"),
              eval_result=[rv_bar])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [rv_baz, rv_foo]  # foo is not the first in the list
        res = service.execute(context, start, [ContextActions.EVALUATION])
        assert res == start
        # check what happened in detail
        exec_context = next(filter(lambda ec: "evaluator" in ec.action_context,
                                   context.get_children()))
        evaluation_trace = exec_context.values["evaluation"]
        assert evaluation_trace == {"match": False}

    def test_eval_is_called_if_match_succeed_for_all_return(self, context, service):
        rv_foo, rv_bar, rv_baz = _rvc("foo"), _rvc("bar"), _rvc("baz")  # rv => ReturnValue
        # properly init the service
        _ = AllReturnValuesEvaluatorForTesting
        evaluators = [
            _("eval1", ContextActions.EVALUATION, 20,
              match=lambda lst: context.sheerka.isinstance(lst[0].value, "foo"),
              eval_result=[rv_bar])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [rv_foo, rv_baz]
        res = service.execute(context, start, [ContextActions.EVALUATION])
        assert res == [rv_bar]
        assert res[0].parents == start
        # check what happened in detail
        exec_context = next(filter(lambda ec: "evaluator" in ec.action_context,
                                   context.get_children()))
        evaluation_trace = exec_context.values["evaluation"]
        assert evaluation_trace == {"match": True, "new": res, "eaten": start}

    def test_ret_val_not_removed_does_not_cause_infinite_recursion(self, context, service):
        rv_foo, rv_bar = _rvc("foo"), _rvc("bar")  # rv => ReturnValue
        # properly init the service
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval", ContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"),
              eval_result=[rv_bar], eval_eaten=[]),
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        # in the test, 'foo' produces 'bar', but is not removed
        # during the second iteration, 'foo' still exists, so it will produce 'bar' again
        # and so on...
        # This test validates that the infinite loop is broken
        start = [rv_foo]
        res = service.execute(context, start, [ContextActions.EVALUATION])
        assert res == [rv_bar]
        assert res[0].parents == [rv_foo]