"""Tests for :class:`SheerkaEngine`: execution-plan computation and the
step/priority-ordered evaluation loop, driven by configurable evaluator stubs."""

from typing import Callable

import pytest

from base import BaseTest
from core.BuiltinConcepts import BuiltinConcepts
from core.ExecutionContext import ExecutionContext, ExecutionContextActions
from core.ReturnValue import ReturnValue
from core.services.SheerkaEngine import SheerkaEngine
from evaluators.CreateParserInput import CreateParserInput
from evaluators.base_evaluator import AllReturnValuesEvaluator, BaseEvaluator, EvaluatorEvalResult, \
    EvaluatorMatchResult, \
    OneReturnValueEvaluator
from helpers import _rvc

# Every engine step, in the order the engine is expected to run them.
ALL_STEPS = [
    ExecutionContextActions.BEFORE_PARSING,
    ExecutionContextActions.PARSING,
    ExecutionContextActions.AFTER_PARSING,
    ExecutionContextActions.BEFORE_EVALUATION,
    ExecutionContextActions.EVALUATION,
    ExecutionContextActions.AFTER_EVALUATION
]


class OneReturnValueEvaluatorForTesting(OneReturnValueEvaluator):
    """Configurable stub evaluator operating on a single return value.

    ``match`` may be a plain bool (used as-is) or a callable applied to the
    incoming return value.  ``eval_result`` / ``eval_eaten`` are the canned
    values returned by :meth:`eval`.
    """

    def __init__(self, name, step: ExecutionContextActions, priority: int, enabled=True,
                 match: bool | Callable = True, match_context=None,
                 eval_result: list[ReturnValue] | None = None,
                 eval_eaten: list[ReturnValue] | None = None):
        super().__init__(name, step, priority, enabled)
        self.matches_delegate = match
        self.matches_context = match_context
        self.eval_result = eval_result
        self.eval_eaten = eval_eaten

    def matches(self, context: ExecutionContext, return_value: ReturnValue) -> EvaluatorMatchResult:
        # if status is a bool, use it
        # otherwise, it's a delegate, so apply to return_value
        status = self.matches_delegate if \
            isinstance(self.matches_delegate, bool) else \
            self.matches_delegate(return_value)
        return EvaluatorMatchResult(status, self.matches_context)

    def eval(self, context: ExecutionContext, evaluation_context: object,
             return_value: ReturnValue) -> EvaluatorEvalResult:
        # make sure to correctly set up the parent when the return value is modified
        if self.eval_result:
            for ret_val in self.eval_result:
                if ret_val != return_value:
                    ret_val.parents = [return_value]
        return EvaluatorEvalResult(self.eval_result, self.eval_eaten or [return_value])


class AllReturnValuesEvaluatorForTesting(AllReturnValuesEvaluator):
    """Configurable stub evaluator operating on the whole list of return values.

    Same knobs as :class:`OneReturnValueEvaluatorForTesting`, but ``match``
    callables receive the full list and :meth:`eval` consumes the full list.
    """

    def __init__(self, name, step: ExecutionContextActions, priority: int, enabled=True,
                 match: bool | Callable = True, match_context=None,
                 eval_result: list[ReturnValue] | None = None,
                 eval_eaten: list[ReturnValue] | None = None):
        super().__init__(name, step, priority, enabled)
        self.matches_delegate = match
        self.matches_context = match_context
        self.eval_result = eval_result
        self.eval_eaten = eval_eaten

    def matches(self, context: ExecutionContext,
                return_values: list[ReturnValue]) -> EvaluatorMatchResult:
        # if status is a bool, use it
        # otherwise, it's a delegate, so apply to return_value
        status = self.matches_delegate if \
            isinstance(self.matches_delegate, bool) else \
            self.matches_delegate(return_values)
        return EvaluatorMatchResult(status, self.matches_context)

    def eval(self, context: ExecutionContext, evaluation_context: object,
             return_values: list[ReturnValue]) -> EvaluatorEvalResult:
        # make sure to correctly set up the parent when the return value is modified
        if self.eval_result:
            for ret_val in self.eval_result:
                ret_val.parents = return_values
        return EvaluatorEvalResult(self.eval_result, self.eval_eaten or return_values)


class TestSheerkaEngine(BaseTest):
    """Behavioral tests of SheerkaEngine's execution plan and evaluation loop."""

    @pytest.fixture()
    def service(self, sheerka):
        return SheerkaEngine(sheerka)

    def test_i_can_compute_execution_plan(self, service):
        assert service.compute_execution_plan([]) == {}
        e1 = BaseEvaluator("eval1", ExecutionContextActions.BEFORE_EVALUATION, 5)
        e2 = BaseEvaluator("eval2", ExecutionContextActions.BEFORE_EVALUATION, 5)
        e3 = BaseEvaluator("eval3", ExecutionContextActions.BEFORE_EVALUATION, 10)
        e4 = BaseEvaluator("eval4", ExecutionContextActions.EVALUATION, 10)
        e5 = BaseEvaluator("eval5", ExecutionContextActions.AFTER_EVALUATION, 10, enabled=False)
        res = service.compute_execution_plan([e1, e2, e3, e4, e5])
        # plan is step -> priority -> evaluators; disabled evaluators are excluded
        assert res == {ExecutionContextActions.BEFORE_EVALUATION: {5: [e1, e2], 10: [e3]},
                       ExecutionContextActions.EVALUATION: {10: [e4]}}

    def test_i_can_call_execute(self, sheerka, context, service):
        service.execution_plan = {ExecutionContextActions.BEFORE_EVALUATION: {50: [CreateParserInput()]}}
        start = [ReturnValue("TestSheerkaEngine", True,
                             sheerka.newn(BuiltinConcepts.USER_INPUT, command="1 + 1"))]
        ret = service.execute(context, start, [ExecutionContextActions.BEFORE_EVALUATION])
        assert len(ret) == 1
        ret = ret[0]
        assert isinstance(ret, ReturnValue)
        assert ret.who == CreateParserInput.NAME
        assert ret.status is True
        assert ret.parents == start

    def test_that_return_values_is_unchanged_when_no_evaluator(self, context, service):
        service.execution_plan = {}
        start = [_rvc("foo")]
        ret = service.execute(context, start, [ExecutionContextActions.EVALUATION])
        assert ret == start

    def test_steps_are_executed_in_correct_order(self, context, service):
        # properly init the service
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ExecutionContextActions.AFTER_PARSING, 21, match=False),
            _("eval2", ExecutionContextActions.BEFORE_EVALUATION, 5, match=False),
            _("eval3", ExecutionContextActions.AFTER_EVALUATION, 12, match=False),
            _("eval4", ExecutionContextActions.EVALUATION, 99, match=False),
            _("eval5", ExecutionContextActions.BEFORE_PARSING, 5, match=False),
            _("eval6", ExecutionContextActions.PARSING, 25, match=False),
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        # init test variables
        start = [_rvc("foo")]
        service.execute(context, start, ALL_STEPS)
        # to check what happened, look at the execution context children
        executed_steps = [ec.action_context["step"] for ec in context.get_children(level=1)]
        assert executed_steps == ALL_STEPS

    def test_higher_priority_evaluators_are_executed_first(self, context, service):
        # properly init the service
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ExecutionContextActions.EVALUATION, 20, match=False),
            _("eval2", ExecutionContextActions.EVALUATION, 5, match=False),
            _("eval3", ExecutionContextActions.EVALUATION, 20, match=False),
            _("eval4", ExecutionContextActions.EVALUATION, 99, match=False),
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [_rvc("foo")]
        service.execute(context, start, [ExecutionContextActions.EVALUATION])
        # to check what happened, look at the execution context children
        evaluators_executed = [ec.action_context["evaluator"] for ec in context.get_children()
                               if "evaluator" in ec.action_context]
        assert evaluators_executed == ["eval4", "eval1", "eval3", "eval2"]

    def test_evaluation_loop_stops_when_no_modification(self, context, service):
        rv_foo, rv_bar = _rvc("foo"), _rvc("bar")  # rv => ReturnValue
        # properly init the service
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ExecutionContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"), eval_result=[rv_bar])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [rv_foo]
        service.execute(context, start, [ExecutionContextActions.EVALUATION])
        # one iteration that rewrites foo -> bar, one that changes nothing and stops the loop
        children = [ec for ec in context.get_children()
                    if ec.action == ExecutionContextActions.EVALUATING_ITERATION]
        assert len(children) == 2

    def test_eval_is_not_called_if_match_fails_for_one_return(self, context, service):
        # properly init the service
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ExecutionContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"), eval_result=[_rvc("bar")])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [_rvc("baz")]
        res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
        assert res == start
        # check what happen in details
        exec_context = next(filter(lambda ec: "evaluator" in ec.action_context, context.get_children()))
        evaluation_trace = exec_context.values["evaluation"]
        assert evaluation_trace == [{"item": start[0], "match": False}]

    def test_eval_is_called_if_match_succeed_for_one_return(self, context, service):
        # properly init the service
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ExecutionContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"), eval_result=[_rvc("bar")])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [_rvc("foo")]
        res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
        assert res == [_rvc("bar")]
        assert res[0].parents == start
        # check what happen in details
        exec_context = next(filter(lambda ec: "evaluator" in ec.action_context, context.get_children()))
        evaluation_trace = exec_context.values["evaluation"]
        assert evaluation_trace == [{"item": start[0], "match": True, "new": res, "eaten": start}]

    def test_all_item_are_processed_during_one_return(self, context, service):
        rv_foo, rv_bar, rv_baz, rv_qux = _rvc("foo"), _rvc("bar"), _rvc("baz"), _rvc("qux")  # rv => ReturnValue
        # properly init the service
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ExecutionContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"), eval_result=[rv_qux])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [rv_bar, rv_foo, rv_baz]
        res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
        assert res == [rv_bar, rv_qux, rv_baz]  # We must keep the order ! rv_qux replaces rv_foo
        assert res[0].parents is None
        assert res[1].parents == [rv_foo]
        assert res[2].parents is None
        # check what happen in details
        exec_context = next(filter(lambda ec: "evaluator" in ec.action_context, context.get_children()))
        evaluation_trace = exec_context.values["evaluation"]
        assert evaluation_trace == [{"item": rv_bar, "match": False},
                                    {"item": rv_foo, "match": True, "new": [rv_qux], "eaten": [rv_foo]},
                                    {"item": rv_baz, "match": False}]

    def test_evaluators_with_the_same_priority_do_not_compete_with_each_other_one_return(self, context, service):
        rv_foo, rv_bar, rv_baz, rv_qux = _rvc("foo"), _rvc("bar"), _rvc("baz"), _rvc("qux")  # rv => ReturnValue
        # properly init the service
        # both evaluator want to eat 'foo'
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ExecutionContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"), eval_result=[rv_bar]),
            _("eval2", ExecutionContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"), eval_result=[rv_baz])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [rv_qux, rv_foo, rv_qux]
        res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
        assert res == [rv_qux, rv_bar, rv_baz, rv_qux]  # they both eat it !
        assert res[1].parents == [rv_foo]
        assert res[2].parents == [rv_foo]

    def test_evaluators_with_higher_priority_take_precedence_one_return(self, context, service):
        rv_foo, rv_bar, rv_baz = _rvc("foo"), _rvc("bar"), _rvc("baz")  # rv => ReturnValue
        # properly init the service
        # both evaluator want to eat 'foo'
        _ = OneReturnValueEvaluatorForTesting
        evaluators = [
            _("eval1", ExecutionContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"), eval_result=[rv_bar]),
            _("eval2", ExecutionContextActions.EVALUATION, 30,
              match=lambda r: context.sheerka.isinstance(r.value, "foo"), eval_result=[rv_baz])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [rv_foo]
        res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
        assert res == [rv_baz]
        assert res[0].parents == start

    def test_evaluator_matches_is_called_before_eval_for_all_return(self, context, service):
        # properly init the service
        _ = AllReturnValuesEvaluatorForTesting
        evaluators = [
            _("eval1", ExecutionContextActions.EVALUATION, 20,
              match=lambda r: context.sheerka.isinstance(r[0].value, "foo"), eval_result=[_rvc("bar")])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [_rvc("baz")]
        res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
        assert res == start
        start = [_rvc("foo")]
        res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
        assert res == [_rvc("bar")]
        assert res[0].parents == start

    def test_eval_is_not_call_if_match_fails_for_all_return(self, context, service):
        rv_foo, rv_bar, rv_baz = _rvc("foo"), _rvc("bar"), _rvc("baz")  # rv => ReturnValue
        # properly init the service
        _ = AllReturnValuesEvaluatorForTesting
        evaluators = [
            _("eval1", ExecutionContextActions.EVALUATION, 20,
              match=lambda lst: context.sheerka.isinstance(lst[0].value, "foo"), eval_result=[rv_bar])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [rv_baz, rv_foo]  # foo is not the first in the list
        res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
        assert res == start
        # check what happen in details
        exec_context = next(filter(lambda ec: "evaluator" in ec.action_context, context.get_children()))
        evaluation_trace = exec_context.values["evaluation"]
        assert evaluation_trace == {"match": False}

    def test_eval_is_called_if_match_succeed_for_all_return(self, context, service):
        rv_foo, rv_bar, rv_baz = _rvc("foo"), _rvc("bar"), _rvc("baz")  # rv => ReturnValue
        # properly init the service
        _ = AllReturnValuesEvaluatorForTesting
        evaluators = [
            _("eval1", ExecutionContextActions.EVALUATION, 20,
              match=lambda lst: context.sheerka.isinstance(lst[0].value, "foo"), eval_result=[rv_bar])
        ]
        service.execution_plan = service.compute_execution_plan(evaluators)
        start = [rv_foo, rv_baz]
        res = service.execute(context, start, [ExecutionContextActions.EVALUATION])
        assert res == [rv_bar]
        assert res[0].parents == start
        # check what happen in details
        exec_context = next(filter(lambda ec: "evaluator" in ec.action_context, context.get_children()))
        evaluation_trace = exec_context.values["evaluation"]
        assert evaluation_trace == {"match": True, "new": res, "eaten": start}