84 lines
3.3 KiB
Python
84 lines
3.3 KiB
Python
from core.builtin_concepts import BuiltinConcepts
|
|
import core.builtin_helpers
|
|
from core.concept import Concept
|
|
from evaluators.BaseEvaluator import AllReturnValuesEvaluator, BaseEvaluator
|
|
from evaluators.ConceptEvaluator import ConceptEvaluator
|
|
from evaluators.PythonEvaluator import PythonEvaluator
|
|
from parsers.BaseParser import BaseParser
|
|
|
|
|
|
class MultipleSameSuccessEvaluator(AllReturnValuesEvaluator):
    """
    Filter evaluator that reduces the set of responses when several
    evaluators returned the same successful answer.

    It runs with a low priority (50) so that other evaluators get a
    chance to resolve errors first.
    """

    NAME = "MultipleSameSuccess"

    def __init__(self):
        # Triggered after the evaluation phase, low priority (50).
        super().__init__(self.NAME, [BuiltinConcepts.AFTER_EVALUATION], 50)
        # Successful evaluator return values collected by matches().
        self.success = []

    def matches(self, context, return_values):
        """
        Decide whether this evaluator should process *return_values*.

        Returns True only when all of the following hold:
          * at least one return value explicitly requests a reduction
            (its body is an instance of REDUCE_REQUESTED),
          * more than one evaluator succeeded,
          * no parser succeeded (parsers, if present, are all in error).

        Side effects: fills ``self.success`` with the successful
        evaluator return values and appends every consumed return value
        to ``self.eaten``.
        """
        # Reset state from any previous invocation: matches() may be
        # called more than once on the same instance, and stale entries
        # in self.success would corrupt the election done in eval().
        self.success = []
        # NOTE(review): self.eaten is appended to below but never
        # initialized here — assumed to be created/reset by the base
        # class between runs; confirm in BaseEvaluator.

        nb_successful_evaluators = 0
        only_parsers_in_error = True
        to_process = False

        for ret in return_values:
            if ret.status and context.sheerka.isinstance(ret.body, BuiltinConcepts.REDUCE_REQUESTED):
                # A reduction was explicitly requested by this return value.
                to_process = True
                self.eaten.append(ret)
            elif ret.who.startswith(BaseEvaluator.PREFIX):
                # Evaluator output: count and keep the successful ones.
                if ret.status:
                    nb_successful_evaluators += 1
                    self.success.append(ret)
                self.eaten.append(ret)
            elif ret.who.startswith(BaseParser.PREFIX):
                # Parser output: a single parser success disqualifies us.
                self.eaten.append(ret)
                if ret.status:
                    only_parsers_in_error = False

        return to_process and nb_successful_evaluators > 1 and only_parsers_in_error

    def eval(self, context, return_values):
        """
        Elect a single return value among the identical successes.

        Priority order of the election:
          1. a Concept produced by the ConceptEvaluator,
          2. a Concept produced by the PythonEvaluator,
          3. the first Concept found (unpredictable order),
          4. otherwise the value of the first success.

        Returns None when the collected successes are not actually the
        same result (nothing to reduce).
        """
        sheerka = context.sheerka

        context.log(f"{len(self.success)} successful return value(s)", who=self)
        for s in self.success:
            context.log(f"{s}", who=self)

        if not core.builtin_helpers.is_same_success(context, self.success):
            return None

        # ######################################
        # !!!!! W A R N I N G !!!!!!!!
        # I have a massive issue with how I implement this feature:
        # I have forced an arbitrary order between the Concept evaluator
        # and the Python evaluator, and gave a random order to the others.
        #
        # I guess that we need a proper algorithm to elect which return
        # value to use when they have the same result.
        # My gut feeling is that it will depend on the intent of the user,
        # so it depends on the context.

        # Try to return a concept if possible:
        # give the priority to the ConceptEvaluator.
        for s in self.success:
            if isinstance(s.value, Concept) and s.who == ConceptEvaluator().name:
                return sheerka.ret(self.name, True, s.value, parents=self.eaten)

        # Then the PythonEvaluator.
        for s in self.success:
            if isinstance(s.value, Concept) and s.who == PythonEvaluator().name:
                return sheerka.ret(self.name, True, s.value, parents=self.eaten)

        # Then the first concept.
        # It's not predictable, so I guess that it's not a good
        # implementation choice.
        for s in self.success:
            if isinstance(s.value, Concept):
                return sheerka.ret(self.name, True, s.value, parents=self.eaten)

        return sheerka.ret(self.name, True, self.success[0].value, parents=self.eaten)
|
|
|