Added basic implementation for Python code evaluation

This commit is contained in:
2019-11-07 17:18:07 +01:00
parent b818c992ec
commit 448ebc696a
18 changed files with 501 additions and 156 deletions
+39
View File
@@ -0,0 +1,39 @@
from core.sheerka import ReturnValue
from core.sheerka import Sheerka
from evaluators.BaseEvaluator import BaseEvaluator
import logging
log = logging.getLogger(__name__)
class DefaultEvaluator(BaseEvaluator):
    """Catch-all evaluator that filters parser responses by success status.

    Registered as "Default Evaluator" with priority 90. Its ``matches``
    always returns True, so it acts as the fallback when no more specific
    evaluator claims the context.
    """

    def __init__(self):
        # Name and priority forwarded to the BaseEvaluator registration.
        super().__init__("Default Evaluator", 90)

    def matches(self, context, items):
        """Always match: this is the default evaluator for every context."""
        return True

    def eval(self, context, items):
        """Filter *items* down to the successful ones.

        Args:
            context: evaluation context; provides ``context.sheerka`` used
                to build the ambiguity result.
            items: iterable of parser results, each with a truthy/falsy
                ``status`` attribute.

        Returns:
            - a one-element list with the single successful item when
              exactly one item has a truthy ``status``;
            - a failed ``ReturnValue`` wrapping all *items* when more than
              one succeeded (ambiguous — upstream must disambiguate);
            - the original *items* unchanged when none succeeded.

        NOTE(review): the branches return different types (list vs.
        ReturnValue); callers must handle both — confirm this is intentional.
        """
        successful_results = [item for item in items if item.status]
        number_of_successful = len(successful_results)
        total_items = len(items)

        # Exactly one winner: drop the errors and return just the winner.
        if number_of_successful == 1:
            # Lazy %-style args: no string formatting unless DEBUG is enabled.
            log.debug("1 / %s good item found.", total_items)
            return successful_results

        # Several winners: we cannot choose, signal the ambiguity upstream.
        if number_of_successful > 1:
            log.debug("%s / %s good items. Too many success",
                      number_of_successful, total_items)
            return ReturnValue(self.name,
                               False,
                               context.sheerka.new(Sheerka.TOO_MANY_SUCCESS_CONCEPT_NAME,
                                                   body=items))

        # Only errors: nothing to filter, pass everything through.
        log.debug("%s items. Only errors", total_items)
        return items