Added basic implementation for Python code evaluation
@@ -0,0 +1,23 @@
from evaluators.BaseEvaluator import BaseEvaluator
from parsers.DefaultParser import DefConceptNode
import logging

log = logging.getLogger(__name__)


class AddConceptEvaluator(BaseEvaluator):
    """
    Used to add a new concept
    """

    def __init__(self):
        super().__init__("Add new Concept", 50)

    def matches(self, context, items):
        return len(items) == 1 and items[0].status and isinstance(items[0].value, DefConceptNode)

    def eval(self, context, items):
        log.debug("Adding a new concept")
        node = items[0].value
        sheerka = context.sheerka
        return sheerka.add_concept(context, node)
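For illustration only, a minimal sketch (not part of the commit) of the kind of input AddConceptEvaluator.matches() accepts: exactly one successful parse result whose value is a DefConceptNode. The Item namedtuple and the local DefConceptNode class are hypothetical stand-ins for the project's real result objects and parser node.

from collections import namedtuple

Item = namedtuple("Item", ["status", "value"])   # stand-in for a parser result


class DefConceptNode:                             # stand-in for parsers.DefaultParser.DefConceptNode
    pass


items = [Item(status=True, value=DefConceptNode())]
# the same predicate matches() uses above
print(len(items) == 1 and items[0].status and isinstance(items[0].value, DefConceptNode))  # True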
@@ -0,0 +1,14 @@
class BaseEvaluator:
    """
    Base class for evaluators of concepts or nodes
    """

    def __init__(self, name, priority: int):
        self.name = name
        self.priority = priority

    def matches(self, context, items):
        # subclasses return True when they can handle the given items
        pass

    def eval(self, context, items):
        # subclasses evaluate the items and return a result
        pass
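A hedged sketch of how these evaluators might be dispatched. It assumes the priority numbers mean "lower runs first", so the catch-all DefaultEvaluator (priority 90) would act as a fallback; nothing in this commit confirms that ordering, and run_first_matching is a hypothetical helper, not project code.

def run_first_matching(evaluators, context, items):
    # assumption: lower priority value is preferred; try each evaluator in turn
    for evaluator in sorted(evaluators, key=lambda e: e.priority):
        if evaluator.matches(context, items):
            return evaluator.eval(context, items)
    return None  # no evaluator claimed the items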
@@ -0,0 +1,39 @@
from core.sheerka import ReturnValue
from core.sheerka import Sheerka
from evaluators.BaseEvaluator import BaseEvaluator
import logging

log = logging.getLogger(__name__)


class DefaultEvaluator(BaseEvaluator):
    """
    Used to filter the responses of the parsers
    """

    def __init__(self):
        super().__init__("Default Evaluator", 90)

    def matches(self, context, items):
        return True

    def eval(self, context, items):
        successful_results = [item for item in items if item.status]
        number_of_successful = len(successful_results)
        total_items = len(items)

        # remove errors when a winner is found
        if number_of_successful == 1:
            log.debug(f"1 / {total_items} good item found.")
            return successful_results

        # too many winners, which one to choose?
        if number_of_successful > 1:
            log.debug(f"{number_of_successful} / {total_items} good items. Too many successes")
            return ReturnValue(self.name,
                               False,
                               context.sheerka.new(Sheerka.TOO_MANY_SUCCESS_CONCEPT_NAME, body=items))

        # only errors, I cannot help you
        log.debug(f"{total_items} items. Only errors")
        return items
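A small stand-alone illustration of the filtering rules above, using a namedtuple in place of the project's result objects (a hypothetical stand-in, for demonstration only): one success drops the errors, several successes are reported as ambiguous, and only errors are passed through untouched.

from collections import namedtuple

Item = namedtuple("Item", ["status", "value"])

items = [Item(False, "error A"), Item(True, "parsed ok"), Item(False, "error B")]
winners = [item for item in items if item.status]

if len(winners) == 1:
    result = winners        # exactly one success: the errors are dropped
elif len(winners) > 1:
    result = "ambiguous"    # the evaluator above wraps this case in a failing ReturnValue
else:
    result = items          # only errors: returned unchanged
print(result)               # [Item(status=True, value='parsed ok')]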
@@ -0,0 +1,32 @@
from core.concept import ReturnValueConcept, ErrorConcept
from evaluators.BaseEvaluator import BaseEvaluator
from parsers.PythonParser import PythonNode
import ast
from core.sheerka import ReturnValue, Sheerka
import logging

log = logging.getLogger(__name__)


class PythonEvaluator(BaseEvaluator):
    """
    Used to evaluate Python code parsed into a PythonNode
    """

    def __init__(self):
        super().__init__("Python Evaluator", 50)

    def matches(self, context, items):
        return len(items) == 1 and isinstance(items[0].value, PythonNode)

    def eval(self, context, items):
        sheerka = context.sheerka
        node = items[0].value
        if isinstance(node.ast, ast.Expression):
            try:
                log.debug("Evaluating python expression")
                compiled = compile(node.ast, "<string>", "eval")
                evaluated = eval(compiled, {}, {"sheerka": context.sheerka})
                concept = sheerka.new(ReturnValueConcept.NAME, body=evaluated)
                return ReturnValue(self.name, True, concept)
            except Exception as error:
                error = sheerka.new(ErrorConcept.NAME, body=error)
                return ReturnValue(self.name, False, error)
        else:
            # only bare expressions are handled; other node types are not supported yet
            raise NotImplementedError()
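The compile-and-eval path above can be reproduced with the standard library alone; only the sheerka/concept plumbing is project specific. A minimal example:

import ast

tree = ast.parse("2 + 3 * 4", mode="eval")    # ast.parse in "eval" mode yields an ast.Expression
assert isinstance(tree, ast.Expression)
compiled = compile(tree, "<string>", "eval")  # compile() accepts an AST node directly
print(eval(compiled, {}, {}))                 # 14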