Implemented some enhancement requests

This commit is contained in:
2020-12-14 10:30:10 +01:00
parent 657c7536f7
commit e3c2adb533
46 changed files with 352 additions and 1286 deletions
+4 -4
View File
@@ -86,7 +86,7 @@ def concept plus from a plus b as a + b
def concept minus from a minus b as a - b
def concept multiplied from a multiplied by b as a * b
def concept divided from a divided by b as a / b
set_is_greater_than(__PRECEDENCE, multiplied, plus)
set_is_greater_than(__PRECEDENCE, divided, plus)
set_is_greater_than(__PRECEDENCE, multiplied, minus)
set_is_greater_than(__PRECEDENCE, divided, minus)
set_is_greater_than(__PRECEDENCE, multiplied, plus, 'Sya')
set_is_greater_than(__PRECEDENCE, divided, plus, 'Sya')
set_is_greater_than(__PRECEDENCE, multiplied, minus, 'Sya')
set_is_greater_than(__PRECEDENCE, divided, minus, 'Sya')
+3 -4
View File
@@ -60,9 +60,9 @@ class ReturnValueConcept(Concept):
It's the main input for the evaluators
"""
ALL_ATTRIBUTES = ["who", "status", "value", "parents", "message"]
ALL_ATTRIBUTES = ["who", "status", "value", "parents"]
def __init__(self, who=None, status=None, value=None, parents=None, message=None, concept_id=None):
def __init__(self, who=None, status=None, value=None, parents=None, concept_id=None):
Concept.__init__(self,
BuiltinConcepts.RETURN_VALUE,
True,
@@ -74,11 +74,10 @@ class ReturnValueConcept(Concept):
self.set_value("status", status)
self.set_value("value", value)
self.set_value("parents", parents)
self.set_value("message", message)
self._metadata.is_evaluated = True
def __repr__(self):
return f"ReturnValue(who={self.who}, status={self.status}, value={self.value}, message={self.message})"
return f"ReturnValue(who={self.who}, status={self.status}, value={self.value})"
def __eq__(self, other):
if id(self) == id(other):
+4 -2
View File
@@ -67,7 +67,8 @@ class BuiltinConcepts:
TOO_MANY_SUCCESS = "__TOO_MANY_SUCCESS" # when expecting a limited number of successful return value
TOO_MANY_ERRORS = "__TOO_MANY_ERRORS" # when expecting a limited number of successful return value
ONLY_SUCCESSFUL = "__ONLY_SUCCESSFUL" # filter the result, only keep successful ones
MULTIPLE_ERRORS = "__MULTIPLE_ERRORS" # filter the result, only keep evaluator in error
MULTIPLE_ERRORS = "__MULTIPLE_ERRORS" # filter the result, only keep evaluators in error
MULTIPLE_SUCCESS = "__MULTIPLE_SUCCESS" # filter the result, only keep successful evaluators
NOT_FOR_ME = "__NOT_FOR_ME" # a parser recognize that the entry is not meant for it
IS_EMPTY = "__IS_EMPTY" # when a set is empty
NO_RESULT = "__NO_RESULT" # no return value returned
@@ -78,7 +79,7 @@ class BuiltinConcepts:
CONCEPT_EVAL_ERROR = "__CONCEPT_EVAL_ERROR" # cannot evaluate a property or metadata of a concept
ENUMERATION = "__ENUMERATION" # represents a list or a set
LIST = "__LIST" # represents a list
FILTERED = "__FILTERED" # represents the result of a filtering
FILTERED = "__FILTERED" # represents the result of a filtering, the filtering condition should be indicated
CONCEPT_ALREADY_IN_SET = "__CONCEPT_ALREADY_IN_SET"
NOT_A_SET = "__NOT_A_SET" # the concept has no entry in sets
CONDITION_FAILED = "__CONDITION_FAILED" # failed to validate where clause during evaluation
@@ -176,6 +177,7 @@ BuiltinContainers = [
BuiltinConcepts.TO_LIST,
BuiltinConcepts.TO_DICT,
BuiltinConcepts.TO_MULTI,
BuiltinConcepts.MULTIPLE_SUCCESS,
]
BuiltinOutConcepts = [
+2 -2
View File
@@ -7,7 +7,7 @@ from core.sheerka.services.SheerkaExecute import SheerkaExecute
from core.tokenizer import Keywords
from parsers.BaseNodeParser import SourceCodeNode, ConceptNode, UnrecognizedTokensNode, SourceCodeWithConceptNode, \
RuleNode
from parsers.BaseParser import BaseParser, ErrorNode
from parsers.BaseParser import BaseParser, ParsingError
PARSE_STEPS = [BuiltinConcepts.BEFORE_PARSING, BuiltinConcepts.PARSING, BuiltinConcepts.AFTER_PARSING]
EVAL_STEPS = PARSE_STEPS + [BuiltinConcepts.BEFORE_EVALUATION, BuiltinConcepts.EVALUATION,
@@ -254,7 +254,7 @@ def only_parsers_results(context, return_values):
# hack because some parsers don't follow the NOT_FOR_ME rule
temp_ret_val = []
for ret_val in return_values_ok:
if isinstance(ret_val.body.body, ErrorNode):
if isinstance(ret_val.body.body, ParsingError):
continue
if isinstance(ret_val.body.body, list) and \
len(ret_val.body.body) == 1 and \
+3 -13
View File
@@ -101,9 +101,9 @@ class Sheerka(Concept):
"test_using_context": SheerkaMethod(self.test_using_context, False),
"test_dict": SheerkaMethod(self.test_dict, False)
}
self.sheerka_pipeables = {}
self.locals = {}
self.concepts_ids = None
@property
def resolved_concepts_by_first_keyword(self):
@@ -149,16 +149,6 @@ class Sheerka(Concept):
setattr(self, as_name, bound_method)
def add_pipeable(self, func_name, function, has_side_effect):
"""
Adds a function that can be used with pipe '|'
:param func_name:
:param function:
:param has_side_effect:
:return:
"""
self.sheerka_pipeables[func_name] = SheerkaMethod(function, has_side_effect)
def initialize(self, root_folder: str = None, save_execution_context=None, enable_process_return_values=None):
"""
Starting Sheerka
@@ -288,6 +278,7 @@ class Sheerka(Concept):
from core.sheerka.services.SheerkaConceptManager import SheerkaConceptManager
concept_service = self.services[SheerkaConceptManager.NAME]
concepts_ids = concept_service.initialize_builtin_concepts()
self.concepts_ids = concepts_ids
self.return_value_concept_id = concepts_ids[BuiltinConcepts.RETURN_VALUE]
self.error_concept_id = concepts_ids[BuiltinConcepts.ERROR]
@@ -565,13 +556,12 @@ class Sheerka(Concept):
concept._metadata.is_evaluated = True # because we have manually set the variables
return concept
def ret(self, who: str, status: bool, value, message=None, parents=None):
def ret(self, who: str, status: bool, value, parents=None):
"""
Creates and returns a ReturnValue concept
:param who:
:param status:
:param value:
:param message:
:param parents:
:return:
"""
+3 -3
View File
@@ -458,7 +458,7 @@ class SheerkaExecute(BaseService):
original_items = return_values[:]
evaluated_items = []
to_delete = []
to_delete = set()
for evaluator in grouped_evaluators[priority]:
evaluator.reset()
@@ -496,7 +496,7 @@ class SheerkaExecute(BaseService):
continue
# otherwise, item will be removed and replaced by result
to_delete.append(item)
to_delete.add(item)
if isinstance(result, list):
evaluated_items.extend(result)
elif isinstance(result, ReturnValueConcept):
@@ -525,7 +525,7 @@ class SheerkaExecute(BaseService):
for result in results:
if result.body != BuiltinConcepts.NO_RESULT:
evaluated_items.append(result)
to_delete.extend(result.parents)
to_delete.update(result.parents)
sub_context.add_values(return_values=results)
else:
sub_context.add_values(return_values=NO_MATCH)
-455
View File
@@ -1,455 +0,0 @@
# the principle and the Pipe class are taken from
# https://github.com/JulienPalard/Pipe
#
import builtins
import functools
import inspect
import itertools
import sys
from collections import deque
from cache.Cache import Cache
from core.builtin_concepts import BuiltinConcepts
from core.concept import Concept, ConceptParts
from core.sheerka.services.sheerka_service import BaseService
from core.utils import as_bag
from printer.FormatInstructions import FormatInstructions
from sheerkapickle.utils import is_primitive
class PropDesc:
    """Describes an object: its class name plus the list of its available properties.

    Two descriptors compare equal when they name the same class and expose the
    same set of properties, regardless of property ordering.
    """

    def __init__(self, class_name, props):
        self.class_name = class_name
        self.props = props

    def __repr__(self):
        return f"({self.class_name}{self.props})"

    def __eq__(self, other):
        # Identity short-circuit, then structural comparison.
        if self is other:
            return True
        if isinstance(other, PropDesc):
            # Order-insensitive comparison of the property lists.
            return (self.class_name == other.class_name
                    and sorted(self.props) == sorted(other.props))
        return False
class Pipe:
    """
    Represent a Pipeable Element :
    Described as :
    first = Pipe(lambda iterable: next(iter(iterable)))
    and used as :
    print [1, 2, 3] | first
    printing 1
    Or represent a Pipeable Function :
    It's a function returning a Pipe
    Described as :
    select = Pipe(lambda iterable, pred: (pred(x) for x in iterable))
    and used as :
    print [1, 2, 3] | select(lambda x: x * 2)
    # 2, 4, 6
    """

    def __init__(self, function, context=None):
        # `context` is the execution context to inject into context-aware pipes;
        # it may be None for plain pipes.
        self.context = context
        if isinstance(function, Pipe):
            # Re-wrapping an existing Pipe: share its (possibly already bound) callable.
            self.function = function.function
            self.need_context = function.need_context
        else:
            # A function is "context-aware" when its FIRST parameter is literally
            # named "context" (detected by introspection, not by type).
            signature = inspect.signature(function)
            if len(signature.parameters) > 0 and list(signature.parameters.keys())[0] == "context":
                self.need_context = True
                # Exactly two parameters (context, x): bind the context NOW.
                # NOTE: this closes over the `context` __init__ parameter, not
                # self.context — the two are the same object here, but mutating
                # self.context later would NOT affect the bound lambda.
                # With more than two parameters the raw function is kept and the
                # context is injected later, in __call__.
                self.function = (lambda x: function(context, x)) if len(signature.parameters) == 2 else function
            else:
                self.need_context = False
                self.function = function
        # Copy __name__/__doc__ etc. from the wrapped callable onto this Pipe.
        functools.update_wrapper(self, function)

    def __ror__(self, other):
        # `other | self` — the left operand is fed to the pipe's function.
        # Special case: an EXPLANATION concept is transformed in place (its body
        # is replaced by the pipe's result) and returned unchanged otherwise.
        if isinstance(other, Concept) and other.key == str(BuiltinConcepts.EXPLANATION):
            other.set_value(ConceptParts.BODY, self.function(other.body))
            return other
        return self.function(other)

    def __call__(self, *args, **kwargs):
        # Calling a Pipe produces a NEW Pipe with the extra arguments bound,
        # enabling the `xs | select(f)` form.
        if self.need_context:
            # Context-aware with >2 params: inject self.context at call time.
            return Pipe(lambda x: self.function(self.context, x, *args, **kwargs), self.context)
        else:
            return Pipe(lambda x: self.function(x, *args, **kwargs), self.context)
class SheerkaFilter(BaseService):
    """Service exposing the `pipe_*` methods below as pipeables usable with '|'.

    Every method whose name starts with "pipe_" is registered (minus the prefix)
    via sheerka.add_pipeable() during initialize().
    """

    NAME = "Filter"
    # Cache-manager entry under which compiled predicates are registered.
    PREDICATES_ENTRY = "Filter:Predicates"

    def __init__(self, sheerka):
        super().__init__(sheerka)
        # Small LRU-style cache of compiled predicate code objects.
        self.cache = Cache(max_size=30)

    def initialize(self):
        """Register every pipe_* method as a pipeable and register the predicate cache."""
        # For a weird reason, when the attribute @Pipe is directly added to the function
        # all following instances have the context property null
        for k, v in SheerkaFilter.__dict__.items():
            if k.startswith("pipe_"):
                # k[5:] strips the "pipe_" prefix to get the public pipeable name.
                if isinstance(v, staticmethod):
                    self.sheerka.add_pipeable(k[5:], v.__func__, True)
                else:
                    # Bind instance methods so they keep access to self (cache, context).
                    self.sheerka.add_pipeable(k[5:], v.__get__(self, self.__class__), True)
        self.sheerka.cache_manager.register_cache(self.PREDICATES_ENTRY, self.cache, False, False)

    def get_compiled(self, file_name, predicate):
        """
        Returns the compiled version of the predicate
        :param file_name: pseudo file name used in tracebacks (shown as <file_name>)
        :param predicate: Python expression source, used as the cache key
        :return: a code object suitable for eval()
        """
        compiled = self.cache.get(predicate)
        if compiled is not None:
            return compiled
        compiled = compile(predicate, f"<{file_name}>", "eval")
        self.cache.put(predicate, compiled)
        return compiled

    @staticmethod
    def pipe_first(iterable):
        """
        Return the first element of the iterable (raises StopIteration if empty)
        :param iterable:
        :return:
        """
        return next(iter(iterable))

    @staticmethod
    def pipe_take(iterable, n):
        """
        Yield the first n elements of the iterable
        :param iterable:
        :param n:
        :return:
        """
        for item in iterable:
            if n > 0:
                n -= 1
                yield item
            else:
                return

    @staticmethod
    def pipe_props(iterable):
        """
        Yield a PropDesc (class name + property names) for each item
        :return:
        """
        for item in iterable:
            yield PropDesc(type(item).__name__, list(as_bag(item).keys()))

    @staticmethod
    def pipe_tail(iterable, qte):
        "Yield the last qte elements of the given iterable."
        # deque with maxlen keeps only the trailing qte items in O(n) one pass.
        return deque(iterable, maxlen=qte)

    @staticmethod
    def pipe_skip(iterable, qte):
        "Skip qte elements in the given iterable, then yield others."
        for item in iterable:
            if qte == 0:
                yield item
            else:
                qte -= 1

    @staticmethod
    def pipe_dedup(iterable, key=lambda x: x):
        """Only yield unique items. Use a set to keep track of duplicate data."""
        seen = set()
        for item in iterable:
            dupkey = key(item)
            if dupkey not in seen:
                seen.add(dupkey)
                yield item

    @staticmethod
    def pipe_uniq(iterable, key=lambda x: x):
        """Deduplicate consecutive duplicate values (like Unix uniq)."""
        iterator = iter(iterable)
        try:
            prev = next(iterator)
        except StopIteration:
            # Empty input: yield nothing.
            return
        yield prev
        prevkey = key(prev)
        for item in iterator:
            itemkey = key(item)
            if itemkey != prevkey:
                yield item
            prevkey = itemkey

    @staticmethod
    def pipe_all(iterable, pred):
        """Returns True if ALL elements in the given iterable are true for the
        given pred function"""
        return builtins.all(pred(x) for x in iterable)

    @staticmethod
    def pipe_any(iterable, pred):
        """Returns True if ANY element in the given iterable is True for the
        given pred function"""
        return builtins.any(pred(x) for x in iterable)

    @staticmethod
    def pipe_average(iterable):
        """Build the average for the given iterable, starting with 0.0 as seed
        Raises ZeroDivisionError if the iterable is empty.
        """
        # warnings.warn(
        #     "pipe.average is deprecated, use statistics.mean instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        total = 0.0
        qte = 0
        for element in iterable:
            total += element
            qte += 1
        return total / qte

    @staticmethod
    def pipe_count(iterable):
        "Count the size of the given iterable, walking through it."
        # warnings.warn(
        #     "pipe.count is deprecated, use the builtin len() instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        count = 0
        for element in iterable:
            count += 1
        return count

    @staticmethod
    def pipe_max(iterable, **kwargs):
        """Return the max of the iterable; kwargs (key=..., default=...) forwarded to max()."""
        # warnings.warn(
        #     "pipe.max is deprecated, use the builtin max() instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return builtins.max(iterable, **kwargs)

    @staticmethod
    def pipe_min(iterable, **kwargs):
        """Return the min of the iterable; kwargs (key=..., default=...) forwarded to min()."""
        # warnings.warn(
        #     "pipe.min is deprecated, use the builtin min() instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return builtins.min(iterable, **kwargs)

    @staticmethod
    def pipe_as_dict(iterable):
        """Materialize an iterable of key/value pairs into a dict."""
        # warnings.warn(
        #     "pipe.as_dict is deprecated, use dict(your | pipe) instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return dict(iterable)

    @staticmethod
    def pipe_as_set(iterable):
        """Materialize the iterable into a set."""
        # warnings.warn(
        #     "pipe.as_set is deprecated, use set(your | pipe) instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return set(iterable)

    @staticmethod
    def pipe_permutations(iterable, r=None):
        """Yield the r-length permutations of the iterable (all lengths when r is None)."""
        # permutations('ABCD', 2) --> AB AC AD BA BC BD CA CB CD DA DB DC
        # permutations(range(3)) --> 012 021 102 120 201 210
        for x in itertools.permutations(iterable, r):
            yield x

    # @staticmethod
    # def pipe_netcat(to_send, host, port):
    #     with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
    #         s.connect((host, port))
    #         for data in to_send | traverse:
    #             s.send(data)
    #         while 1:
    #             data = s.recv(4096)
    #             if not data:
    #                 break
    #             yield data
    #
    # @staticmethod
    # def pipe_netwrite(to_send, host, port):
    #     warnings.warn("pipe.netwite is deprecated.", DeprecationWarning, stacklevel=4)
    #     with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
    #         s.connect((host, port))
    #         for data in to_send | SheerkaFilter.pipe_traverse:
    #             s.send(data)

    @staticmethod
    def pipe_traverse(iterable):
        """Recursively flatten nested iterables, yielding leaves (strings kept whole)."""
        for arg in iterable:
            try:
                if isinstance(arg, str):
                    yield arg
                else:
                    for i in arg | SheerkaFilter.pipe_traverse:
                        yield i
            except TypeError:
                # not iterable --- output leaf
                yield arg

    @staticmethod
    def pipe_concat(iterable, separator=", "):
        """Join the stringified items with the separator."""
        # warnings.warn(
        #     "pipe.concat is deprecated, use ', '.join(your | pipe) instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return separator.join(builtins.map(str, iterable))

    @staticmethod
    def pipe_as_list(iterable):
        """Materialize the iterable into a list."""
        # warnings.warn(
        #     "pipe.as_list is deprecated, use list(your | pipe) instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return list(iterable)

    @staticmethod
    def pipe_as_tuple(iterable):
        """Materialize the iterable into a tuple."""
        # warnings.warn(
        #     "pipe.as_tuple is deprecated, use tuple(your | pipe) instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return tuple(iterable)

    @staticmethod
    def pipe_tee(iterable):
        """Pass items through unchanged, echoing each one to stdout (debug aid)."""
        for item in iterable:
            sys.stdout.write(str(item) + "\n")
            yield item

    @staticmethod
    def pipe_to_file(iterable, fname, glue="\n"):
        """Write each item (stringified, followed by glue) to the file fname."""
        with open(fname, "w") as f:
            for item in iterable:
                f.write(str(item) + glue)

    @staticmethod
    def pipe_add(x):
        """Sum the items of the iterable."""
        # warnings.warn(
        #     "pipe.add is deprecated, use sum(your | pipe) instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return sum(x)

    @staticmethod
    def pipe_select(iterable, selector):
        """Lazily map selector over the iterable (like builtins.map)."""
        return builtins.map(selector, iterable)

    @staticmethod
    def pipe_format_l(iterable, template, when=None):
        """
        Define a formatting when printing a list of items
        :param iterable:
        :param template:
        :param when: format_l is set when the condition is verified
                     NOTE(review): `when` is currently unused in the body — confirm intent
        :return:
        """
        for item in iterable:
            # Only items exposing the format-instruction protocol are annotated.
            if hasattr(item, "get_format_instructions"):
                instructions = item.get_format_instructions() or FormatInstructions()
                instructions.set_format_l(item, template)
                item.set_format_instructions(instructions)
            yield item

    @staticmethod
    def pipe_format_d(iterable, *props, when=None, **format_l):
        """
        Define a formatting when printing the detail of an item
        :param iterable:
        :param props: list of properties to display
        :param when: format_d is set when the condition is verified
                     NOTE(review): `when` is currently unused in the body — confirm intent
        :param format_l: custom formatting when printing the value of a property
        :return:
        """
        # Default template: one "{prop}" placeholder per requested property,
        # overridden per-key by any explicit format_l entries.
        template = dict((p, "{" + p + "}") for p in props)
        for k, v in format_l.items():
            template[k] = v
        for item in iterable:
            if hasattr(item, "get_format_instructions"):
                if len(template) == 0:
                    # No props requested: derive the template from the FIRST such
                    # item's bag. NOTE(review): `template` is then reused for all
                    # subsequent items — confirm this sharing is intended.
                    bag = as_bag(item)
                    template = dict((p, "{" + p + "}") for p in bag)
                instructions = item.get_format_instructions() or FormatInstructions()
                instructions.set_format_d(item, template)
                item.set_format_instructions(instructions)
            yield item

    @staticmethod
    def pipe_recurse(iterable, depth, prop_name="_children", when=None):
        """
        When printing an object that has sub properties,
        indicate the depth of recursion to apply to a specific properties
        Quick and dirty version because the prop name is not taken from the item (but set to '_children' by default)
        :param iterable:
        :param depth:
        :param prop_name:
        :param when: recurse is set when the condition is verified
                     NOTE(review): `when` is currently unused in the body — confirm intent
        :return:
        """
        for item in iterable:
            if hasattr(item, "get_format_instructions"):
                instructions = item.get_format_instructions() or FormatInstructions()
                instructions.set_recurse(prop_name, depth)
                item.set_format_instructions(instructions)
            yield item

    def pipe_filter(self, iterable, predicate):
        """Yield items for which the predicate expression evaluates truthy.

        The predicate is compiled once (cached) and eval()'d with the item's
        properties as globals plus `self` bound to the item itself.
        SECURITY: predicates are executed with eval() — they must come from a
        trusted source.
        """
        compiled = self.get_compiled("filter", predicate)
        for item in iterable:
            try:
                context = {} if is_primitive(item) else as_bag(item)
                context["self"] = item
                if eval(compiled, context):
                    yield item
            except NameError:
                # Predicate references a property this item does not have: skip it.
                pass

    def pipe_inspect(self, iterable, path, when=None):
        """
        Follow the path
        :param iterable:
        :param path: dotted property path (e.g. "a.b.c") evaluated step by step
        :param when: NOTE(review): currently unused in the body — confirm intent
        :return:
        """
        # quick and dirty implementation as it does not handle dictionaries items
        for item in iterable:
            try:
                props = path.split(".")
                for prop in props:
                    compiled = self.get_compiled("inspect", prop)
                    context = {} if is_primitive(item) else as_bag(item)
                    item = eval(compiled, context)
                yield item
            except Exception as ex:
                # Broad catch by design: a failed path yields the exception itself
                # instead of aborting the whole pipe.
                yield ex
+2 -2
View File
@@ -59,11 +59,11 @@ class SheerkaOut(BaseService):
sub_context.protected_hints.add(BuiltinConcepts.EVAL_WHERE_REQUESTED)
sub_context.protected_hints.add(BuiltinConcepts.EVAL_UNTIL_SUCCESS_REQUESTED)
sub_context.protected_hints.add(BuiltinConcepts.EVAL_QUESTION_REQUESTED)
# sub_context.deactivate_push()
sub_context.deactivate_push()
out_tree = self.create_out_tree(sub_context, ret)
# sub_context.activate_push()
sub_context.activate_push()
if out_tree:
for visitor in self.out_visitors:
@@ -654,43 +654,48 @@ class SheerkaRuleManager(BaseService):
def init_builtin_rules(self, context):
# self.sheerka.init_log.debug("Initializing default rules")
rules = [
# [0] Rule #1 Rule #2 in debug
# index=[0] in code, id=1 Rule #2 in debug
Rule("print", "Print return values", "__rets", "list(__rets)"),
# [1] Rule #2 in debug
# index=[1] in code, id=2 in debug
Rule("print", "Print ReturnValue",
"__ret",
"\\ReturnValue(who={__ret.who}, status={__ret.status}, value={__ret.value})"),
# [2] Rule #3 in debug
# index=[2] in code, id=3 in debug
Rule("print", "Failed ReturnValue in red",
"__ret and not __ret.status",
"red(__ret)"),
# [3] Rule #4 in debug
# index=[3] in code, id=4 in debug
Rule("print", "List explanations",
"isinstance(__ret_container, BuiltinConcepts.EXPLANATION)",
"blue(__ret_container.digest) : {__ret_container.command}\nlist(__ret_container)"),
# [4] Rule #5 in debug
# index=[4] in code, id=5 in debug
Rule("print", "Print ExecutionContext",
"isinstance(__obj, ExecutionContext)",
"[{id:3}] {__tab}{desc} ({status})"),
# [6] Rule #7 in debug
# index=[5] in code, id=6 in debug
Rule("print", "Display formatted list",
"isinstance(__ret_container, BuiltinConcepts.TO_LIST)",
"list(__ret_container)"),
# [7] Rule #8 in debug
# index=[6] in code, id=7 in debug
Rule("print", "Display formatted dict",
"isinstance(__ret_container, BuiltinConcepts.TO_DICT)",
"dict(__ret_container)"),
# [8] Rule #9 in debug
# index=[7] in code, id=8 in debug
Rule("print", "Display multiple outputs",
"isinstance(__ret_container, BuiltinConcepts.TO_MULTI)",
"multi(__ret_container)"),
# index=[8] in code, id=9 in debug
Rule("print", "Display multiple success",
"isinstance(__ret_container, BuiltinConcepts.MULTIPLE_SUCCESS)",
"list(__ret_container.body)"),
]
for r in rules:
@@ -700,6 +705,7 @@ class SheerkaRuleManager(BaseService):
self.sheerka.set_is_less_than(context, BuiltinConcepts.PRECEDENCE, rules[1], rules[3], RULE_COMPARISON_CONTEXT)
self.sheerka.set_is_less_than(context, BuiltinConcepts.PRECEDENCE, rules[1], rules[5], RULE_COMPARISON_CONTEXT)
self.sheerka.set_is_less_than(context, BuiltinConcepts.PRECEDENCE, rules[1], rules[6], RULE_COMPARISON_CONTEXT)
self.sheerka.set_is_less_than(context, BuiltinConcepts.PRECEDENCE, rules[1], rules[8], RULE_COMPARISON_CONTEXT)
self.sheerka.set_is_greater_than(context, BuiltinConcepts.PRECEDENCE, rules[7], rules[6],
RULE_COMPARISON_CONTEXT)
self.sheerka.set_is_greater_than(context, BuiltinConcepts.PRECEDENCE, rules[7], rules[5],
+1 -2
View File
@@ -3,8 +3,7 @@ from core.builtin_concepts import BuiltinConcepts, ParserResultConcept
from core.rule import Rule
from core.tokenizer import Keywords
from evaluators.BaseEvaluator import OneReturnValueEvaluator
from parsers.BaseParser import BaseParser
from parsers.FormatRuleParser import FormatRuleNode
from parsers.DefFormatRuleParser import FormatRuleNode
class FormatRuleEvaluator(OneReturnValueEvaluator):
+1 -1
View File
@@ -5,7 +5,7 @@ from parsers.BaseParser import BaseParser
class MultipleErrorsEvaluator(AllReturnValuesEvaluator):
"""
Use to reduce to evaluator errors
Used to reduce to evaluator errors
All parser error will be discarded
Cannot match if there is at least one successful evaluator
"""
@@ -0,0 +1,67 @@
from core.builtin_concepts import BuiltinConcepts
from evaluators.BaseEvaluator import AllReturnValuesEvaluator, BaseEvaluator
from parsers.BaseParser import BaseParser
class MultipleSuccessEvaluator(AllReturnValuesEvaluator):
    """
    Used to reduce evaluators
    It's used when there are multiple successful return values, but their values are not evaluated
    So we cannot decide whether it's a MultipleSameSuccess or not
    All parser in error will be discarded
    Cannot match if there is at least one evaluator in error
    """
    NAME = "MultipleSuccess"

    def __init__(self):
        # Runs during the AFTER_EVALUATION step with priority 30.
        super().__init__(self.NAME, [BuiltinConcepts.AFTER_EVALUATION], 30)
        # Successful evaluator return values collected by matches(), consumed by eval().
        self.successful_return_values = []

    def reset(self):
        """Clear collected state between evaluation rounds."""
        super().reset()
        self.successful_return_values.clear()

    def matches(self, context, return_values):
        """Return True when a reduce was requested and 2+ distinct evaluators succeeded."""
        nb_evaluators_in_success = 0
        to_process = False
        for ret in return_values:
            # A successful parser result means parsing is still in play: never match.
            if ret.status and ret.who.startswith(BaseParser.PREFIX):
                return False
            # Any evaluator in error disqualifies this reducer (see class docstring).
            elif ret.who.startswith(BaseEvaluator.PREFIX) and not ret.status:
                return False
            # NOTE: checked BEFORE the generic evaluator-success branch below, so a
            # REDUCE_REQUESTED value only arms the reducer and is not counted as a success.
            elif ret.status and context.sheerka.isinstance(ret.body, BuiltinConcepts.REDUCE_REQUESTED):
                to_process = True
                self.eaten.append(ret)
            elif ret.status and ret.who.startswith(BaseEvaluator.PREFIX):
                # Duplicate values would make this a "MultipleSameSuccess" case: bail out.
                if self.already_seen(context, ret):
                    return False
                nb_evaluators_in_success += 1
                self.successful_return_values.append(ret)
                self.eaten.append(ret)
            elif not ret.status and ret.who.startswith(BaseParser.PREFIX):
                # Parser errors are swallowed (eaten, discarded from the result).
                self.eaten.append(ret)
            # else:
            #     other concepts. We do not care if there are successful or not
            #     They won't be part of result nor part of the parent
            #     --> So they will be handled by other evaluators
        return to_process and nb_evaluators_in_success > 1

    def eval(self, context, return_values):
        """Wrap all collected successful return values into one MULTIPLE_SUCCESS concept."""
        context.log(f"{len(self.successful_return_values)} successful return values, {len(self.eaten)} item(s) eaten",
                    who=self)
        context.log(f"{self.successful_return_values}", who=self)
        sheerka = context.sheerka
        # copy() so a later reset() does not mutate the returned concept's body.
        return sheerka.ret(
            self.name,
            True,
            sheerka.new(BuiltinConcepts.MULTIPLE_SUCCESS, body=self.successful_return_values.copy()),
            parents=self.eaten)

    def already_seen(self, context, ret_val):
        """Return True when an equal value was already collected in this round."""
        for successful in self.successful_return_values:
            if context.sheerka.objvalue(successful.value) == context.sheerka.objvalue(ret_val.value):
                return True
        return False
+12 -12
View File
@@ -2,7 +2,7 @@ from dataclasses import dataclass, field
import core.utils
from core.tokenizer import Keywords, TokenKind, Tokenizer
from parsers.BaseParser import BaseParser, Node, ErrorNode, UnexpectedEofNode, UnexpectedTokenErrorNode
from parsers.BaseParser import BaseParser, Node, ParsingError, UnexpectedEofParsingError, UnexpectedTokenParsingError
@dataclass()
@@ -14,7 +14,7 @@ class CustomGrammarParserNode(Node):
@dataclass()
class SyntaxErrorNode(CustomGrammarParserNode, ErrorNode):
class SyntaxErrorNode(CustomGrammarParserNode, ParsingError):
"""
The input is recognized, but there is a syntax error
"""
@@ -40,7 +40,7 @@ class SyntaxErrorNode(CustomGrammarParserNode, ErrorNode):
@dataclass()
class KeywordNotFound(CustomGrammarParserNode, ErrorNode):
class KeywordNotFound(CustomGrammarParserNode, ParsingError):
keywords: list
def __eq__(self, other):
@@ -98,12 +98,12 @@ class BaseCustomGrammarParser(BaseParser):
return None
if tokens[pos].type != TokenKind.NEWLINE:
self.add_error(UnexpectedTokenErrorNode("New line not found.", tokens[pos], [TokenKind.NEWLINE]))
self.add_error(UnexpectedTokenParsingError("New line not found.", tokens[pos], [TokenKind.NEWLINE]))
return None
pos += 1
if tokens[pos].type != TokenKind.WHITESPACE:
self.add_error(UnexpectedTokenErrorNode("Indentation not found.", tokens[pos], [TokenKind.WHITESPACE]))
self.add_error(UnexpectedTokenParsingError("Indentation not found.", tokens[pos], [TokenKind.WHITESPACE]))
return None
indent_size = get_tab_size(self.DEFAULT_TAB_SIZE, tokens[pos].value)
@@ -113,9 +113,9 @@ class BaseCustomGrammarParser(BaseParser):
while i < len(tokens) - 1:
if tokens[i].type == TokenKind.NEWLINE:
if tokens[i + 1].type != TokenKind.WHITESPACE:
self.add_error(UnexpectedTokenErrorNode("Indentation not found.",
tokens[i + 1],
[TokenKind.WHITESPACE]))
self.add_error(UnexpectedTokenParsingError("Indentation not found.",
tokens[i + 1],
[TokenKind.WHITESPACE]))
return None
if get_tab_size(self.DEFAULT_TAB_SIZE, tokens[i + 1].value) < indent_size:
@@ -177,9 +177,9 @@ class BaseCustomGrammarParser(BaseParser):
token = self.parser_input.token
if expected_first_token and token.value != expected_first_token.value:
self.add_error(UnexpectedTokenErrorNode(f"'{expected_first_token.value}' keyword not found.",
token,
[expected_first_token]))
self.add_error(UnexpectedTokenParsingError(f"'{expected_first_token.value}' keyword not found.",
token,
[expected_first_token]))
return None
if token.value not in keywords:
@@ -225,7 +225,7 @@ class BaseCustomGrammarParser(BaseParser):
res[keyword] = [token] # to keep track of when it starts
colon_mode_activated = self.parser_input.the_token_after().type == TokenKind.COLON
if not self.parser_input.next_token():
self.add_error(UnexpectedEofNode(f"While parsing keyword '{keyword.value}'."))
self.add_error(UnexpectedEofParsingError(f"While parsing keyword '{keyword.value}'."))
break
else:
res[keyword].append(token)
+3 -3
View File
@@ -8,7 +8,7 @@ from core.builtin_concepts import BuiltinConcepts
from core.concept import VARIABLE_PREFIX, Concept, DEFINITION_TYPE_BNF, ConceptParts
from core.rule import Rule
from core.tokenizer import TokenKind, Token
from parsers.BaseParser import Node, BaseParser, ErrorNode
from parsers.BaseParser import Node, BaseParser, ParsingError
DEBUG_COMPILED = True
@@ -19,7 +19,7 @@ class ChickenAndEggError(Exception):
@dataclass
class NoFirstTokenError(ErrorNode):
class NoFirstTokenError(ParsingError):
concept: Concept
key: str
@@ -424,7 +424,7 @@ class SourceCodeWithConceptNode(LexerNode):
@dataclass()
class GrammarErrorNode(ErrorNode):
class GrammarErrorNode(ParsingError):
message: str
+13 -208
View File
@@ -35,14 +35,6 @@ class Node:
pass
@dataclass()
class NopNode(Node):
pass
def __repr__(self):
return "nop"
class NotInitializedNode(Node):
pass
@@ -51,12 +43,12 @@ class NotInitializedNode(Node):
@dataclass()
class ErrorNode(Node, ErrorObj):
class ParsingError(Node, ErrorObj):
pass
@dataclass()
class UnexpectedTokenErrorNode(ErrorNode):
class UnexpectedTokenParsingError(ParsingError):
message: str
token: Union[Token, str]
expected_tokens: list
@@ -65,7 +57,7 @@ class UnexpectedTokenErrorNode(ErrorNode):
if id(other) == id(self):
return True
if not isinstance(other, UnexpectedTokenErrorNode):
if not isinstance(other, UnexpectedTokenParsingError):
return False
if self.message != other.message:
@@ -82,8 +74,8 @@ class UnexpectedTokenErrorNode(ErrorNode):
@dataclass()
class UnexpectedEofNode(ErrorNode):
message: str
class UnexpectedEofParsingError(ParsingError):
message: str = None
class BaseParser:
@@ -214,46 +206,16 @@ class BaseParser:
return parser_input.value
@staticmethod
def manage_eof(lst, strip_eof):
if strip_eof:
if len(lst) and lst[-1].type == TokenKind.EOF:
lst.pop()
return lst
if len(lst) == 0 or not lst[-1].type == TokenKind.EOF:
lst.append(Token(TokenKind.EOF, "", -1, -1, -1))
return lst
# @staticmethod
# def get_text_from_tokens(tokens, custom_switcher=None, tracker=None):
# """
# Create the source code, from the list of token
# :param tokens: list of tokens
# :param custom_switcher: to override the behaviour (the return value) of some token
# :param tracker: keep track of the original token value when custom switched
# :return:
# """
# if tokens is None:
# return ""
# res = ""
# def manage_eof(lst, strip_eof):
# if strip_eof:
# if len(lst) and lst[-1].type == TokenKind.EOF:
# lst.pop()
# return lst
#
# if not hasattr(tokens, "__iter__"):
# tokens = [tokens]
#
# switcher = {
# # TokenKind.CONCEPT: lambda t: core.utils.str_concept(t.value),
# }
#
# if custom_switcher:
# switcher.update(custom_switcher)
#
# for token in tokens:
# value = switcher.get(token.type, lambda t: t.str_value)(token)
# res += value
# if tracker is not None and token.type in custom_switcher:
# tracker[value] = token.value
# return res
# if len(lst) == 0 or not lst[-1].type == TokenKind.EOF:
# lst.append(Token(TokenKind.EOF, "", -1, -1, -1))
# return lst
@staticmethod
def get_tokens_boundaries(tokens):
@@ -302,160 +264,3 @@ class BaseParser:
@staticmethod
def get_name(name):
return BaseParser.PREFIX + name
class BaseTokenizerIterParser(BaseParser):
    """Base parser that walks tokens one at a time from a Tokenizer iterator."""

    def __init__(self, name, priority, parse_word=False, none_on_eof=True):
        super().__init__(name, priority)
        # Iterator over the Tokenizer; created in reset_parser().
        self.lexer_iter = None
        # The current token (None before reset, after EOF when none_on_eof, or when exhausted).
        self._current = None
        self.context: ExecutionContext = None
        self.text = None
        self.sheerka = None
        # Forwarded to Tokenizer: whether it should tokenize word-by-word.
        self.parse_word = parse_word
        # When True, _current is set to None as soon as an EOF token is seen.
        self.none_on_eof = none_on_eof

    def reset_parser(self, context, text):
        """Rebind the parser to a new context/text and position it on the first token."""
        self.context = context
        self.sheerka = context.sheerka
        self.text = text
        self.lexer_iter = iter(Tokenizer(text, self.parse_word))
        self._current = None
        self.next_token()

    def add_error(self, error, next_token=True):
        """Record an error and (by default) advance past the offending token."""
        self.error_sink.append(error)
        if next_token:
            self.next_token()
        return error

    def get_token(self) -> Token:
        return self._current

    def next_token(self, skip_whitespace=True):
        """Advance to the next token; return False once the stream is exhausted.

        When skip_whitespace is True, WHITESPACE and NEWLINE tokens are skipped.
        NOTE(review): the EOF/none_on_eof check is applied only to the first
        token fetched, not after the whitespace-skipping loop — confirm intent.
        """
        try:
            self._current = next(self.lexer_iter)
            if self.none_on_eof and self._current.type == TokenKind.EOF:
                self._current = None
                return False
            if skip_whitespace:
                while self._current.type == TokenKind.WHITESPACE or self._current.type == TokenKind.NEWLINE:
                    self._current = next(self.lexer_iter)
        except StopIteration:
            self._current = None
            return False
        return True
class BaseSplitIterParser(BaseParser):
    """Base parser with a tiny hand-rolled scanner (split on whitespace and a few symbols).

    Split characters come in two flavours:
      - split_and_eat_tokens: consumed silently (whitespace),
      - split_and_keep_tokens: emitted as their own tokens ('=', '(', ')', ',').
    """

    def __init__(self, name, priority, none_on_eof=False):
        super().__init__(name, priority)
        self._current = None
        self.context: ExecutionContext = None
        self.text = None
        self.sheerka = None
        # Iterator over self.split(); created in reset_parser().
        self.iter_split = None
        self.split_and_eat_tokens = (" ", "\n", "\t")
        self.split_and_keep_tokens = ("=", ")", "(", ",")
        self.split_tokens = self.split_and_eat_tokens + self.split_and_keep_tokens
        self.none_on_eof = none_on_eof  # current token is set to None when EOF is hit

    def parse_word(self, c, index, line, column):
        """Scan one word starting at (c, index); return (word, new_index, new_line, new_column).

        Handles backslash escapes and quoted words: once a quote is opened, only
        the matching quote terminates the word, and the quotes themselves are not
        part of the returned buffer.
        """
        end = self.split_tokens
        escaped = False
        buffer = ""
        while escaped or c not in end:
            if not escaped and c == "\\":
                escaped = True
            elif not escaped and c in ("'", '"'):
                # Entering quoted mode: from now on only this quote char ends the word.
                end = [c]
            else:
                buffer += c
                escaped = False
            index, column = index + 1, column + 1
            if index == len(self.text):
                break
            c = self.text[index]
            if c == "\n":
                line += 1
                column = 0
        # Consume the terminator unless it must be emitted as its own token:
        # whitespace and closing quotes are eaten; '=', '(', ')', ',' are kept
        # for split() to tokenize next.
        if c not in self.split_and_keep_tokens:  # 'not in' instead of 'in' to when c is a quote
            index, column = index + 1, column + 1
        return buffer, index, line, column

    def split(self):
        """Generator producing the token stream for self.text, ending with an EOF token."""
        index = 0
        line = 1
        column = 1
        while index < len(self.text):
            c = self.text[index]
            if c == "=":
                # Two-character lookahead to distinguish '==' from '='.
                if index + 1 < len(self.text) and self.text[index + 1] == "=":
                    yield Token(TokenKind.EQUALSEQUALS, "==", index, line, column)
                    index, column = index + 2, column + 2
                else:
                    yield Token(TokenKind.EQUALS, "=", index, line, column)
                    index, column = index + 1, column + 1
            elif c == ")":
                yield Token(TokenKind.RPAR, ")", index, line, column)
                index, column = index + 1, column + 1
            elif c == "(":
                yield Token(TokenKind.LPAR, "(", index, line, column)
                index, column = index + 1, column + 1
            elif c == ",":
                yield Token(TokenKind.COMMA, ",", index, line, column)
                index, column = index + 1, column + 1
            else:
                # Anything else (including whitespace runs) goes through parse_word;
                # an empty buffer (pure whitespace) yields no token.
                buffer, end_index, end_line, end_column = self.parse_word(c, index, line, column)
                if buffer:
                    yield Token(TokenKind.WORD, buffer, index, line, column)
                index, line, column = end_index, end_line, end_column
        yield Token(TokenKind.EOF, "<eof>", index, line, column)

    def reset_parser(self, context, text):
        """Rebind the parser to a new context/text; the first token is NOT pre-fetched."""
        self.context = context
        self.sheerka = context.sheerka if context else None
        self.text = text
        self._current = None
        self.iter_split = iter(self.split())

    def add_error(self, error, next_token=True):
        """Record an error and (by default) advance past the offending token."""
        self.error_sink.append(error)
        if next_token:
            self.next_token()
        return error

    def get_token(self) -> Token:
        return self._current

    def next_token(self):
        """Advance to the next token; return False on EOF or exhaustion."""
        try:
            self._current = next(self.iter_split)
            if self._current.type == TokenKind.EOF:
                if self.none_on_eof:
                    self._current = None
                return False
        except StopIteration:
            self._current = None
            return False
        return True
+6 -13
View File
@@ -1,19 +1,12 @@
from dataclasses import dataclass
import core.utils
from core.builtin_concepts import BuiltinConcepts
from core.sheerka.Sheerka import ExecutionContext
from core.tokenizer import Tokenizer, Token, TokenKind, LexerError
from parsers.BaseParser import BaseParser, ErrorNode, UnexpectedTokenErrorNode
from parsers.BaseParser import BaseParser, UnexpectedTokenParsingError, UnexpectedEofParsingError
from parsers.BnfNodeParser import OrderedChoice, Sequence, Optional, ZeroOrMore, OneOrMore, \
ConceptExpression, StrMatch
@dataclass()
class UnexpectedEndOfFileError(ErrorNode):
pass
class BnfDefinitionParser(BaseParser):
"""
Parser used to transform literal into ParsingExpression
@@ -52,7 +45,7 @@ class BnfDefinitionParser(BaseParser):
self.context = context
self.sheerka = context.sheerka
self.source = ""
self.lexer_iter = iter(Tokenizer(text.strip())) if isinstance(text, str) else iter(text)
self._current = None
self.after_current = None
@@ -123,7 +116,7 @@ class BnfDefinitionParser(BaseParser):
token = self.get_token()
if token and token.type != TokenKind.EOF:
self.add_error(UnexpectedTokenErrorNode(f"Unexpected token '{token}'", token, []))
self.add_error(UnexpectedTokenParsingError(f"Unexpected token '{token}'", token, []))
except LexerError as e:
return self.sheerka.ret(
self.name,
@@ -221,7 +214,7 @@ class BnfDefinitionParser(BaseParser):
def parse_expression(self):
token = self.get_token()
if token.type == TokenKind.EOF:
self.add_error(UnexpectedEndOfFileError(), False)
self.add_error(UnexpectedEofParsingError(), False)
if token.type == TokenKind.LPAR:
self.nb_open_par += 1
self.next_token()
@@ -232,7 +225,7 @@ class BnfDefinitionParser(BaseParser):
self.next_token()
return self.eat_rule_name_if_needed(expr)
else:
self.add_error(UnexpectedTokenErrorNode(f"Unexpected token '{token}'", token, [TokenKind.RPAR]))
self.add_error(UnexpectedTokenParsingError(f"Unexpected token '{token}'", token, [TokenKind.RPAR]))
return expr
if token.type == TokenKind.CONCEPT:
@@ -291,7 +284,7 @@ class BnfDefinitionParser(BaseParser):
if token is None or token.type != TokenKind.IDENTIFIER:
return self.add_error(
UnexpectedTokenErrorNode(f"Unexpected token '{token}'", token, [TokenKind.IDENTIFIER]))
UnexpectedTokenParsingError(f"Unexpected token '{token}'", token, [TokenKind.IDENTIFIER]))
expression.rule_name = token.value
self.next_token()
-66
View File
@@ -1341,72 +1341,6 @@ class BnfNodeParser(BaseNodeParser):
debugger.debug_var("result", concept_parser_helpers)
return concept_parser_helpers
def fix_infinite_recursions(self, context, grammar, concept_id, parsing_expression):
"""
Check the newly created parsing expression
Some infinite recursion can be resolved, simply by removing the pexpression that causes the loop
Let's look for that
:param context:
:param grammar:
:param concept_id:
:param parsing_expression:
:return:
"""
def _find(expression_, path_):
index_ = -1
parent_ = None
for node_id in path_:
expression_ = expression_.nodes[0] if isinstance(expression_, ConceptExpression) else expression_
for i, node in [(i, n) for i, n in enumerate(expression_.nodes) if isinstance(n, ConceptExpression)]:
if node_id == node.concept.id:
index_ = i
parent_ = expression_
expression_ = node # take the child of the ConceptExpression found
break
else:
raise IndexError(f"path {path_} cannot be found in '{expression_}'")
return parent_, index_, expression_
def _fix_node(expression, path):
parent, index, expression_update = _find(expression, path[1:-2])
assert isinstance(expression_update, ConceptExpression)
desc = f"Fixing circular reference {path}"
with context.push(BuiltinConcepts.INIT_BNF,
expression_update.concept,
who=self.name,
obj=expression_update.concept,
concepts_to_skip=[concept_id],
desc=desc) as sub_context:
new_grammar = grammar.copy()
for node_id in path[-2:]:
del new_grammar[node_id]
new_nodes = self.resolve_concept_parsing_expression(sub_context,
expression_update.concept,
expression_update.rule_name, new_grammar, set())
new = ConceptExpression(expression_update.concept,
rule_name=expression_update.rule_name,
nodes=new_nodes)
parent.nodes[index] = new
while True:
already_found = [concept_id]
concepts_in_recursion = []
if self.check_for_infinite_recursion(parsing_expression, already_found, concepts_in_recursion):
if "#" in concepts_in_recursion[-2]:
# means that it's isaset concept
_fix_node(parsing_expression, concepts_in_recursion[:-1])
else:
break
else:
break
return concepts_in_recursion
def check_for_infinite_recursion(self, parsing_expression, already_found, in_recursion, only_first=False):
if isinstance(parsing_expression, ConceptExpression):
+9 -9
View File
@@ -7,7 +7,7 @@ from core.concept import ConceptParts, DEFINITION_TYPE_BNF, DEFINITION_TYPE_DEF
from core.sheerka.services.SheerkaExecute import ParserInput, SheerkaExecute
from core.tokenizer import TokenKind, Keywords
from parsers.BaseCustomGrammarParser import BaseCustomGrammarParser, SyntaxErrorNode
from parsers.BaseParser import Node, ErrorNode, NotInitializedNode, UnexpectedTokenErrorNode
from parsers.BaseParser import Node, ParsingError, NotInitializedNode, UnexpectedTokenParsingError
from parsers.BnfDefinitionParser import BnfDefinitionParser
@@ -17,7 +17,7 @@ class ParsingException(Exception):
@dataclass()
class DefConceptParserNode(Node):
class DefConceptParsingResult(Node):
"""
Base node for all default parser nodes
"""
@@ -25,12 +25,12 @@ class DefConceptParserNode(Node):
@dataclass()
class DefConceptParserErrorNode(DefConceptParserNode, ErrorNode):
class DefConceptParsingError(DefConceptParsingResult, ParsingError):
pass
@dataclass()
class CannotHandleErrorNode(DefConceptParserErrorNode):
class CannotHandleParsingError(DefConceptParsingError):
"""
The input is not recognized
"""
@@ -38,7 +38,7 @@ class CannotHandleErrorNode(DefConceptParserErrorNode):
@dataclass()
class NameNode(DefConceptParserNode):
class NameNode(DefConceptParsingResult):
def get_name(self):
name = ""
@@ -70,7 +70,7 @@ class NameNode(DefConceptParserNode):
@dataclass()
class DefConceptNode(DefConceptParserNode):
class DefConceptNode(DefConceptParsingResult):
name: NameNode = NotInitializedNode()
where: ReturnValueConcept = NotInitializedNode()
pre: ReturnValueConcept = NotInitializedNode()
@@ -92,7 +92,7 @@ class DefConceptNode(DefConceptParserNode):
@dataclass()
class IsaConceptNode(DefConceptParserNode):
class IsaConceptNode(DefConceptParsingResult):
concept: NameNode = NotInitializedNode()
set: NameNode = NotInitializedNode()
@@ -146,7 +146,7 @@ class DefConceptParser(BaseCustomGrammarParser):
"""
token = self.parser_input.token
if token.value != Keywords.DEF.value:
self.add_error(UnexpectedTokenErrorNode("'def' keyword not found.", token, [Keywords.DEF]))
self.add_error(UnexpectedTokenParsingError("'def' keyword not found.", token, [Keywords.DEF]))
return None
self.context.log("Keyword DEF found.", self.name)
@@ -168,7 +168,7 @@ class DefConceptParser(BaseCustomGrammarParser):
keywords_found.extend([t[0] for t in parts.values()]) # keep track of all keywords found
node = DefConceptNode(keywords_found)
# if first_token.type == TokenKind.EOF:
# return self.add_error(UnexpectedTokenErrorNode([first_token], "Unexpected end of file", [Keywords.CONCEPT]))
# return self.add_error(UnexpectedTokenParsingError([first_token], "Unexpected end of file", [Keywords.CONCEPT]))
# get the name
node.name = self.get_concept_name(parts[Keywords.CONCEPT])
@@ -16,7 +16,7 @@ class FormatRuleNode(Node):
format_ast: FormatAstNode = None
class FormatRuleParser(BaseCustomGrammarParser):
class DefFormatRuleParser(BaseCustomGrammarParser):
"""
Class that will parse formatting rules definitions
eg: when xxx print yyy
@@ -28,7 +28,7 @@ class FormatRuleParser(BaseCustomGrammarParser):
KEYWORDS_VALUES = [k.value for k in KEYWORDS]
def __init__(self, **kwargs):
BaseCustomGrammarParser.__init__(self, "FormatRule", 60)
BaseCustomGrammarParser.__init__(self, "DefFormatRule", 60)
def parse(self, context, parser_input: ParserInput):
"""
+9 -7
View File
@@ -4,8 +4,8 @@ from typing import List, Tuple, Callable
from core.builtin_concepts import BuiltinConcepts
from core.concept import Concept
from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import LexerError, TokenKind, Token
from parsers.BaseParser import Node, BaseParser, UnexpectedTokenErrorNode, UnexpectedEofNode, ErrorNode
from core.tokenizer import TokenKind, Token
from parsers.BaseParser import Node, BaseParser, UnexpectedTokenParsingError, UnexpectedEofParsingError, ParsingError
class ExprNode(Node):
@@ -19,7 +19,7 @@ class ExprNode(Node):
@dataclass()
class LeftPartNotFoundError(ErrorNode):
class LeftPartNotFoundError(ParsingError):
"""
When the expression starts with 'or' or 'and'
"""
@@ -161,6 +161,7 @@ class OrNode(ExprNode):
def __str__(self):
return " or ".join([str(p) for p in self.parts])
@dataclass()
class NotNode(ExprNode):
node: ExprNode
@@ -219,7 +220,7 @@ class ExpressionParser(BaseParser):
tree = self.parse_or()
token = self.parser_input.token
if token and token.type != TokenKind.EOF:
self.add_error(UnexpectedTokenErrorNode(f"Unexpected token '{token}'", token, []))
self.add_error(UnexpectedTokenParsingError(f"Unexpected token '{token}'", token, []))
value = self.get_return_value_body(context.sheerka, self.parser_input.as_text(), tree, tree)
@@ -241,7 +242,7 @@ class ExpressionParser(BaseParser):
self.parser_input.next_token()
expr = self.parse_and()
if expr is None:
self.add_error(UnexpectedEofNode("When parsing 'or'"))
self.add_error(UnexpectedEofParsingError("When parsing 'or'"))
return OrNode(*parts)
parts.append(expr)
token = self.parser_input.token
@@ -259,7 +260,7 @@ class ExpressionParser(BaseParser):
self.parser_input.next_token()
expr = self.parse_names()
if expr is None:
self.add_error(UnexpectedEofNode("When parsing 'and'"))
self.add_error(UnexpectedEofParsingError("When parsing 'and'"))
return AndNode(*parts)
parts.append(expr)
token = self.parser_input.token
@@ -282,7 +283,8 @@ class ExpressionParser(BaseParser):
expr = self.parse_or()
token = self.parser_input.token
if token.type != TokenKind.RPAR:
self.error_sink.append(UnexpectedTokenErrorNode(f"Unexpected token '{token}'", token, [TokenKind.RPAR]))
self.error_sink.append(
UnexpectedTokenParsingError(f"Unexpected token '{token}'", token, [TokenKind.RPAR]))
return expr
self.parser_input.next_token()
return expr
+16 -16
View File
@@ -9,7 +9,7 @@ from core.tokenizer import TokenKind, Token
from core.utils import get_n_clones
from parsers.SequenceNodeParser import SequenceNodeParser
from parsers.BaseNodeParser import SourceCodeNode, SourceCodeWithConceptNode, UnrecognizedTokensNode
from parsers.BaseParser import BaseParser, UnexpectedTokenErrorNode, UnexpectedEofNode, Node
from parsers.BaseParser import BaseParser, UnexpectedTokenParsingError, UnexpectedEofParsingError, Node
from parsers.BnfNodeParser import BnfNodeParser
from parsers.PythonWithConceptsParser import PythonWithConceptsParser
from parsers.RuleParser import RuleParser
@@ -191,9 +191,9 @@ class FunctionParser(BaseParser):
node = self.parse_function()
if self.parser_input.next_token():
self.add_error(UnexpectedTokenErrorNode("Only one function supported",
self.parser_input.token,
[TokenKind.EOF]))
self.add_error(UnexpectedTokenParsingError("Only one function supported",
self.parser_input.token,
[TokenKind.EOF]))
if self.has_error:
if node is None:
@@ -222,25 +222,25 @@ class FunctionParser(BaseParser):
start = self.parser_input.pos
token = self.parser_input.token
if token.type != TokenKind.IDENTIFIER:
self.add_error(UnexpectedTokenErrorNode(f"{token.repr_value} is not a identifier",
token,
[TokenKind.IDENTIFIER]))
self.add_error(UnexpectedTokenParsingError(f"{token.repr_value} is not a identifier",
token,
[TokenKind.IDENTIFIER]))
return None
if not self.parser_input.next_token():
self.add_error(UnexpectedEofNode(f"Unexpected EOF while parsing left parenthesis"))
self.add_error(UnexpectedEofParsingError(f"Unexpected EOF while parsing left parenthesis"))
return None
token = self.parser_input.token
if token.type != TokenKind.LPAR:
self.add_error(UnexpectedTokenErrorNode(f"{token.repr_value} is not a left parenthesis",
token,
[TokenKind.LPAR]))
self.add_error(UnexpectedTokenParsingError(f"{token.repr_value} is not a left parenthesis",
token,
[TokenKind.LPAR]))
return None
start_node = NamesNode(start, start + 1, self.parser_input.tokens[start:start + 2])
if not self.parser_input.next_token():
self.add_error(UnexpectedEofNode(f"Unexpected EOF after left parenthesis"))
self.add_error(UnexpectedEofParsingError(f"Unexpected EOF after left parenthesis"))
return FunctionNode(start_node, None, None)
params = self.parse_parameters()
@@ -249,9 +249,9 @@ class FunctionParser(BaseParser):
token = self.parser_input.token
if not token or token.type != TokenKind.RPAR:
self.add_error(UnexpectedTokenErrorNode(f"Right parenthesis not found",
token,
[TokenKind.RPAR]))
self.add_error(UnexpectedTokenParsingError(f"Right parenthesis not found",
token,
[TokenKind.RPAR]))
return FunctionNode(start_node, None, params)
return FunctionNode(start_node,
@@ -270,7 +270,7 @@ class FunctionParser(BaseParser):
token = self.parser_input.token
if token.type == TokenKind.EOF:
self.add_error(UnexpectedEofNode(f"Unexpected EOF while parsing parameters"))
self.add_error(UnexpectedEofParsingError(f"Unexpected EOF while parsing parameters"))
return None
if token.type == TokenKind.RPAR:
+4 -4
View File
@@ -6,7 +6,7 @@ import core.utils
from core.builtin_concepts import BuiltinConcepts
from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import TokenKind
from parsers.BaseParser import BaseParser, Node, ErrorNode
from parsers.BaseParser import BaseParser, Node, ParsingError
log = logging.getLogger(__name__)
@@ -20,7 +20,7 @@ def get_python_node(obj):
@dataclass()
class PythonErrorNode(ErrorNode):
class PythonErrorNode(ParsingError):
source: str
exception: Exception
@@ -29,7 +29,7 @@ class PythonErrorNode(ErrorNode):
@dataclass()
class ConceptDetected(ErrorNode):
class ConceptDetectedError(ParsingError):
name: str
@@ -138,7 +138,7 @@ class PythonParser(BaseParser):
if isinstance(tree, ast.Expression) and isinstance(tree.body, ast.Name):
if tree.body.id in tracker or context.sheerka.fast_resolve(tree.body.id, return_new=False) is not None:
context.log("It's a simple concept. Not for me.", self.name)
self.error_sink.append(ConceptDetected(tree.body.id))
self.error_sink.append(ConceptDetectedError(tree.body.id))
if self.has_error:
ret = sheerka.ret(
+7 -7
View File
@@ -2,16 +2,16 @@ from core.builtin_concepts import BuiltinConcepts
from core.rule import Rule, ACTION_TYPE_DEFERRED
from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import TokenKind
from parsers.BaseParser import BaseParser, ErrorNode, UnexpectedTokenErrorNode
from parsers.BaseParser import BaseParser, ParsingError, UnexpectedTokenParsingError
class RuleNotFound(ErrorNode):
class RuleNotFoundError(ParsingError):
def __init__(self, id_as_tuple):
self.key = id_as_tuple[0]
self.id = id_as_tuple[1]
def __repr__(self):
return f"RuleNotFound(id={self.id}, key={self.key}"
return f"RuleNotFoundError(id={self.id}, key={self.key}"
class RuleParser(BaseParser):
@@ -54,9 +54,9 @@ class RuleParser(BaseParser):
token = parser_input.token
if parser_input.next_token():
reason = UnexpectedTokenErrorNode("Only one rule supported",
parser_input.token,
[TokenKind.EOF])
reason = UnexpectedTokenParsingError("Only one rule supported",
parser_input.token,
[TokenKind.EOF])
return sheerka.ret(self.name,
False,
sheerka.new(BuiltinConcepts.NOT_FOR_ME, body=parser_input.as_text(), reason=reason))
@@ -76,7 +76,7 @@ class RuleParser(BaseParser):
return sheerka.ret(self.name,
False,
sheerka.new(BuiltinConcepts.ERROR,
body=[RuleNotFound(token.value)]))
body=[RuleNotFoundError(token.value)]))
body = sheerka.new(BuiltinConcepts.PARSER_RESULT,
parser=self,
source=parser_input.as_text(),
+5 -5
View File
@@ -7,7 +7,7 @@ from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import Tokenizer, TokenKind
from core.utils import strip_tokens, make_unique
from parsers.BaseNodeParser import BaseNodeParser, ConceptNode, UnrecognizedTokensNode, SourceCodeNode
from parsers.BaseParser import UnexpectedTokenErrorNode, ErrorNode
from parsers.BaseParser import UnexpectedTokenParsingError, ParsingError
from parsers.BnfNodeParser import BnfNodeParser
from parsers.SyaNodeParser import SyaNodeParser
@@ -15,14 +15,14 @@ PARSERS = [BnfNodeParser.NAME, SyaNodeParser.NAME, "Python"]
@dataclass()
class TokensNodeFound(ErrorNode):
class TokensNodeFoundError(ParsingError):
expected_tokens: list
def __eq__(self, other):
if id(other) == id(self):
return True
if not isinstance(other, UnexpectedTokenErrorNode):
if not isinstance(other, UnexpectedTokenParsingError):
return False
if self.message != other.message:
@@ -96,7 +96,7 @@ class AtomConceptParserHelper:
self.debug.append(token)
if self.expected_tokens[0] != token.strip_quote:
self.errors.append(UnexpectedTokenErrorNode(
self.errors.append(UnexpectedTokenParsingError(
f"Found '{token}' while expecting '{self.expected_tokens[0]}'",
token,
[self.expected_tokens[0]]))
@@ -186,7 +186,7 @@ class AtomConceptParserHelper:
forked.finalize()
if self.expected_tokens:
self.errors.append(TokensNodeFound(self.expected_tokens))
self.errors.append(TokensNodeFoundError(self.expected_tokens))
def clone(self):
clone = AtomConceptParserHelper(self.context)
+15 -15
View File
@@ -14,7 +14,7 @@ from core.tokenizer import Token, TokenKind, Tokenizer
from core.utils import get_n_clones, get_text_from_tokens, NextIdManager
from parsers.BaseNodeParser import UnrecognizedTokensNode, ConceptNode, SourceCodeNode, SyaAssociativity, \
SourceCodeWithConceptNode, BaseNodeParser
from parsers.BaseParser import ErrorNode
from parsers.BaseParser import ParsingError
PARSERS = ["Sequence", "Bnf", "Python"]
@@ -53,7 +53,7 @@ class DebugInfo:
return msg + f" => {self.action}"
class ParenthesisMismatchErrorNode(ErrorNode):
class ParenthesisMismatchError(ParsingError):
def __init__(self, error_int):
if isinstance(error_int, tuple):
@@ -79,7 +79,7 @@ class ParenthesisMismatchErrorNode(ErrorNode):
if id(self) == id(other):
return True
if not isinstance(other, ParenthesisMismatchErrorNode):
if not isinstance(other, ParenthesisMismatchError):
return False
return self.token_value == other.token_value and self.pos == other.pos
@@ -88,11 +88,11 @@ class ParenthesisMismatchErrorNode(ErrorNode):
return hash(self.pos)
def __repr__(self):
return f"ParenthesisMismatchErrorNode('{self.token_value}', {self.pos}"
return f"ParenthesisMismatchError('{self.token_value}', {self.pos}"
@dataclass()
class NoneAssociativeSequenceErrorNode(ErrorNode):
class NoneAssociativeSequenceError(ParsingError):
concept: Concept
first: int
second: int
@@ -100,7 +100,7 @@ class NoneAssociativeSequenceErrorNode(ErrorNode):
@dataclass()
class TooManyParametersFound(ErrorNode):
class TooManyParametersFoundError(ParsingError):
concept: Concept
pos: int # position of the concept
token: Token # token of the concept where the error was noticed
@@ -532,7 +532,7 @@ class InFixToPostFix:
# manage parenthesis that didn't find any match
if self._is_lpar(self.stack[-1]):
self._add_error(ParenthesisMismatchErrorNode(self.stack[-1]))
self._add_error(ParenthesisMismatchError(self.stack[-1]))
# The parameter must be part the current concept being parsed
assert len(self._concepts()) != 0 # sanity check
@@ -560,7 +560,7 @@ class InFixToPostFix:
if self.unrecognized_tokens.parenthesis_count > 0:
# parenthesis mismatch detected, do not try to resolve the unrecognized
self._add_error(ParenthesisMismatchErrorNode(self.unrecognized_tokens))
self._add_error(ParenthesisMismatchError(self.unrecognized_tokens))
self._put_to_out(self.unrecognized_tokens)
else:
# try to recognize concepts
@@ -676,7 +676,7 @@ class InFixToPostFix:
if stack.associativity == SyaAssociativity.No and current.associativity == SyaAssociativity.No:
self._add_error(
NoneAssociativeSequenceErrorNode(current.concept, stack_head.start, sya_parser_helper.start))
NoneAssociativeSequenceError(current.concept, stack_head.start, sya_parser_helper.start))
if current.associativity == SyaAssociativity.Left and current.precedence <= stack.precedence:
if self.debug_enabled:
@@ -719,7 +719,7 @@ class InFixToPostFix:
self.pop_stack_to_out()
if self._is_lpar(self.stack[-1]):
self._add_error(ParenthesisMismatchErrorNode(self.stack[-1]))
self._add_error(ParenthesisMismatchError(self.stack[-1]))
return False
# Manage concepts ending with long names
@@ -777,7 +777,7 @@ class InFixToPostFix:
if len(self.parameters_list) > len(current_concept.parameters_list_at_init):
# we have eaten the parameters expected between two expected tokens
# But there are some remaining parameters
self._add_error(TooManyParametersFound(
self._add_error(TooManyParametersFoundError(
current_concept.concept.concept,
current_concept.start,
token,
@@ -951,7 +951,7 @@ class InFixToPostFix:
# checks consistency if an lpar is found
if len(self.stack) == 0:
self._add_error(ParenthesisMismatchErrorNode((token, pos)))
self._add_error(ParenthesisMismatchError((token, pos)))
return None
if self._stack_isinstance(UnrecognizedTokensNode):
@@ -962,7 +962,7 @@ class InFixToPostFix:
start = i
break
else:
self._add_error(ParenthesisMismatchErrorNode((token, pos)))
self._add_error(ParenthesisMismatchError((token, pos)))
return None
source_code = self._make_source_code_with_concept(start, token, pos)
@@ -1073,7 +1073,7 @@ class InFixToPostFix:
if len(self.stack) == 0 and len(self.out) == 0:
# check for parenthesis mismatch
if self.unrecognized_tokens.parenthesis_count > 0:
self._add_error(ParenthesisMismatchErrorNode(self.unrecognized_tokens))
self._add_error(ParenthesisMismatchError(self.unrecognized_tokens))
return # no need to pop the buffer, as no concept is found
if self.debug_enabled:
@@ -1084,7 +1084,7 @@ class InFixToPostFix:
# validate parenthesis
if self._is_lpar(parser_helper) or self._is_rpar(parser_helper):
self._add_error(ParenthesisMismatchErrorNode(parser_helper))
self._add_error(ParenthesisMismatchError(parser_helper))
return None
self.manage_unrecognized()
+2 -2
View File
@@ -5,7 +5,7 @@ from core.builtin_concepts import BuiltinConcepts
from core.builtin_helpers import only_successful, parse_unrecognized, get_lexer_nodes, update_compiled
from parsers.SequenceNodeParser import SequenceNodeParser
from parsers.BaseNodeParser import ConceptNode, UnrecognizedTokensNode, SourceCodeNode, SourceCodeWithConceptNode
from parsers.BaseParser import BaseParser, ErrorNode
from parsers.BaseParser import BaseParser, ParsingError
from parsers.BnfNodeParser import BnfNodeParser
from parsers.SyaNodeParser import SyaNodeParser
@@ -18,7 +18,7 @@ PARSERS = ["EmptyString",
@dataclass()
class CannotParseNode(ErrorNode):
class CannotParseError(ParsingError):
unrecognized: UnrecognizedTokensNode
-21
View File
@@ -38,14 +38,8 @@ class SheerkaPromptCompleter(Completer):
for name, bound_method in sheerka.sheerka_methods.items():
self.builtins.append(self.get_completion_desc(name, bound_method.method, "builtin", ["context"]))
self.pipeable_builtins = []
for name, pipeable in self.sheerka.sheerka_pipeables.items():
self.pipeable_builtins.append(
self.get_completion_desc(name, pipeable.method, "builtin", ["context", "iterable"]))
self.exit_commands = [CompletionDesc(c, c, "command") for c in EXIT_COMMANDS]
self.globals = {k: v.method for k, v in self.sheerka.sheerka_methods.items()}
self.globals.update({k: v.method for k, v in self.sheerka.sheerka_pipeables.items()})
def get_completions(self, document, complete_event):
@@ -64,14 +58,6 @@ class SheerkaPromptCompleter(Completer):
yield from self.yield_completion_from_completion_desc(self.builtins, text)
return
if self.after_pipe(document.text, document.cursor_position):
if document.char_before_cursor == " ":
yield from self.yield_completion_from_completion_desc(self.pipeable_builtins, None)
else:
text = self.last_word(document.text, document.cursor_position)
yield from self.yield_completion_from_completion_desc(self.pipeable_builtins, text)
return
yield from self.yield_completion_from_completion_desc(self.builtins, text)
def get_completions_fom_jedi(self, document):
@@ -179,13 +165,6 @@ class SheerkaPromptCompleter(Completer):
return None
@staticmethod
def after_pipe(text, pos):
for i in range(pos)[::-1]:
if text[i] == "|":
return True
return False
@staticmethod
def last_word(text, pos, left_strip=True):
if pos == 0:
+1 -2
View File
@@ -851,7 +851,7 @@ class TestSheerkaDebugManager(TestUsingMemoryBasedSheerka):
res = sheerka.inspect(context, 0)
assert res.body == {'#type#': 'NotFound',
'id': '70',
'id': sheerka.concepts_ids[BuiltinConcepts.NOT_FOUND],
'key': '__NOT_FOUND',
'name': '__NOT_FOUND',
'body': 'no digest'}
@@ -873,7 +873,6 @@ class TestSheerkaDebugManager(TestUsingMemoryBasedSheerka):
'#type#': 'ReturnValueConcept',
'id': '43',
'key': '__RETURN_VALUE',
'message': None,
'name': '__RETURN_VALUE',
'parents': [concept_debug_obj],
'status': True,
-233
View File
@@ -1,233 +0,0 @@
from dataclasses import dataclass
import pytest
from core.builtin_concepts import BuiltinConcepts
from core.sheerka.services.SheerkaFilter import Pipe, SheerkaFilter
from printer.FormatInstructions import FormatInstructions, FormatDetailDesc, FormatDetailType
from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
@dataclass
class Obj:
    # Minimal two-field fixture used to exercise attribute-based filtering.
    prop1: str
    prop2: str
@dataclass
class ObjWithAsBag:
    """Fixture that exposes its fields through the ``as_bag`` protocol."""

    prop1: str
    prop2: object

    def as_bag(self):
        """Return the properties as a dict keyed by their 'bag' names."""
        return dict(first_prop=self.prop1, second_prop=self.prop2)
class TestSheerkaFilter(TestUsingMemoryBasedSheerka):
def test_i_can_pipe_using_decorator(self):
@Pipe
def is_ok_with_decorator(iterable):
for item in iterable:
yield item + " ok"
def exclamation(iterable):
for item in iterable:
yield item + "!"
res = ["one", "two", "three"] | is_ok_with_decorator | Pipe(exclamation)
assert list(res) == ["one ok!", "two ok!", "three ok!"]
def test_i_can_pipe_function_with_context_as_first_parameter(self):
def func_with_context(context, iterable, var_name):
for item in iterable:
yield f"{context.desc}: {var_name}={item}"
sheerka, context = self.init_concepts()
context.desc = "desc"
pipeable = Pipe(func_with_context, context)
assert pipeable.need_context
assert list(["one", "two", "three"] | pipeable("var")) == ['desc: var=one', 'desc: var=two', 'desc: var=three']
def test_i_can_pipe_function_with_context_as_only_parameter(self):
# This time, func_with_context does not have other parameter than context and iterable
def func_with_context(context, iterable):
for item in iterable:
yield f"{context.desc}: var={item}"
sheerka, context = self.init_concepts()
context.desc = "desc"
pipeable = Pipe(func_with_context, context)
assert pipeable.need_context
assert list(["one", "two", "three"] | pipeable) == ['desc: var=one', 'desc: var=two', 'desc: var=three']
def test_i_can_pipe_explanation_concept(self):
sheerka, context = self.init_concepts()
execution_contexts = [context.push(BuiltinConcepts.NOP, None, desc=f"desc_{i}") for i in range(4)]
explanation_node = sheerka.new(BuiltinConcepts.EXPLANATION, body=execution_contexts)
@Pipe
def get_desc(iterable):
for item in iterable:
yield item.desc
res = explanation_node | get_desc
assert sheerka.isinstance(res, BuiltinConcepts.EXPLANATION)
assert list(res.body) == ["desc_0", "desc_1", "desc_2", "desc_3"] # body is modified
@pytest.mark.parametrize("predicate, expected", [
("True", ["one", "two", "three"]),
("self == 'two'", ["two"])
])
def test_i_can_filter(self, predicate, expected):
filter_service = SheerkaFilter(None)
res = ["one", "two", "three"] | Pipe(filter_service.pipe_filter)(predicate)
assert list(res) == expected
def test_i_can_filter_obj(self):
filter_service = SheerkaFilter(None)
lst = [Obj("a", "b"), Obj("c", "d")]
predicate = "prop2 == 'd'"
res = lst | Pipe(filter_service.pipe_filter)(predicate)
assert list(res) == [Obj("c", "d")]
def test_i_can_filter_obj_implementing_as_bag(self):
filter_service = SheerkaFilter(None)
lst = [ObjWithAsBag("a", "b"), ObjWithAsBag("c", "d")]
predicate = "second_prop == 'd'"
res = lst | Pipe(filter_service.pipe_filter)(predicate)
assert list(res) == [ObjWithAsBag("c", "d")]
def test_i_can_manage_name_error(self):
filter_service = SheerkaFilter(None)
lst = [Obj("a", "b"), Obj("c", "d"), ObjWithAsBag("a", "b"), ObjWithAsBag("c", "d")]
predicate = "second_prop == 'd'" # 'second_prop' does not exist in Obj
res = lst | Pipe(filter_service.pipe_filter)(predicate)
assert list(res) == [ObjWithAsBag("c", "d")]
def test_i_cannot_filter_if_the_predicate_is_incorrect(self):
filter_service = SheerkaFilter(None)
lst = [Obj("a", "b"), Obj("c", "d")]
predicate = "prop2 =="
with pytest.raises(SyntaxError):
res = lst | Pipe(filter_service.pipe_filter)(predicate)
list(res)
def test_i_can_format_l(self):
sheerka, context, foo, bar = self.init_concepts("foo", "bar")
lst = [foo, bar]
res = lst | Pipe(SheerkaFilter.pipe_format_l)("my_format")
res = list(res)
assert len(res) == 2
format_instructions = res[0].get_prop(BuiltinConcepts.FORMAT_INSTRUCTIONS)
assert isinstance(format_instructions, FormatInstructions)
assert format_instructions.format_l[f"c:{foo.id}:"] == "my_format"
format_instructions = res[1].get_prop(BuiltinConcepts.FORMAT_INSTRUCTIONS)
assert isinstance(format_instructions, FormatInstructions)
assert format_instructions.format_l[f"c:{bar.id}:"] == "my_format"
def test_i_can_format_d(self):
sheerka, context, foo, bar = self.init_concepts("foo", "bar")
lst = [foo, bar]
res = lst | Pipe(SheerkaFilter.pipe_format_d)("id", "name", "body", id="%red%{id}%reset%")
res = list(res)
expected_props = {
"id": "%red%{id}%reset%",
"name": "{name}",
"body": "{body}"
}
assert len(res) == 2
format_instructions = res[0].get_prop(BuiltinConcepts.FORMAT_INSTRUCTIONS)
assert isinstance(format_instructions, FormatInstructions)
assert format_instructions.format_d[f"c:{foo.id}:"] == FormatDetailDesc(FormatDetailType.Props_In_Line, expected_props)
format_instructions = res[1].get_prop(BuiltinConcepts.FORMAT_INSTRUCTIONS)
assert isinstance(format_instructions, FormatInstructions)
assert format_instructions.format_d[f"c:{bar.id}:"] == FormatDetailDesc(FormatDetailType.Props_In_Line, expected_props)
def test_i_can_format_d_all_properties(self):
sheerka, context, foo, bar = self.init_concepts("foo", "bar")
lst = [foo, bar]
res = lst | Pipe(SheerkaFilter.pipe_format_d)()
res = list(res)
expected_props = {
'id': '{id}',
'name': '{name}',
'key': '{key}',
'body': '{body}',
'self': '{self}'
}
assert len(res) == 2
format_instructions = res[0].get_prop(BuiltinConcepts.FORMAT_INSTRUCTIONS)
assert isinstance(format_instructions, FormatInstructions)
assert format_instructions.format_d[f"c:{foo.id}:"] == FormatDetailDesc(FormatDetailType.Props_In_Line, expected_props)
def test_i_can_set_recurse(self):
sheerka, context, foo, bar = self.init_concepts("foo", "bar")
lst = [foo, bar]
res = lst | Pipe(SheerkaFilter.pipe_recurse)(10)
res = list(res)
assert len(res) == 2
format_instructions = res[0].get_prop(BuiltinConcepts.FORMAT_INSTRUCTIONS)
assert isinstance(format_instructions, FormatInstructions)
assert format_instructions.recursive_props["_children"] == 10
format_instructions = res[1].get_prop(BuiltinConcepts.FORMAT_INSTRUCTIONS)
assert isinstance(format_instructions, FormatInstructions)
assert format_instructions.recursive_props["_children"] == 10
res = lst | Pipe(SheerkaFilter.pipe_recurse)(15, "other_prop")
res = list(res)
assert len(res) == 2
format_instructions = res[0].get_prop(BuiltinConcepts.FORMAT_INSTRUCTIONS)
assert isinstance(format_instructions, FormatInstructions)
assert format_instructions.recursive_props["_children"] == 10
assert format_instructions.recursive_props["other_prop"] == 15
def test_i_can_inspect_obj(self):
filter_service = SheerkaFilter(None)
lst = [Obj("a", "b"), Obj("c", "d")]
res = lst | Pipe(filter_service.pipe_inspect)("prop2")
assert list(res) == ["b", "d"]
def test_i_can_inspect_obj_with_bag(self):
filter_service = SheerkaFilter(None)
lst = [ObjWithAsBag("a", "b"), ObjWithAsBag("c", "d")]
res = lst | Pipe(filter_service.pipe_inspect)("second_prop")
assert list(res) == ["b", "d"]
lst = [ObjWithAsBag("a", ObjWithAsBag("b", ObjWithAsBag("c", "d")))]
res = lst | Pipe(filter_service.pipe_inspect)("second_prop.second_prop.second_prop")
assert list(res) == ["d"]
+1 -2
View File
@@ -86,14 +86,13 @@ class TestSheerkaUsingMemoryBasedSheerka(TestUsingMemoryBasedSheerka):
def test_i_can_instantiate_a_builtin_concept_when_it_has_its_own_class(self):
sheerka = self.get_sheerka()
ret = sheerka.new(BuiltinConcepts.RETURN_VALUE, who="who", status="status", value="value", message="message")
ret = sheerka.new(BuiltinConcepts.RETURN_VALUE, who="who", status="status", value="value")
assert isinstance(ret, ReturnValueConcept)
assert ret.key == str(BuiltinConcepts.RETURN_VALUE)
assert ret.who == "who"
assert ret.status == "status"
assert ret.value == "value"
assert ret.message == "message"
# check the others
for key, concept_class in sheerka.get_builtins_classes_as_dict().items():
-11
View File
@@ -3,7 +3,6 @@ from dataclasses import dataclass
import pytest
from core.builtin_concepts import BuiltinConcepts
from core.concept import Concept, ConceptParts
from core.sheerka.services.SheerkaFilter import Pipe, SheerkaFilter
from printer.Formatter import Formatter, BraceToken
from printer.SheerkaPrinter import FormatInstructions
@@ -446,16 +445,6 @@ bar: *name 'bar' is not defined*
captured = capsys.readouterr()
assert captured.out == expected
def test_i_can_manage_exception_when_printing(self, capsys):
sheerka = self.get_sheerka()
filter_service = SheerkaFilter(sheerka)
predicate = "self='two'" # it should be self=='two'
items = ["one", "two", "three"] | Pipe(filter_service.pipe_filter)(predicate)
sheerka.print(items)
captured = capsys.readouterr()
assert captured.out == "\x1b[31mSyntaxError: invalid syntax\nself='two'\n ^\x1b[0m\n"
@pytest.mark.parametrize("template, expected", [
(None, []),
("", []),
+1
View File
@@ -61,6 +61,7 @@ def python_ret_val(source):
python_node = PythonNode(source, ast.parse(source, f"<source>", 'eval'))
return pr_ret_val(python_node, parser="Python", source=source)
def new_concept(key, **kwargs):
res = Concept(key=key, name=key, is_builtin=False, is_unique=False)
for k, v in kwargs.items():
+2 -2
View File
@@ -4,7 +4,7 @@ from core.rule import Rule, RuleMetadata
from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import Tokenizer
from evaluators.FormatRuleEvaluator import FormatRuleEvaluator
from parsers.FormatRuleParser import FormatRuleNode, FormatRuleParser
from parsers.DefFormatRuleParser import FormatRuleNode, DefFormatRuleParser
from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
@@ -33,7 +33,7 @@ class TestFormatRuleEvaluator(TestUsingMemoryBasedSheerka):
def test_i_can_eval(self):
sheerka, context = self.init_concepts()
text = "when isinstance(value, __EXPLANATION) print list(value)"
ret_val = FormatRuleParser().parse(context, ParserInput(text))
ret_val = DefFormatRuleParser().parse(context, ParserInput(text))
res = FormatRuleEvaluator().eval(context, ret_val)
@@ -0,0 +1,56 @@
import pytest
from core.builtin_concepts_ids import BuiltinConcepts
from evaluators.MultipleSuccessEvaluator import MultipleSuccessEvaluator
from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
from tests.evaluators.EvaluatorTestsUtils import p_ret_val_true, reduced_requested, e_ret_val_new, p_ret_val_false, \
e_ret_val_false, ret_val
class TestMultipleSuccessEvaluator(TestUsingMemoryBasedSheerka):
@pytest.mark.parametrize("return_values, expected", [
([p_ret_val_true()], False), # all remaining parsers must be in error
([e_ret_val_false()], False), # all remaining evaluators must be successful
([e_ret_val_new("foo"),
e_ret_val_new("foo")], False), # same returns values
([e_ret_val_new("foo", body=1),
e_ret_val_new("foo", body=1)], False), # same returns values
([reduced_requested,
e_ret_val_new("foo"),
e_ret_val_new("bar"),
p_ret_val_false("value")], True),
])
def test_i_can_match(self, return_values, expected):
sheerka, context = self.init_concepts()
evaluator = MultipleSuccessEvaluator()
assert evaluator.matches(context, return_values) == expected
def test_i_can_eval(self):
sheerka, context = self.init_concepts()
evaluator = MultipleSuccessEvaluator()
ret1 = e_ret_val_new("foo")
ret2 = e_ret_val_new("bar")
parser_in_error = p_ret_val_false("value")
return_values = [reduced_requested,
ret1,
ret2,
parser_in_error,
ret_val("success value not coming from evaluator")]
assert evaluator.matches(context, return_values)
res = evaluator.eval(context, return_values)
assert sheerka.isinstance(res, BuiltinConcepts.RETURN_VALUE)
assert res.who == evaluator.name
assert res.status
assert sheerka.isinstance(res.body, BuiltinConcepts.MULTIPLE_SUCCESS)
assert res.body.body == [ret1, ret2]
assert res.parents == [reduced_requested, ret1, ret2, parser_in_error]
+21
View File
@@ -2,6 +2,11 @@ from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
class TestSheerkaNonRegDisplay(TestUsingMemoryBasedSheerka):
@classmethod
def teardown_class(cls):
# At the end of the tests, sheerka singleton instance will be corrupted
# Ask for a new one
TestSheerkaNonRegDisplay.singleton_instance = None
def test_i_can_display_results_when_return_values_processing_is_on(self, capsys):
init = [
@@ -51,4 +56,20 @@ post : None
ret : None
vars : []
props : {}
"""
def test_i_can_display_multiple_success(self, capsys):
init = [
"def concept foo as 1",
"def concept foo as 2",
]
sheerka = self.init_scenario(init)
capsys.readouterr()
sheerka.enable_process_return_values = True
sheerka.evaluate_user_input("foo")
captured = capsys.readouterr()
assert captured.out == """ReturnValue(who=evaluators.Concept, status=True, value=(1001)foo)
ReturnValue(who=evaluators.Concept, status=True, value=(1002)foo)
"""
+16 -16
View File
@@ -332,7 +332,7 @@ class TestSheerkaOut(TestUsingMemoryBasedSheerka):
captured = capsys.readouterr()
assert captured.out == """foo: (1001)foo
bar: (1002)bar
ReturnValue(who=Test, status=True, value=(1003)baz, message=None)
ReturnValue(who=Test, status=True, value=(1003)baz)
"""
def test_i_can_print_out_a_list_with_recurse(self, capsys):
@@ -351,11 +351,11 @@ ReturnValue(who=Test, status=True, value=(1003)baz, message=None)
service.process_return_values(context, rets)
captured = capsys.readouterr()
assert captured.out == """ReturnValue(who=Test, status=True, value=r1, message=None)
ReturnValue(who=Test, status=True, value=r11, message=None)
ReturnValue(who=Test, status=True, value=r111, message=None)
ReturnValue(who=Test, status=True, value=r2, message=None)
ReturnValue(who=Test, status=True, value=r22, message=None)
assert captured.out == """ReturnValue(who=Test, status=True, value=r1)
ReturnValue(who=Test, status=True, value=r11)
ReturnValue(who=Test, status=True, value=r111)
ReturnValue(who=Test, status=True, value=r2)
ReturnValue(who=Test, status=True, value=r22)
"""
def test_i_can_print_out_a_list_with_recurse_using_format_instr(self, capsys):
@@ -376,11 +376,11 @@ ReturnValue(who=Test, status=True, value=r2, message=None)
service.process_return_values(context, rets)
captured = capsys.readouterr()
assert captured.out == """ReturnValue(who=Test, status=True, value=r1, message=None)
ReturnValue(who=Test, status=True, value=r11, message=None)
ReturnValue(who=Test, status=True, value=r111, message=None)
ReturnValue(who=Test, status=True, value=r2, message=None)
ReturnValue(who=Test, status=True, value=r22, message=None)
assert captured.out == """ReturnValue(who=Test, status=True, value=r1)
ReturnValue(who=Test, status=True, value=r11)
ReturnValue(who=Test, status=True, value=r111)
ReturnValue(who=Test, status=True, value=r2)
ReturnValue(who=Test, status=True, value=r22)
"""
def test_i_can_print_out_a_list_with_recurse_using_container_format_instr(self, capsys):
@@ -400,11 +400,11 @@ ReturnValue(who=Test, status=True, value=r2, message=None)
service.process_return_values(context, foo)
captured = capsys.readouterr()
assert captured.out == """ReturnValue(who=Test, status=True, value=r1, message=None)
ReturnValue(who=Test, status=True, value=r11, message=None)
ReturnValue(who=Test, status=True, value=r111, message=None)
ReturnValue(who=Test, status=True, value=r2, message=None)
ReturnValue(who=Test, status=True, value=r22, message=None)
assert captured.out == """ReturnValue(who=Test, status=True, value=r1)
ReturnValue(who=Test, status=True, value=r11)
ReturnValue(who=Test, status=True, value=r111)
ReturnValue(who=Test, status=True, value=r2)
ReturnValue(who=Test, status=True, value=r22)
"""
def test_i_can_print_out_color(self, capsys):
@@ -2,7 +2,7 @@ import pytest
from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import Keywords, Tokenizer, TokenKind
from parsers.BaseCustomGrammarParser import BaseCustomGrammarParser, SyntaxErrorNode, KeywordNotFound
from parsers.BaseParser import UnexpectedEofNode, UnexpectedTokenErrorNode
from parsers.BaseParser import UnexpectedEofParsingError, UnexpectedTokenParsingError
from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
@@ -83,9 +83,9 @@ func(a)
sheerka, context, parser = self.init_parser("when xxx print yyy")
assert parser.get_parts(["when", "print"], Keywords.PRINT) is None
assert parser.error_sink == [UnexpectedTokenErrorNode(f"'print' keyword not found.",
assert parser.error_sink == [UnexpectedTokenParsingError(f"'print' keyword not found.",
"when",
[Keywords.PRINT])]
[Keywords.PRINT])]
def test_i_can_detect_when_a_keyword_appears_several_times(self):
sheerka, context, parser = self.init_parser("print hello when True print True")
@@ -106,7 +106,7 @@ func(a)
assert parser.get_parts(["print", "when"]) is not None
assert len(parser.error_sink) == 1
assert isinstance(parser.error_sink[0], UnexpectedEofNode)
assert isinstance(parser.error_sink[0], UnexpectedEofParsingError)
assert parser.error_sink[0].message == "While parsing keyword 'print'."
def test_i_can_double_quoted_strings_are_expanded(self):
@@ -178,7 +178,7 @@ print xxx"""
sheerka, context, parser = self.init_parser(text)
assert parser.get_parts(["when"])
assert parser.error_sink == [UnexpectedTokenErrorNode("Indentation not found.", "x", [TokenKind.WHITESPACE])]
assert parser.error_sink == [UnexpectedTokenParsingError("Indentation not found.", "x", [TokenKind.WHITESPACE])]
@pytest.mark.parametrize("text", [
"",
@@ -197,7 +197,7 @@ print xxx"""
sheerka, context, parser = self.init_parser("")
assert parser.get_body(list(Tokenizer("not a newline", yield_eof=False))) is None
assert parser.error_sink == [UnexpectedTokenErrorNode("New line not found.", "not", [TokenKind.NEWLINE])]
assert parser.error_sink == [UnexpectedTokenParsingError("New line not found.", "not", [TokenKind.NEWLINE])]
@pytest.mark.parametrize("text", [
"\nx x",
@@ -207,14 +207,14 @@ print xxx"""
sheerka, context, parser = self.init_parser("")
assert parser.get_body(list(Tokenizer(text, yield_eof=False))) is None
assert parser.error_sink == [UnexpectedTokenErrorNode("Indentation not found.", "x", [TokenKind.WHITESPACE])]
assert parser.error_sink == [UnexpectedTokenParsingError("Indentation not found.", "x", [TokenKind.WHITESPACE])]
def test_i_can_detect_missing_tab_when_get_body(self):
text = "\n\txxx\n\tyyy\nzzz"
sheerka, context, parser = self.init_parser("")
assert parser.get_body(list(Tokenizer(text, yield_eof=False))) is None
assert parser.error_sink == [UnexpectedTokenErrorNode("Indentation not found.", "zzz", [TokenKind.WHITESPACE])]
assert parser.error_sink == [UnexpectedTokenParsingError("Indentation not found.", "zzz", [TokenKind.WHITESPACE])]
def test_i_can_detect_invalid_indentation_when_get_body(self):
sheerka, context, parser = self.init_parser("")
+2 -46
View File
@@ -1,50 +1,7 @@
import pytest
from core.tokenizer import Tokenizer, TokenKind, Token
from parsers.BaseParser import BaseParser, BaseSplitIterParser
@pytest.mark.parametrize("text, expected", [
("", ["<eof>"]),
("one two -f --file", ["one", "two", "-f", "--file", "<eof>"]),
("one 'two three'", ["one", "two three", "<eof>"]),
('one "two three"', ["one", "two three", "<eof>"]),
('one\\ two three"', ["one two", "three", "<eof>"]),
("one 'two\\' three'", ["one", "two' three", "<eof>"]),
("one\\\\two three", ["one\\two", "three", "<eof>"]),
("one\ntwo three", ["one", "two", "three", "<eof>"]),
("one \n two three", ["one", "two", "three", "<eof>"]),
("'one \n two' three", ["one \n two", "three", "<eof>"]),
("a=b", ["a", "=", "b", "<eof>"]),
("a = b", ["a", "=", "b", "<eof>"]),
("a==b", ["a", "==", "b", "<eof>"]),
("a == b", ["a", "==", "b", "<eof>"]),
])
def test_i_can_split_using_base_split_iterparser_class(text, expected):
parser = BaseSplitIterParser("BaseSplitIterParser", 0)
parser.reset_parser(None, text)
res = [t.value for t in parser.split()]
assert res == expected
def test_i_can_test_split_iter_parser_indexes():
parser = BaseSplitIterParser("BaseSplitIterParser", 0)
text = "one two \n three = ==(),"
parser.reset_parser(None, text)
res = []
while parser.next_token():
res.append(parser.get_token())
assert res[0] == Token(TokenKind.WORD, "one", 0, 1, 1)
assert res[1] == Token(TokenKind.WORD, "two", 4, 1, 5)
assert res[2] == Token(TokenKind.WORD, "three", 10, 2, 2)
assert res[3] == Token(TokenKind.EQUALS, "=", 16, 2, 8)
assert res[4] == Token(TokenKind.EQUALSEQUALS, "==", 18, 2, 10)
assert res[5] == Token(TokenKind.LPAR, "(", 20, 2, 12)
assert res[6] == Token(TokenKind.RPAR, ")", 21, 2, 13)
assert res[7] == Token(TokenKind.COMMA, ",", 22, 2, 14)
from core.tokenizer import Tokenizer
from parsers.BaseParser import BaseParser
@pytest.mark.parametrize("tokens, expected", [
@@ -61,4 +18,3 @@ def test_i_can_test_split_iter_parser_indexes():
])
def test_i_can_get_tokens_boundaries(tokens, expected):
assert BaseParser.get_tokens_boundaries(tokens) == expected
+6 -6
View File
@@ -4,8 +4,8 @@ from core.concept import Concept, DEFINITION_TYPE_BNF
from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import Tokenizer, TokenKind, LexerError
from parsers.BaseNodeParser import cnode
from parsers.BaseParser import UnexpectedTokenErrorNode
from parsers.BnfDefinitionParser import BnfDefinitionParser, UnexpectedEndOfFileError
from parsers.BaseParser import UnexpectedTokenParsingError, UnexpectedEofParsingError
from parsers.BnfDefinitionParser import BnfDefinitionParser
from parsers.BnfNodeParser import BnfNodeParser
from parsers.BnfNodeParser import StrMatch, Optional, ZeroOrMore, OrderedChoice, Sequence, \
OneOrMore, ConceptExpression
@@ -147,10 +147,10 @@ class TestBnfParser(TestUsingMemoryBasedSheerka):
assert res.value.source == expression
@pytest.mark.parametrize("expression, error", [
("1 ", UnexpectedEndOfFileError()),
("1|", UnexpectedEndOfFileError()),
("(1|)", UnexpectedTokenErrorNode("Unexpected token 'Token(<EOF>)'", eof_token, [TokenKind.RPAR])),
("1=", UnexpectedTokenErrorNode("Unexpected token 'Token(<EOF>)'", eof_token, [TokenKind.IDENTIFIER])),
("1 ", UnexpectedEofParsingError()),
("1|", UnexpectedEofParsingError()),
("(1|)", UnexpectedTokenParsingError("Unexpected token 'Token(<EOF>)'", eof_token, [TokenKind.RPAR])),
("1=", UnexpectedTokenParsingError("Unexpected token 'Token(<EOF>)'", eof_token, [TokenKind.IDENTIFIER])),
])
def test_i_can_detect_errors(self, expression, error):
sheerka, context, parser = self.init_parser()
+9 -9
View File
@@ -7,11 +7,11 @@ from core.concept import DEFINITION_TYPE_BNF, DEFINITION_TYPE_DEF, Concept, CV
from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import Keywords, Tokenizer, LexerError
from parsers.BaseNodeParser import SCWC
from parsers.BaseParser import NotInitializedNode, UnexpectedEofNode
from parsers.BaseParser import NotInitializedNode, UnexpectedEofParsingError
from parsers.BnfNodeParser import OrderedChoice, ConceptExpression, StrMatch, Sequence
from parsers.BnfDefinitionParser import BnfDefinitionParser
from parsers.DefConceptParser import DefConceptParser, NameNode, SyntaxErrorNode
from parsers.DefConceptParser import UnexpectedTokenErrorNode, DefConceptNode
from parsers.DefConceptParser import UnexpectedTokenParsingError, DefConceptNode
from parsers.FunctionParser import FunctionParser
from parsers.PythonParser import PythonParser, PythonNode
@@ -119,9 +119,9 @@ class TestDefConceptParser(TestUsingMemoryBasedSheerka):
return sheerka, context, parser, *updated
@pytest.mark.parametrize("text, error", [
("concept", UnexpectedTokenErrorNode("'def' keyword not found.", "concept", [Keywords.DEF])),
("hello word", UnexpectedTokenErrorNode("'def' keyword not found.", "hello", [Keywords.DEF])),
("def hello", UnexpectedTokenErrorNode("'concept' keyword not found.", "hello", [Keywords.CONCEPT])),
("concept", UnexpectedTokenParsingError("'def' keyword not found.", "concept", [Keywords.DEF])),
("hello word", UnexpectedTokenParsingError("'def' keyword not found.", "hello", [Keywords.DEF])),
("def hello", UnexpectedTokenParsingError("'concept' keyword not found.", "hello", [Keywords.CONCEPT])),
])
def test_i_can_detect_not_for_me(self, text, error):
sheerka, context, parser, *concepts = self.init_parser()
@@ -196,7 +196,7 @@ class TestDefConceptParser(TestUsingMemoryBasedSheerka):
assert not res.status
assert sheerka.isinstance(return_value, BuiltinConcepts.NOT_FOR_ME)
assert isinstance(return_value.reason[0], UnexpectedTokenErrorNode)
assert isinstance(return_value.reason[0], UnexpectedTokenParsingError)
assert return_value.reason[0].message == "'concept' keyword not found."
assert return_value.reason[0].expected_tokens == [Keywords.CONCEPT]
assert return_value.reason[0].token.value == "hello"
@@ -381,8 +381,8 @@ def concept add one to a as:
("def concept name from def", SyntaxErrorNode([], "Empty 'from' declaration.")),
("def concept name from def ", SyntaxErrorNode([], "Empty 'from' declaration.")),
("def concept name from as True", SyntaxErrorNode([], "Empty 'from' declaration.")),
("def concept name from", UnexpectedEofNode("While parsing keyword 'from'.")),
("def concept name from ", UnexpectedEofNode("While parsing keyword 'from'.")),
("def concept name from", UnexpectedEofParsingError("While parsing keyword 'from'.")),
("def concept name from ", UnexpectedEofParsingError("While parsing keyword 'from'.")),
])
def test_i_can_detect_empty_def_declaration(self, text, error):
sheerka, context, parser, *concepts = self.init_parser()
@@ -474,7 +474,7 @@ from give me the date !
assert not res.status
assert sheerka.isinstance(res.body, BuiltinConcepts.NOT_FOR_ME)
assert isinstance(res.body.reason[0], UnexpectedTokenErrorNode)
assert isinstance(res.body.reason[0], UnexpectedTokenParsingError)
@pytest.mark.parametrize("text, error_msg, error_text", [
("'name", "Missing Trailing quote", "'name"),
@@ -5,7 +5,7 @@ from core.sheerka.services.SheerkaExecute import ParserInput
from core.sheerka.services.SheerkaRuleManager import FormatAstRawText, RulePredicate, FormatAstVariable
from core.tokenizer import Keywords, Tokenizer
from parsers.BaseCustomGrammarParser import KeywordNotFound
from parsers.FormatRuleParser import FormatRuleParser, FormatRuleNode
from parsers.DefFormatRuleParser import DefFormatRuleParser, FormatRuleNode
from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
@@ -17,7 +17,7 @@ cmap = {
}
class TestFormatRuleParser(TestUsingMemoryBasedSheerka):
class TestDefFormatRuleParser(TestUsingMemoryBasedSheerka):
sheerka = None
@classmethod
@@ -29,10 +29,10 @@ class TestFormatRuleParser(TestUsingMemoryBasedSheerka):
if concepts_map is not None:
sheerka, context, *concepts = self.init_concepts(*concepts_map.values(), create_new=True)
else:
sheerka = TestFormatRuleParser.sheerka
sheerka = TestDefFormatRuleParser.sheerka
context = self.get_context(sheerka)
parser = FormatRuleParser()
parser = DefFormatRuleParser()
return sheerka, context, parser
def test_i_can_detect_empty_expression(self):
+9 -9
View File
@@ -5,7 +5,7 @@ from core.builtin_concepts import BuiltinConcepts, ReturnValueConcept
from core.concept import Concept
from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import Tokenizer, TokenKind
from parsers.BaseParser import UnexpectedEofNode, UnexpectedTokenErrorNode
from parsers.BaseParser import UnexpectedEofParsingError, UnexpectedTokenParsingError
from parsers.ExpressionParser import PropertyEqualsNode, PropertyEqualsSequenceNode, PropertyContainsNode, AndNode, \
OrNode, NotNode, LambdaNode, IsaNode, NameExprNode, ExpressionParser, LeftPartNotFoundError, TrueifyVisitor
@@ -54,12 +54,12 @@ class TestExpressionParser(TestUsingMemoryBasedSheerka):
assert expressions == expected
@pytest.mark.parametrize("expression, expected_errors", [
("one or", [UnexpectedEofNode("When parsing 'or'")]),
("one and", [UnexpectedEofNode("When parsing 'and'")]),
("one or", [UnexpectedEofParsingError("When parsing 'or'")]),
("one and", [UnexpectedEofParsingError("When parsing 'and'")]),
("and one", [LeftPartNotFoundError()]),
("or one", [LeftPartNotFoundError()]),
("or", [LeftPartNotFoundError(), UnexpectedEofNode("When parsing 'or'")]),
("and", [LeftPartNotFoundError(), UnexpectedEofNode("When parsing 'and'")]),
("or", [LeftPartNotFoundError(), UnexpectedEofParsingError("When parsing 'or'")]),
("and", [LeftPartNotFoundError(), UnexpectedEofParsingError("When parsing 'and'")]),
])
def test_i_can_detect_error(self, expression, expected_errors):
sheerka, context, parser = self.init_parser()
@@ -75,28 +75,28 @@ class TestExpressionParser(TestUsingMemoryBasedSheerka):
res = parser.parse(context, ParserInput("("))
assert not res.status
assert sheerka.isinstance(res.body, BuiltinConcepts.NOT_FOR_ME)
assert isinstance(res.body.reason[0], UnexpectedTokenErrorNode)
assert isinstance(res.body.reason[0], UnexpectedTokenParsingError)
assert res.body.reason[0].token.type == TokenKind.EOF
assert res.body.reason[0].expected_tokens == [TokenKind.RPAR]
res = parser.parse(context, ParserInput(")"))
assert not res.status
assert sheerka.isinstance(res.body, BuiltinConcepts.NOT_FOR_ME)
assert isinstance(res.body.reason[0], UnexpectedTokenErrorNode)
assert isinstance(res.body.reason[0], UnexpectedTokenParsingError)
assert res.body.reason[0].token.type == TokenKind.RPAR
assert res.body.reason[0].expected_tokens == []
res = parser.parse(context, ParserInput("one and two)"))
assert not res.status
assert sheerka.isinstance(res.body, BuiltinConcepts.ERROR)
assert isinstance(res.body.body[0], UnexpectedTokenErrorNode)
assert isinstance(res.body.body[0], UnexpectedTokenParsingError)
assert res.body.body[0].token.type == TokenKind.RPAR
assert res.body.body[0].expected_tokens == []
res = parser.parse(context, ParserInput("one and two)"))
assert not res.status
assert sheerka.isinstance(res.body, BuiltinConcepts.ERROR)
assert isinstance(res.body.body[0], UnexpectedTokenErrorNode)
assert isinstance(res.body.body[0], UnexpectedTokenParsingError)
assert res.body.body[0].token.type == TokenKind.RPAR
assert res.body.body[0].expected_tokens == []
+2 -2
View File
@@ -5,7 +5,7 @@ import pytest
from core.builtin_concepts import ParserResultConcept, NotForMeConcept, BuiltinConcepts
from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import LexerError, TokenKind
from parsers.PythonParser import PythonNode, PythonParser, PythonErrorNode, ConceptDetected
from parsers.PythonParser import PythonNode, PythonParser, PythonErrorNode, ConceptDetectedError
from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
@@ -118,4 +118,4 @@ class TestPythonParser(TestUsingMemoryBasedSheerka):
assert not res.status
assert sheerka.isinstance(res.value, BuiltinConcepts.NOT_FOR_ME)
assert res.value.reason == [ConceptDetected(expected_id)]
assert res.value.reason == [ConceptDetectedError(expected_id)]
+2 -2
View File
@@ -1,7 +1,7 @@
import pytest
from core.builtin_concepts import BuiltinConcepts
from core.sheerka.services.SheerkaExecute import ParserInput
from parsers.RuleParser import RuleParser, RuleNotFound
from parsers.RuleParser import RuleParser, RuleNotFoundError
from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
@@ -59,7 +59,7 @@ class TestRuleParser(TestUsingMemoryBasedSheerka):
assert not res.status
assert sheerka.isinstance(error, BuiltinConcepts.ERROR)
assert errors_causes == [RuleNotFound("999999")]
assert errors_causes == [RuleNotFoundError("999999")]
def test_i_can_parse_rule(self):
sheerka, context, parser = self.init_parser()
+5 -5
View File
@@ -9,7 +9,7 @@ from parsers.BaseNodeParser import utnode, cnode, short_cnode, UnrecognizedToken
SCWC, CNC, UTN, SCN, CN
from parsers.PythonParser import PythonNode
from parsers.SyaNodeParser import SyaNodeParser, SyaConceptParserHelper, SyaAssociativity, \
NoneAssociativeSequenceErrorNode, TooManyParametersFound, InFixToPostFix, ParenthesisMismatchErrorNode
NoneAssociativeSequenceError, TooManyParametersFoundError, InFixToPostFix, ParenthesisMismatchError
import tests.parsers.parsers_utils
from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
@@ -758,12 +758,12 @@ class TestSyaNodeParser(TestUsingMemoryBasedSheerka):
res = parser.infix_to_postfix(context, ParserInput(expression))
assert len(res) == 1
assert res[0].errors == [ParenthesisMismatchErrorNode(expected)]
assert res[0].errors == [ParenthesisMismatchError(expected)]
def test_i_can_detect_parenthesis_mismatch_error_special_case(self):
sheerka, context, parser = self.init_parser()
expression = "one ? function( : two"
expected = [ParenthesisMismatchErrorNode(("(", 5)), ParenthesisMismatchErrorNode(("(", 5))]
expected = [ParenthesisMismatchError(("(", 5)), ParenthesisMismatchError(("(", 5))]
res = parser.infix_to_postfix(context, ParserInput(expression))
assert len(res) == 1
@@ -780,7 +780,7 @@ class TestSyaNodeParser(TestUsingMemoryBasedSheerka):
assert len(res) == 1
assert len(res[0].errors) == 1
error = res[0].errors[0]
assert isinstance(error, TooManyParametersFound)
assert isinstance(error, TooManyParametersFoundError)
assert error.concept == cmap[expected[0]]
assert error.token.value == expected[1]
@@ -897,7 +897,7 @@ class TestSyaNodeParser(TestUsingMemoryBasedSheerka):
res = parser.infix_to_postfix(context, ParserInput("one less than two less than three"))
assert len(res) == 1
assert res[0].errors == [NoneAssociativeSequenceErrorNode(concepts_map["less than"], 2, 8)]
assert res[0].errors == [NoneAssociativeSequenceError(concepts_map["less than"], 2, 8)]
def test_i_can_post_fix_bnf_definition(self):
"""
-43
View File
@@ -38,38 +38,6 @@ class TestSheerkaPromptCompleter(TestUsingMemoryBasedSheerka):
assert as_dict["quit"].display_text == "quit"
assert as_dict["quit"].display_meta_text == "command"
def test_i_can_complete_with_pipeable(self):
sheerka = self.get_sheerka()
document = Document("| ")
completions = SheerkaPromptCompleter(sheerka).get_completions(document, CompleteEvent())
as_dict = {c.display_text: c for c in completions}
assert "first" in as_dict
assert as_dict["first"].text == "first()"
assert as_dict["first"].display_text == "first"
assert as_dict["first"].display_meta_text == "builtin"
assert "filter" in as_dict
assert as_dict["filter"].text == "filter("
assert as_dict["filter"].display_text == "filter"
assert as_dict["filter"].display_meta_text == "builtin"
def test_i_can_complete_with_pipeable_when_starting_to_write(self):
sheerka = self.get_sheerka()
document = Document("| f")
completions = SheerkaPromptCompleter(sheerka).get_completions(document, CompleteEvent())
as_dict = {c.display_text: c for c in completions}
assert "first" in as_dict
assert as_dict["first"].text == "first()"
assert as_dict["first"].display_text == "first"
assert as_dict["first"].display_meta_text == "builtin"
assert "filter" in as_dict
assert as_dict["filter"].text == "filter("
assert as_dict["filter"].display_text == "filter"
assert as_dict["filter"].display_meta_text == "builtin"
@pytest.mark.parametrize("text, expected", [
("func(", ["10", "20", "30"]),
("func(1", ["10"]),
@@ -95,17 +63,6 @@ class TestSheerkaPromptCompleter(TestUsingMemoryBasedSheerka):
as_list = [c.display_text for c in completions]
assert as_list == expected
@pytest.mark.parametrize("text, pos, expected", [
("", 0, False),
("foo", 3, False),
("|", 1, True),
("xxx | foo", 9, True),
("xxx | foo", 5, True),
("xxx | foo", 4, False),
])
def test_after_pipe(self, text, pos, expected):
assert SheerkaPromptCompleter.after_pipe(text, pos) == expected
@pytest.mark.parametrize("text, pos, expected", [
("", 0, ""),
("foo", 3, "foo"),