Implemented some enhancement requests

This commit is contained in:
2020-12-14 10:30:10 +01:00
parent 657c7536f7
commit e3c2adb533
46 changed files with 352 additions and 1286 deletions
+3 -4
View File
@@ -60,9 +60,9 @@ class ReturnValueConcept(Concept):
It's the main input for the evaluators
"""
ALL_ATTRIBUTES = ["who", "status", "value", "parents", "message"]
ALL_ATTRIBUTES = ["who", "status", "value", "parents"]
def __init__(self, who=None, status=None, value=None, parents=None, message=None, concept_id=None):
def __init__(self, who=None, status=None, value=None, parents=None, concept_id=None):
Concept.__init__(self,
BuiltinConcepts.RETURN_VALUE,
True,
@@ -74,11 +74,10 @@ class ReturnValueConcept(Concept):
self.set_value("status", status)
self.set_value("value", value)
self.set_value("parents", parents)
self.set_value("message", message)
self._metadata.is_evaluated = True
def __repr__(self):
return f"ReturnValue(who={self.who}, status={self.status}, value={self.value}, message={self.message})"
return f"ReturnValue(who={self.who}, status={self.status}, value={self.value})"
def __eq__(self, other):
if id(self) == id(other):
+4 -2
View File
@@ -67,7 +67,8 @@ class BuiltinConcepts:
TOO_MANY_SUCCESS = "__TOO_MANY_SUCCESS" # when expecting a limited number of successful return value
TOO_MANY_ERRORS = "__TOO_MANY_ERRORS" # when expecting a limited number of successful return value
ONLY_SUCCESSFUL = "__ONLY_SUCCESSFUL" # filter the result, only keep successful ones
MULTIPLE_ERRORS = "__MULTIPLE_ERRORS" # filter the result, only keep evaluator in error
MULTIPLE_ERRORS = "__MULTIPLE_ERRORS" # filter the result, only keep evaluators in error
MULTIPLE_SUCCESS = "__MULTIPLE_SUCCESS" # filter the result, only keep successful evaluators
NOT_FOR_ME = "__NOT_FOR_ME" # a parser recognize that the entry is not meant for it
IS_EMPTY = "__IS_EMPTY" # when a set is empty
NO_RESULT = "__NO_RESULT" # no return value returned
@@ -78,7 +79,7 @@ class BuiltinConcepts:
CONCEPT_EVAL_ERROR = "__CONCEPT_EVAL_ERROR" # cannot evaluate a property or metadata of a concept
ENUMERATION = "__ENUMERATION" # represents a list or a set
LIST = "__LIST" # represents a list
FILTERED = "__FILTERED" # represents the result of a filtering
FILTERED = "__FILTERED" # represents the result of a filtering, the filtering condition should be indicated
CONCEPT_ALREADY_IN_SET = "__CONCEPT_ALREADY_IN_SET"
NOT_A_SET = "__NOT_A_SET" # the concept has no entry in sets
CONDITION_FAILED = "__CONDITION_FAILED" # failed to validate where clause during evaluation
@@ -176,6 +177,7 @@ BuiltinContainers = [
BuiltinConcepts.TO_LIST,
BuiltinConcepts.TO_DICT,
BuiltinConcepts.TO_MULTI,
BuiltinConcepts.MULTIPLE_SUCCESS,
]
BuiltinOutConcepts = [
+2 -2
View File
@@ -7,7 +7,7 @@ from core.sheerka.services.SheerkaExecute import SheerkaExecute
from core.tokenizer import Keywords
from parsers.BaseNodeParser import SourceCodeNode, ConceptNode, UnrecognizedTokensNode, SourceCodeWithConceptNode, \
RuleNode
from parsers.BaseParser import BaseParser, ErrorNode
from parsers.BaseParser import BaseParser, ParsingError
PARSE_STEPS = [BuiltinConcepts.BEFORE_PARSING, BuiltinConcepts.PARSING, BuiltinConcepts.AFTER_PARSING]
EVAL_STEPS = PARSE_STEPS + [BuiltinConcepts.BEFORE_EVALUATION, BuiltinConcepts.EVALUATION,
@@ -254,7 +254,7 @@ def only_parsers_results(context, return_values):
# hack because some parsers don't follow the NOT_FOR_ME rule
temp_ret_val = []
for ret_val in return_values_ok:
if isinstance(ret_val.body.body, ErrorNode):
if isinstance(ret_val.body.body, ParsingError):
continue
if isinstance(ret_val.body.body, list) and \
len(ret_val.body.body) == 1 and \
+3 -13
View File
@@ -101,9 +101,9 @@ class Sheerka(Concept):
"test_using_context": SheerkaMethod(self.test_using_context, False),
"test_dict": SheerkaMethod(self.test_dict, False)
}
self.sheerka_pipeables = {}
self.locals = {}
self.concepts_ids = None
@property
def resolved_concepts_by_first_keyword(self):
@@ -149,16 +149,6 @@ class Sheerka(Concept):
setattr(self, as_name, bound_method)
def add_pipeable(self, func_name, function, has_side_effect):
"""
Adds a function that can bu used with pipe '|'
:param func_name:
:param function:
:param has_side_effect:
:return:
"""
self.sheerka_pipeables[func_name] = SheerkaMethod(function, has_side_effect)
def initialize(self, root_folder: str = None, save_execution_context=None, enable_process_return_values=None):
"""
Starting Sheerka
@@ -288,6 +278,7 @@ class Sheerka(Concept):
from core.sheerka.services.SheerkaConceptManager import SheerkaConceptManager
concept_service = self.services[SheerkaConceptManager.NAME]
concepts_ids = concept_service.initialize_builtin_concepts()
self.concepts_ids = concepts_ids
self.return_value_concept_id = concepts_ids[BuiltinConcepts.RETURN_VALUE]
self.error_concept_id = concepts_ids[BuiltinConcepts.ERROR]
@@ -565,13 +556,12 @@ class Sheerka(Concept):
concept._metadata.is_evaluated = True # because we have manually set the variables
return concept
def ret(self, who: str, status: bool, value, message=None, parents=None):
def ret(self, who: str, status: bool, value, parents=None):
"""
Creates and returns a ReturnValue concept
:param who:
:param status:
:param value:
:param message:
:param parents:
:return:
"""
+3 -3
View File
@@ -458,7 +458,7 @@ class SheerkaExecute(BaseService):
original_items = return_values[:]
evaluated_items = []
to_delete = []
to_delete = set()
for evaluator in grouped_evaluators[priority]:
evaluator.reset()
@@ -496,7 +496,7 @@ class SheerkaExecute(BaseService):
continue
# otherwise, item will be removed and replaced by result
to_delete.append(item)
to_delete.add(item)
if isinstance(result, list):
evaluated_items.extend(result)
elif isinstance(result, ReturnValueConcept):
@@ -525,7 +525,7 @@ class SheerkaExecute(BaseService):
for result in results:
if result.body != BuiltinConcepts.NO_RESULT:
evaluated_items.append(result)
to_delete.extend(result.parents)
to_delete.update(result.parents)
sub_context.add_values(return_values=results)
else:
sub_context.add_values(return_values=NO_MATCH)
-455
View File
@@ -1,455 +0,0 @@
# the principle and the Pipe class are taken from
# https://github.com/JulienPalard/Pipe
#
import builtins
import functools
import inspect
import itertools
import sys
from collections import deque
from cache.Cache import Cache
from core.builtin_concepts import BuiltinConcepts
from core.concept import Concept, ConceptParts
from core.sheerka.services.sheerka_service import BaseService
from core.utils import as_bag
from printer.FormatInstructions import FormatInstructions
from sheerkapickle.utils import is_primitive
class PropDesc:
    """
    Describes the public "shape" of an object: its class name plus the list
    of its available property names (produced by SheerkaFilter.pipe_props).

    Equality is order-insensitive on the property names.  ``__hash__`` is
    defined consistently with ``__eq__`` so instances can be stored in sets
    or used as dict keys (e.g. by ``pipe_dedup``'s seen-set).
    """

    def __init__(self, class_name, props):
        self.class_name = class_name
        self.props = props

    def __repr__(self):
        return f"({self.class_name}{self.props})"

    def __eq__(self, other):
        if id(other) == id(self):
            return True
        if not isinstance(other, PropDesc):
            return False
        # Property order is irrelevant: compare the sorted name lists.
        return self.class_name == other.class_name and sorted(self.props) == sorted(other.props)

    def __hash__(self):
        # Bug fix: defining __eq__ without __hash__ made PropDesc unhashable
        # (Python sets __hash__ to None in that case), so any set/dict usage
        # raised TypeError.  Hash on the same order-insensitive key __eq__
        # compares, keeping the hash/eq contract.
        return hash((self.class_name, tuple(sorted(self.props))))
class Pipe:
    """
    Represent a Pipeable Element :
    Described as :
    first = Pipe(lambda iterable: next(iter(iterable)))
    and used as :
    print [1, 2, 3] | first
    printing 1
    Or represent a Pipeable Function :
    It's a function returning a Pipe
    Described as :
    select = Pipe(lambda iterable, pred: (pred(x) for x in iterable))
    and used as :
    print [1, 2, 3] | select(lambda x: x * 2)
    # 2, 4, 6
    """

    def __init__(self, function, context=None):
        # `context` is kept so that pipe functions whose FIRST parameter is
        # literally named "context" can receive it automatically (detection is
        # by parameter name only — see below).
        self.context = context
        if isinstance(function, Pipe):
            # Wrapping an existing Pipe: reuse its unwrapped function and
            # its already-computed need_context flag.
            self.function = function.function
            self.need_context = function.need_context
        else:
            signature = inspect.signature(function)
            if len(signature.parameters) > 0 and list(signature.parameters.keys())[0] == "context":
                self.need_context = True
                # With exactly (context, x) we can bind context right now;
                # with more parameters the binding is deferred to __call__.
                self.function = (lambda x: function(context, x)) if len(signature.parameters) == 2 else function
            else:
                self.need_context = False
                self.function = function
        # Make this Pipe look like the wrapped function (name, docstring, ...).
        functools.update_wrapper(self, function)

    def __ror__(self, other):
        # Implements `other | self`.  EXPLANATION concepts are special-cased:
        # their body is transformed in place and the concept itself returned,
        # so explanation metadata survives the pipeline.
        if isinstance(other, Concept) and other.key == str(BuiltinConcepts.EXPLANATION):
            other.set_value(ConceptParts.BODY, self.function(other.body))
            return other
        return self.function(other)

    def __call__(self, *args, **kwargs):
        # Calling a Pipe produces a NEW Pipe with the extra arguments bound,
        # e.g. `xs | select(f)`; the context is injected first when required.
        if self.need_context:
            return Pipe(lambda x: self.function(self.context, x, *args, **kwargs), self.context)
        else:
            return Pipe(lambda x: self.function(x, *args, **kwargs), self.context)
class SheerkaFilter(BaseService):
    """
    Service exposing the '|' pipeline vocabulary (first, take, select, ...).

    Every method named ``pipe_<name>`` is registered as a pipeable called
    ``<name>`` during initialize().  Compiled filter/inspect predicates are
    memoised in a small bounded Cache shared through the cache manager.
    """
    NAME = "Filter"
    # Cache-manager entry under which the compiled-predicate cache is registered.
    PREDICATES_ENTRY = "Filter:Predicates"

    def __init__(self, sheerka):
        super().__init__(sheerka)
        # Small bounded cache for compiled predicate code objects.
        self.cache = Cache(max_size=30)

    def initialize(self):
        """Register every ``pipe_*`` method as a pipeable, named without the prefix."""
        # For a weird reason, when the attribute @Pipe is directly added to the function
        # all following instances have the context property null
        for k, v in SheerkaFilter.__dict__.items():
            if k.startswith("pipe_"):
                if isinstance(v, staticmethod):
                    # Static pipeables: register the underlying raw function.
                    self.sheerka.add_pipeable(k[5:], v.__func__, True)
                else:
                    # Instance pipeables: bind to this service instance first.
                    self.sheerka.add_pipeable(k[5:], v.__get__(self, self.__class__), True)
        self.sheerka.cache_manager.register_cache(self.PREDICATES_ENTRY, self.cache, False, False)

    def get_compiled(self, file_name, predicate):
        """
        Returns the compiled version of the predicate
        :param file_name: pseudo-filename shown in tracebacks of the compiled code
        :param predicate: source text of a Python expression
        :return: a code object ready for eval()
        """
        compiled = self.cache.get(predicate)
        if compiled is not None:
            return compiled
        compiled = compile(predicate, f"<{file_name}>", "eval")
        self.cache.put(predicate, compiled)
        return compiled

    @staticmethod
    def pipe_first(iterable):
        """
        Return the first element of the list
        :param iterable:
        :return: first element; raises StopIteration when the iterable is empty
        """
        return next(iter(iterable))

    @staticmethod
    def pipe_take(iterable, n):
        """
        Yield the first n elements of the iterable, then stop.
        :param iterable:
        :param n: number of elements to keep
        :return:
        """
        for item in iterable:
            if n > 0:
                n -= 1
                yield item
            else:
                return

    @staticmethod
    def pipe_props(iterable):
        """
        Return the list of available properties of each item in the iterable,
        as PropDesc(class name, property names) descriptors.
        :return:
        """
        for item in iterable:
            yield PropDesc(type(item).__name__, list(as_bag(item).keys()))

    @staticmethod
    def pipe_tail(iterable, qte):
        """Yield the last qte elements of the given iterable."""
        # deque with maxlen keeps only the trailing window in O(n) one pass.
        return deque(iterable, maxlen=qte)

    @staticmethod
    def pipe_skip(iterable, qte):
        """Skip qte elements in the given iterable, then yield the others."""
        for item in iterable:
            if qte == 0:
                yield item
            else:
                qte -= 1

    @staticmethod
    def pipe_dedup(iterable, key=lambda x: x):
        """Only yield unique items. Use a set to keep track of duplicate data."""
        # Note: key(item) must be hashable.
        seen = set()
        for item in iterable:
            dupkey = key(item)
            if dupkey not in seen:
                seen.add(dupkey)
                yield item

    @staticmethod
    def pipe_uniq(iterable, key=lambda x: x):
        """Deduplicate consecutive duplicate values (like Unix uniq)."""
        iterator = iter(iterable)
        try:
            prev = next(iterator)
        except StopIteration:
            # Empty input: nothing to yield.
            return
        yield prev
        prevkey = key(prev)
        for item in iterator:
            itemkey = key(item)
            if itemkey != prevkey:
                yield item
            prevkey = itemkey

    @staticmethod
    def pipe_all(iterable, pred):
        """Returns True if ALL elements in the given iterable are true for the
        given pred function"""
        return builtins.all(pred(x) for x in iterable)

    @staticmethod
    def pipe_any(iterable, pred):
        """Returns True if ANY element in the given iterable is True for the
        given pred function"""
        return builtins.any(pred(x) for x in iterable)

    @staticmethod
    def pipe_average(iterable):
        """Build the average for the given iterable, starting with 0.0 as seed
        Will try a division by 0 if the iterable is empty...
        """
        # warnings.warn(
        #     "pipe.average is deprecated, use statistics.mean instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        total = 0.0
        qte = 0
        for element in iterable:
            total += element
            qte += 1
        return total / qte

    @staticmethod
    def pipe_count(iterable):
        """Count the size of the given iterable, walking through it."""
        # warnings.warn(
        #     "pipe.count is deprecated, use the builtin len() instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        count = 0
        for element in iterable:
            count += 1
        return count

    @staticmethod
    def pipe_max(iterable, **kwargs):
        """Return the largest element (kwargs forwarded to builtins.max, e.g. key=)."""
        # warnings.warn(
        #     "pipe.max is deprecated, use the builtin max() instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return builtins.max(iterable, **kwargs)

    @staticmethod
    def pipe_min(iterable, **kwargs):
        """Return the smallest element (kwargs forwarded to builtins.min, e.g. key=)."""
        # warnings.warn(
        #     "pipe.min is deprecated, use the builtin min() instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return builtins.min(iterable, **kwargs)

    @staticmethod
    def pipe_as_dict(iterable):
        """Materialize an iterable of key/value pairs into a dict."""
        # warnings.warn(
        #     "pipe.as_dict is deprecated, use dict(your | pipe) instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return dict(iterable)

    @staticmethod
    def pipe_as_set(iterable):
        """Materialize the iterable into a set."""
        # warnings.warn(
        #     "pipe.as_set is deprecated, use set(your | pipe) instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return set(iterable)

    @staticmethod
    def pipe_permutations(iterable, r=None):
        """Yield all r-length permutations of the iterable (full length when r is None)."""
        # permutations('ABCD', 2) --> AB AC AD BA BC BD CA CB CD DA DB DC
        # permutations(range(3)) --> 012 021 102 120 201 210
        for x in itertools.permutations(iterable, r):
            yield x

    # @staticmethod
    # def pipe_netcat(to_send, host, port):
    #     with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
    #         s.connect((host, port))
    #         for data in to_send | traverse:
    #             s.send(data)
    #         while 1:
    #             data = s.recv(4096)
    #             if not data:
    #                 break
    #             yield data
    #
    # @staticmethod
    # def pipe_netwrite(to_send, host, port):
    #     warnings.warn("pipe.netwite is deprecated.", DeprecationWarning, stacklevel=4)
    #     with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
    #         s.connect((host, port))
    #         for data in to_send | SheerkaFilter.pipe_traverse:
    #             s.send(data)

    @staticmethod
    def pipe_traverse(iterable):
        """Recursively flatten nested iterables, yielding leaves; strings are leaves."""
        for arg in iterable:
            try:
                if isinstance(arg, str):
                    yield arg
                else:
                    for i in arg | SheerkaFilter.pipe_traverse:
                        yield i
            except TypeError:
                # not iterable --- output leaf
                yield arg

    @staticmethod
    def pipe_concat(iterable, separator=", "):
        """Join the str() of every item with the given separator."""
        # warnings.warn(
        #     "pipe.concat is deprecated, use ', '.join(your | pipe) instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return separator.join(builtins.map(str, iterable))

    @staticmethod
    def pipe_as_list(iterable):
        """Materialize the iterable into a list."""
        # warnings.warn(
        #     "pipe.as_list is deprecated, use list(your | pipe) instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return list(iterable)

    @staticmethod
    def pipe_as_tuple(iterable):
        """Materialize the iterable into a tuple."""
        # warnings.warn(
        #     "pipe.as_tuple is deprecated, use tuple(your | pipe) instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return tuple(iterable)

    @staticmethod
    def pipe_tee(iterable):
        """Pass items through unchanged while echoing each one to stdout (debug aid)."""
        for item in iterable:
            sys.stdout.write(str(item) + "\n")
            yield item

    @staticmethod
    def pipe_to_file(iterable, fname, glue="\n"):
        """Terminal pipe: write every item to fname, separated by glue (yields nothing)."""
        with open(fname, "w") as f:
            for item in iterable:
                f.write(str(item) + glue)

    @staticmethod
    def pipe_add(x):
        """Sum all elements of the iterable."""
        # warnings.warn(
        #     "pipe.add is deprecated, use sum(your | pipe) instead.",
        #     DeprecationWarning,
        #     stacklevel=4,
        # )
        return sum(x)

    @staticmethod
    def pipe_select(iterable, selector):
        """Lazily map selector over the iterable (equivalent of LINQ Select)."""
        return builtins.map(selector, iterable)

    @staticmethod
    def pipe_format_l(iterable, template, when=None):
        """
        Define a formatting when printing a list of items
        :param iterable:
        :param template:
        :param when: format_l is set when the condition is verified
            (currently unused in the body — TODO confirm intended behaviour)
        :return:
        """
        for item in iterable:
            # Only items supporting the format-instructions protocol are touched.
            if hasattr(item, "get_format_instructions"):
                instructions = item.get_format_instructions() or FormatInstructions()
                instructions.set_format_l(item, template)
                item.set_format_instructions(instructions)
            yield item

    @staticmethod
    def pipe_format_d(iterable, *props, when=None, **format_l):
        """
        Define a formatting when printing the detail of an item
        :param iterable:
        :param props: list of properties to display
        :param when: format_d is set when the condition is verified
            (currently unused in the body — TODO confirm intended behaviour)
        :param format_l: custom formatting when printing the value of a property
        :return:
        """
        template = dict((p, "{" + p + "}") for p in props)
        for k, v in format_l.items():
            template[k] = v
        for item in iterable:
            if hasattr(item, "get_format_instructions"):
                if len(template) == 0:
                    # No props given: derive the template from the item's bag.
                    # NOTE(review): template persists across iterations, so the
                    # FIRST such item fixes the template for all the following
                    # ones — confirm this is intended.
                    bag = as_bag(item)
                    template = dict((p, "{" + p + "}") for p in bag)
                instructions = item.get_format_instructions() or FormatInstructions()
                instructions.set_format_d(item, template)
                item.set_format_instructions(instructions)
            yield item

    @staticmethod
    def pipe_recurse(iterable, depth, prop_name="_children", when=None):
        """
        When printing an object that has sub properties,
        indicate the depth of recursion to apply to a specific properties
        Quick and dirty version because the prop name is not taken from the item (but set to '_children' by default)
        :param iterable:
        :param depth:
        :param prop_name:
        :param when: recurse is set when the condition is verified
            (currently unused in the body — TODO confirm intended behaviour)
        :return:
        """
        for item in iterable:
            if hasattr(item, "get_format_instructions"):
                instructions = item.get_format_instructions() or FormatInstructions()
                instructions.set_recurse(prop_name, depth)
                item.set_format_instructions(instructions)
            yield item

    def pipe_filter(self, iterable, predicate):
        """
        Keep the items for which the predicate expression evaluates truthy.

        The predicate is compiled once (cached) and eval'd with the item's
        bag as namespace plus ``self`` bound to the item.  Items whose
        predicate raises NameError are silently skipped (deliberate: the
        predicate may reference properties some items do not have).
        SECURITY: predicate is eval'd — it must come from a trusted source.
        """
        compiled = self.get_compiled("filter", predicate)
        for item in iterable:
            try:
                context = {} if is_primitive(item) else as_bag(item)
                context["self"] = item
                if eval(compiled, context):
                    yield item
            except NameError:
                pass

    def pipe_inspect(self, iterable, path, when=None):
        """
        Follow the path
        :param iterable:
        :param path: dotted property path, e.g. "body.value"
        :param when: currently unused — TODO confirm intended behaviour
        :return: for each item, the value at the end of the path, or the
            exception raised while following it (errors are yielded, not raised)
        """
        # quick and dirty implementation as it does not handle dictionaries items
        for item in iterable:
            try:
                props = path.split(".")
                for prop in props:
                    compiled = self.get_compiled("inspect", prop)
                    context = {} if is_primitive(item) else as_bag(item)
                    # Each step re-evaluates against the current item's bag.
                    item = eval(compiled, context)
                yield item
            except Exception as ex:
                yield ex
+2 -2
View File
@@ -59,11 +59,11 @@ class SheerkaOut(BaseService):
sub_context.protected_hints.add(BuiltinConcepts.EVAL_WHERE_REQUESTED)
sub_context.protected_hints.add(BuiltinConcepts.EVAL_UNTIL_SUCCESS_REQUESTED)
sub_context.protected_hints.add(BuiltinConcepts.EVAL_QUESTION_REQUESTED)
# sub_context.deactivate_push()
sub_context.deactivate_push()
out_tree = self.create_out_tree(sub_context, ret)
# sub_context.activate_push()
sub_context.activate_push()
if out_tree:
for visitor in self.out_visitors:
@@ -654,43 +654,48 @@ class SheerkaRuleManager(BaseService):
def init_builtin_rules(self, context):
# self.sheerka.init_log.debug("Initializing default rules")
rules = [
# [0] Rule #1 Rule #2 in debug
# index=[0] in code, id=1 Rule #2 in debug
Rule("print", "Print return values", "__rets", "list(__rets)"),
# [1] Rule #2 in debug
# index=[1] in code, id=2 in debug
Rule("print", "Print ReturnValue",
"__ret",
"\\ReturnValue(who={__ret.who}, status={__ret.status}, value={__ret.value})"),
# [2] Rule #3 in debug
# index=[2] in code, id=3 in debug
Rule("print", "Failed ReturnValue in red",
"__ret and not __ret.status",
"red(__ret)"),
# [3] Rule #4 in debug
# index=[3] in code, id=4 in debug
Rule("print", "List explanations",
"isinstance(__ret_container, BuiltinConcepts.EXPLANATION)",
"blue(__ret_container.digest) : {__ret_container.command}\nlist(__ret_container)"),
# [4] Rule #5 in debug
# index=[4] in code, id=5 in debug
Rule("print", "Print ExecutionContext",
"isinstance(__obj, ExecutionContext)",
"[{id:3}] {__tab}{desc} ({status})"),
# [6] Rule #7 in debug
# index=[5] in code, id=6 in debug
Rule("print", "Display formatted list",
"isinstance(__ret_container, BuiltinConcepts.TO_LIST)",
"list(__ret_container)"),
# [7] Rule #8 in debug
# index=[6] in code, id=7 in debug
Rule("print", "Display formatted dict",
"isinstance(__ret_container, BuiltinConcepts.TO_DICT)",
"dict(__ret_container)"),
# [8] Rule #9 in debug
# index=[7] in code, id=8 in debug
Rule("print", "Display multiple outputs",
"isinstance(__ret_container, BuiltinConcepts.TO_MULTI)",
"multi(__ret_container)"),
# index=[8] in code, id=9 in debug
Rule("print", "Display multiple success",
"isinstance(__ret_container, BuiltinConcepts.MULTIPLE_SUCCESS)",
"list(__ret_container.body)"),
]
for r in rules:
@@ -700,6 +705,7 @@ class SheerkaRuleManager(BaseService):
self.sheerka.set_is_less_than(context, BuiltinConcepts.PRECEDENCE, rules[1], rules[3], RULE_COMPARISON_CONTEXT)
self.sheerka.set_is_less_than(context, BuiltinConcepts.PRECEDENCE, rules[1], rules[5], RULE_COMPARISON_CONTEXT)
self.sheerka.set_is_less_than(context, BuiltinConcepts.PRECEDENCE, rules[1], rules[6], RULE_COMPARISON_CONTEXT)
self.sheerka.set_is_less_than(context, BuiltinConcepts.PRECEDENCE, rules[1], rules[8], RULE_COMPARISON_CONTEXT)
self.sheerka.set_is_greater_than(context, BuiltinConcepts.PRECEDENCE, rules[7], rules[6],
RULE_COMPARISON_CONTEXT)
self.sheerka.set_is_greater_than(context, BuiltinConcepts.PRECEDENCE, rules[7], rules[5],
+1 -2
View File
@@ -3,8 +3,7 @@ from core.builtin_concepts import BuiltinConcepts, ParserResultConcept
from core.rule import Rule
from core.tokenizer import Keywords
from evaluators.BaseEvaluator import OneReturnValueEvaluator
from parsers.BaseParser import BaseParser
from parsers.FormatRuleParser import FormatRuleNode
from parsers.DefFormatRuleParser import FormatRuleNode
class FormatRuleEvaluator(OneReturnValueEvaluator):
+1 -1
View File
@@ -5,7 +5,7 @@ from parsers.BaseParser import BaseParser
class MultipleErrorsEvaluator(AllReturnValuesEvaluator):
"""
Use to reduce to evaluator errors
Used to reduce to evaluator errors
All parser error will be discarded
Cannot match if there is at least one successful evaluator
"""
@@ -0,0 +1,67 @@
from core.builtin_concepts import BuiltinConcepts
from evaluators.BaseEvaluator import AllReturnValuesEvaluator, BaseEvaluator
from parsers.BaseParser import BaseParser
class MultipleSuccessEvaluator(AllReturnValuesEvaluator):
    """
    Used to reduce evaluators
    It's used when there are multiple successful return values, but their values are not evaluated
    So we cannot decide whether it's a MultipleSameSuccess or not
    All parsers in error will be discarded
    Cannot match if there is at least one evaluator in error
    """
    NAME = "MultipleSuccess"

    def __init__(self):
        # Runs during the AFTER_EVALUATION step with priority 30.
        super().__init__(self.NAME, [BuiltinConcepts.AFTER_EVALUATION], 30)
        # Successful evaluator return values collected by matches().
        self.successful_return_values = []

    def reset(self):
        """Clear collected results between evaluation rounds."""
        super().reset()
        self.successful_return_values.clear()

    def matches(self, context, return_values):
        """
        Decide whether this evaluator applies to the current return values.

        Matches only when a REDUCE_REQUESTED marker is present AND more than
        one distinct successful evaluator result was collected.  Any
        successful parser result, failed evaluator result, or duplicate value
        vetoes the match.
        """
        nb_evaluators_in_success = 0
        to_process = False
        for ret in return_values:
            if ret.status and ret.who.startswith(BaseParser.PREFIX):
                # A successful parser result means evaluation is not finished yet.
                return False
            elif ret.who.startswith(BaseEvaluator.PREFIX) and not ret.status:
                # At least one evaluator failed: not a "multiple success" case.
                return False
            elif ret.status and context.sheerka.isinstance(ret.body, BuiltinConcepts.REDUCE_REQUESTED):
                # Reduction explicitly requested: consume the marker.
                to_process = True
                self.eaten.append(ret)
            elif ret.status and ret.who.startswith(BaseEvaluator.PREFIX):
                if self.already_seen(context, ret):
                    # Duplicate value: bail out (another evaluator handles sameness).
                    return False
                nb_evaluators_in_success += 1
                self.successful_return_values.append(ret)
                self.eaten.append(ret)
            elif not ret.status and ret.who.startswith(BaseParser.PREFIX):
                # Failed parser results are silently discarded (consumed).
                self.eaten.append(ret)
            # else:
            # other concepts. We do not care if there are successful or not
            # They won't be part of result nor part of the parent
            # --> So they will be handled by other evaluators
        return to_process and nb_evaluators_in_success > 1

    def eval(self, context, return_values):
        """Wrap the collected successes into a single MULTIPLE_SUCCESS return value."""
        context.log(f"{len(self.successful_return_values)} successful return values, {len(self.eaten)} item(s) eaten",
                    who=self)
        context.log(f"{self.successful_return_values}", who=self)
        sheerka = context.sheerka
        # The consumed items become the parents of the reduced result.
        return sheerka.ret(
            self.name,
            True,
            sheerka.new(BuiltinConcepts.MULTIPLE_SUCCESS, body=self.successful_return_values.copy()),
            parents=self.eaten)

    def already_seen(self, context, ret_val):
        """Return True when an equal value (per sheerka.objvalue) was already collected."""
        for successful in self.successful_return_values:
            if context.sheerka.objvalue(successful.value) == context.sheerka.objvalue(ret_val.value):
                return True
        return False
+12 -12
View File
@@ -2,7 +2,7 @@ from dataclasses import dataclass, field
import core.utils
from core.tokenizer import Keywords, TokenKind, Tokenizer
from parsers.BaseParser import BaseParser, Node, ErrorNode, UnexpectedEofNode, UnexpectedTokenErrorNode
from parsers.BaseParser import BaseParser, Node, ParsingError, UnexpectedEofParsingError, UnexpectedTokenParsingError
@dataclass()
@@ -14,7 +14,7 @@ class CustomGrammarParserNode(Node):
@dataclass()
class SyntaxErrorNode(CustomGrammarParserNode, ErrorNode):
class SyntaxErrorNode(CustomGrammarParserNode, ParsingError):
"""
The input is recognized, but there is a syntax error
"""
@@ -40,7 +40,7 @@ class SyntaxErrorNode(CustomGrammarParserNode, ErrorNode):
@dataclass()
class KeywordNotFound(CustomGrammarParserNode, ErrorNode):
class KeywordNotFound(CustomGrammarParserNode, ParsingError):
keywords: list
def __eq__(self, other):
@@ -98,12 +98,12 @@ class BaseCustomGrammarParser(BaseParser):
return None
if tokens[pos].type != TokenKind.NEWLINE:
self.add_error(UnexpectedTokenErrorNode("New line not found.", tokens[pos], [TokenKind.NEWLINE]))
self.add_error(UnexpectedTokenParsingError("New line not found.", tokens[pos], [TokenKind.NEWLINE]))
return None
pos += 1
if tokens[pos].type != TokenKind.WHITESPACE:
self.add_error(UnexpectedTokenErrorNode("Indentation not found.", tokens[pos], [TokenKind.WHITESPACE]))
self.add_error(UnexpectedTokenParsingError("Indentation not found.", tokens[pos], [TokenKind.WHITESPACE]))
return None
indent_size = get_tab_size(self.DEFAULT_TAB_SIZE, tokens[pos].value)
@@ -113,9 +113,9 @@ class BaseCustomGrammarParser(BaseParser):
while i < len(tokens) - 1:
if tokens[i].type == TokenKind.NEWLINE:
if tokens[i + 1].type != TokenKind.WHITESPACE:
self.add_error(UnexpectedTokenErrorNode("Indentation not found.",
tokens[i + 1],
[TokenKind.WHITESPACE]))
self.add_error(UnexpectedTokenParsingError("Indentation not found.",
tokens[i + 1],
[TokenKind.WHITESPACE]))
return None
if get_tab_size(self.DEFAULT_TAB_SIZE, tokens[i + 1].value) < indent_size:
@@ -177,9 +177,9 @@ class BaseCustomGrammarParser(BaseParser):
token = self.parser_input.token
if expected_first_token and token.value != expected_first_token.value:
self.add_error(UnexpectedTokenErrorNode(f"'{expected_first_token.value}' keyword not found.",
token,
[expected_first_token]))
self.add_error(UnexpectedTokenParsingError(f"'{expected_first_token.value}' keyword not found.",
token,
[expected_first_token]))
return None
if token.value not in keywords:
@@ -225,7 +225,7 @@ class BaseCustomGrammarParser(BaseParser):
res[keyword] = [token] # to keep track of when it starts
colon_mode_activated = self.parser_input.the_token_after().type == TokenKind.COLON
if not self.parser_input.next_token():
self.add_error(UnexpectedEofNode(f"While parsing keyword '{keyword.value}'."))
self.add_error(UnexpectedEofParsingError(f"While parsing keyword '{keyword.value}'."))
break
else:
res[keyword].append(token)
+3 -3
View File
@@ -8,7 +8,7 @@ from core.builtin_concepts import BuiltinConcepts
from core.concept import VARIABLE_PREFIX, Concept, DEFINITION_TYPE_BNF, ConceptParts
from core.rule import Rule
from core.tokenizer import TokenKind, Token
from parsers.BaseParser import Node, BaseParser, ErrorNode
from parsers.BaseParser import Node, BaseParser, ParsingError
DEBUG_COMPILED = True
@@ -19,7 +19,7 @@ class ChickenAndEggError(Exception):
@dataclass
class NoFirstTokenError(ErrorNode):
class NoFirstTokenError(ParsingError):
concept: Concept
key: str
@@ -424,7 +424,7 @@ class SourceCodeWithConceptNode(LexerNode):
@dataclass()
class GrammarErrorNode(ErrorNode):
class GrammarErrorNode(ParsingError):
message: str
+13 -208
View File
@@ -35,14 +35,6 @@ class Node:
pass
@dataclass()
class NopNode(Node):
pass
def __repr__(self):
return "nop"
class NotInitializedNode(Node):
pass
@@ -51,12 +43,12 @@ class NotInitializedNode(Node):
@dataclass()
class ErrorNode(Node, ErrorObj):
class ParsingError(Node, ErrorObj):
pass
@dataclass()
class UnexpectedTokenErrorNode(ErrorNode):
class UnexpectedTokenParsingError(ParsingError):
message: str
token: Union[Token, str]
expected_tokens: list
@@ -65,7 +57,7 @@ class UnexpectedTokenErrorNode(ErrorNode):
if id(other) == id(self):
return True
if not isinstance(other, UnexpectedTokenErrorNode):
if not isinstance(other, UnexpectedTokenParsingError):
return False
if self.message != other.message:
@@ -82,8 +74,8 @@ class UnexpectedTokenErrorNode(ErrorNode):
@dataclass()
class UnexpectedEofNode(ErrorNode):
message: str
class UnexpectedEofParsingError(ParsingError):
message: str = None
class BaseParser:
@@ -214,46 +206,16 @@ class BaseParser:
return parser_input.value
    @staticmethod
    def manage_eof(lst, strip_eof):
        """
        Normalize the trailing EOF token of a token list.

        :param lst: list of Token, modified in place
        :param strip_eof: when True, remove a trailing EOF token;
            otherwise ensure the list ends with an EOF token
        :return: the same list, for chaining
        """
        if strip_eof:
            if len(lst) and lst[-1].type == TokenKind.EOF:
                lst.pop()
            return lst
        if len(lst) == 0 or not lst[-1].type == TokenKind.EOF:
            # Synthetic EOF carries no text and -1 positions.
            lst.append(Token(TokenKind.EOF, "", -1, -1, -1))
        return lst
# @staticmethod
# def get_text_from_tokens(tokens, custom_switcher=None, tracker=None):
# """
# Create the source code, from the list of token
# :param tokens: list of tokens
# :param custom_switcher: to override the behaviour (the return value) of some token
# :param tracker: keep track of the original token value when custom switched
# :return:
# """
# if tokens is None:
# return ""
# res = ""
# def manage_eof(lst, strip_eof):
# if strip_eof:
# if len(lst) and lst[-1].type == TokenKind.EOF:
# lst.pop()
# return lst
#
# if not hasattr(tokens, "__iter__"):
# tokens = [tokens]
#
# switcher = {
# # TokenKind.CONCEPT: lambda t: core.utils.str_concept(t.value),
# }
#
# if custom_switcher:
# switcher.update(custom_switcher)
#
# for token in tokens:
# value = switcher.get(token.type, lambda t: t.str_value)(token)
# res += value
# if tracker is not None and token.type in custom_switcher:
# tracker[value] = token.value
# return res
# if len(lst) == 0 or not lst[-1].type == TokenKind.EOF:
# lst.append(Token(TokenKind.EOF, "", -1, -1, -1))
# return lst
@staticmethod
def get_tokens_boundaries(tokens):
@@ -302,160 +264,3 @@ class BaseParser:
@staticmethod
def get_name(name):
return BaseParser.PREFIX + name
class BaseTokenizerIterParser(BaseParser):
    """
    Base parser that walks tokens lazily from a Tokenizer iterator.

    ``none_on_eof`` controls whether hitting EOF sets the current token to
    None (and next_token() reports failure) instead of exposing the EOF token.
    """

    def __init__(self, name, priority, parse_word=False, none_on_eof=True):
        super().__init__(name, priority)
        self.lexer_iter = None  # token iterator, created by reset_parser()
        self._current = None  # current token (None before start / after EOF)
        self.context: ExecutionContext = None
        self.text = None
        self.sheerka = None
        self.parse_word = parse_word  # forwarded to the Tokenizer
        self.none_on_eof = none_on_eof

    def reset_parser(self, context, text):
        """Rebind the parser to a new execution context and input text."""
        self.context = context
        self.sheerka = context.sheerka
        self.text = text
        self.lexer_iter = iter(Tokenizer(text, self.parse_word))
        self._current = None
        # Prime the first token so get_token() is immediately usable.
        self.next_token()

    def add_error(self, error, next_token=True):
        """Record a parsing error; optionally advance past the offending token."""
        self.error_sink.append(error)
        if next_token:
            self.next_token()
        return error

    def get_token(self) -> Token:
        """Return the current token (None before start or after EOF)."""
        return self._current

    def next_token(self, skip_whitespace=True):
        """
        Advance to the next token.

        :param skip_whitespace: also skip over WHITESPACE and NEWLINE tokens
        :return: True when a token is available, False at EOF / exhaustion
        """
        try:
            self._current = next(self.lexer_iter)
            if self.none_on_eof and self._current.type == TokenKind.EOF:
                self._current = None
                return False
            if skip_whitespace:
                while self._current.type == TokenKind.WHITESPACE or self._current.type == TokenKind.NEWLINE:
                    self._current = next(self.lexer_iter)
        except StopIteration:
            self._current = None
            return False
        return True
class BaseSplitIterParser(BaseParser):
    """
    Base parser tokenizing its input with a lightweight hand-rolled splitter
    instead of the full Tokenizer: words (with quoting and backslash escapes),
    '=', '==', parentheses and commas only.
    """

    def __init__(self, name, priority, none_on_eof=False):
        super().__init__(name, priority)
        self._current = None  # current token (None before start / after EOF)
        self.context: ExecutionContext = None
        self.text = None
        self.sheerka = None
        self.iter_split = None  # generator over tokens, created by reset_parser()
        # Separators that are swallowed silently.
        self.split_and_eat_tokens = (" ", "\n", "\t")
        # Separators that become tokens of their own.
        self.split_and_keep_tokens = ("=", ")", "(", ",")
        self.split_tokens = self.split_and_eat_tokens + self.split_and_keep_tokens
        self.none_on_eof = none_on_eof  # current token is set to None when EOF is hit

    def parse_word(self, c, index, line, column):
        """
        Accumulate one word starting at *index*, honouring backslash escapes
        and single/double quotes (inside quotes only the matching quote ends
        the word).

        :param c: current character (self.text[index])
        :param index: absolute position in self.text
        :param line: current line number
        :param column: current column number
        :return: (word, next index, next line, next column)
        """
        end = self.split_tokens
        escaped = False
        buffer = ""
        while escaped or c not in end:
            if not escaped and c == "\\":
                escaped = True
            elif not escaped and c in ("'", '"'):
                # Entering a quoted section: only the matching quote terminates.
                end = [c]
            else:
                buffer += c
                escaped = False
            index, column = index + 1, column + 1
            if index == len(self.text):
                break
            c = self.text[index]
            if c == "\n":
                # NOTE(review): the line counter advances here, but a newline
                # consumed as the word terminator below is not counted — confirm.
                line += 1
                column = 0
        # Consume the terminator (whitespace or closing quote) but keep
        # '=', '(', ')', ',' so split() can emit them as tokens.
        if c not in self.split_and_keep_tokens:
            index, column = index + 1, column + 1
        return buffer, index, line, column

    def split(self):
        """Yield Tokens for the whole text, always ending with an EOF token."""
        index = 0
        line = 1
        column = 1
        while index < len(self.text):
            c = self.text[index]
            if c == "=":
                # Lookahead to distinguish '==' from '='.
                if index + 1 < len(self.text) and self.text[index + 1] == "=":
                    yield Token(TokenKind.EQUALSEQUALS, "==", index, line, column)
                    index, column = index + 2, column + 2
                else:
                    yield Token(TokenKind.EQUALS, "=", index, line, column)
                    index, column = index + 1, column + 1
            elif c == ")":
                yield Token(TokenKind.RPAR, ")", index, line, column)
                index, column = index + 1, column + 1
            elif c == "(":
                yield Token(TokenKind.LPAR, "(", index, line, column)
                index, column = index + 1, column + 1
            elif c == ",":
                yield Token(TokenKind.COMMA, ",", index, line, column)
                index, column = index + 1, column + 1
            else:
                buffer, end_index, end_line, end_column = self.parse_word(c, index, line, column)
                if buffer:
                    # Token position is where the word started.
                    yield Token(TokenKind.WORD, buffer, index, line, column)
                index, line, column = end_index, end_line, end_column
        yield Token(TokenKind.EOF, "<eof>", index, line, column)

    def reset_parser(self, context, text):
        """Rebind the parser to a new context/text and restart the splitter."""
        self.context = context
        self.sheerka = context.sheerka if context else None
        self.text = text
        self._current = None
        self.iter_split = iter(self.split())

    def add_error(self, error, next_token=True):
        """Record a parsing error; optionally advance past the offending token."""
        self.error_sink.append(error)
        if next_token:
            self.next_token()
        return error

    def get_token(self) -> Token:
        """Return the current token (None before start or after EOF)."""
        return self._current

    def next_token(self):
        """Advance; return True when a token is available, False at EOF."""
        try:
            self._current = next(self.iter_split)
            if self._current.type == TokenKind.EOF:
                if self.none_on_eof:
                    self._current = None
                return False
        except StopIteration:
            self._current = None
            return False
        return True
+6 -13
View File
@@ -1,19 +1,12 @@
from dataclasses import dataclass
import core.utils
from core.builtin_concepts import BuiltinConcepts
from core.sheerka.Sheerka import ExecutionContext
from core.tokenizer import Tokenizer, Token, TokenKind, LexerError
from parsers.BaseParser import BaseParser, ErrorNode, UnexpectedTokenErrorNode
from parsers.BaseParser import BaseParser, UnexpectedTokenParsingError, UnexpectedEofParsingError
from parsers.BnfNodeParser import OrderedChoice, Sequence, Optional, ZeroOrMore, OneOrMore, \
ConceptExpression, StrMatch
@dataclass()
class UnexpectedEndOfFileError(ErrorNode):
pass
class BnfDefinitionParser(BaseParser):
"""
Parser used to transform literal into ParsingExpression
@@ -52,7 +45,7 @@ class BnfDefinitionParser(BaseParser):
self.context = context
self.sheerka = context.sheerka
self.source = ""
self.lexer_iter = iter(Tokenizer(text.strip())) if isinstance(text, str) else iter(text)
self._current = None
self.after_current = None
@@ -123,7 +116,7 @@ class BnfDefinitionParser(BaseParser):
token = self.get_token()
if token and token.type != TokenKind.EOF:
self.add_error(UnexpectedTokenErrorNode(f"Unexpected token '{token}'", token, []))
self.add_error(UnexpectedTokenParsingError(f"Unexpected token '{token}'", token, []))
except LexerError as e:
return self.sheerka.ret(
self.name,
@@ -221,7 +214,7 @@ class BnfDefinitionParser(BaseParser):
def parse_expression(self):
token = self.get_token()
if token.type == TokenKind.EOF:
self.add_error(UnexpectedEndOfFileError(), False)
self.add_error(UnexpectedEofParsingError(), False)
if token.type == TokenKind.LPAR:
self.nb_open_par += 1
self.next_token()
@@ -232,7 +225,7 @@ class BnfDefinitionParser(BaseParser):
self.next_token()
return self.eat_rule_name_if_needed(expr)
else:
self.add_error(UnexpectedTokenErrorNode(f"Unexpected token '{token}'", token, [TokenKind.RPAR]))
self.add_error(UnexpectedTokenParsingError(f"Unexpected token '{token}'", token, [TokenKind.RPAR]))
return expr
if token.type == TokenKind.CONCEPT:
@@ -291,7 +284,7 @@ class BnfDefinitionParser(BaseParser):
if token is None or token.type != TokenKind.IDENTIFIER:
return self.add_error(
UnexpectedTokenErrorNode(f"Unexpected token '{token}'", token, [TokenKind.IDENTIFIER]))
UnexpectedTokenParsingError(f"Unexpected token '{token}'", token, [TokenKind.IDENTIFIER]))
expression.rule_name = token.value
self.next_token()
-66
View File
@@ -1341,72 +1341,6 @@ class BnfNodeParser(BaseNodeParser):
debugger.debug_var("result", concept_parser_helpers)
return concept_parser_helpers
def fix_infinite_recursions(self, context, grammar, concept_id, parsing_expression):
"""
Check the newly created parsing expression
Some infinite recursion can be resolved, simply by removing the pexpression that causes the loop
Let's look for that
:param context:
:param grammar:
:param concept_id:
:param parsing_expression:
:return:
"""
def _find(expression_, path_):
index_ = -1
parent_ = None
for node_id in path_:
expression_ = expression_.nodes[0] if isinstance(expression_, ConceptExpression) else expression_
for i, node in [(i, n) for i, n in enumerate(expression_.nodes) if isinstance(n, ConceptExpression)]:
if node_id == node.concept.id:
index_ = i
parent_ = expression_
expression_ = node # take the child of the ConceptExpression found
break
else:
raise IndexError(f"path {path_} cannot be found in '{expression_}'")
return parent_, index_, expression_
def _fix_node(expression, path):
parent, index, expression_update = _find(expression, path[1:-2])
assert isinstance(expression_update, ConceptExpression)
desc = f"Fixing circular reference {path}"
with context.push(BuiltinConcepts.INIT_BNF,
expression_update.concept,
who=self.name,
obj=expression_update.concept,
concepts_to_skip=[concept_id],
desc=desc) as sub_context:
new_grammar = grammar.copy()
for node_id in path[-2:]:
del new_grammar[node_id]
new_nodes = self.resolve_concept_parsing_expression(sub_context,
expression_update.concept,
expression_update.rule_name, new_grammar, set())
new = ConceptExpression(expression_update.concept,
rule_name=expression_update.rule_name,
nodes=new_nodes)
parent.nodes[index] = new
while True:
already_found = [concept_id]
concepts_in_recursion = []
if self.check_for_infinite_recursion(parsing_expression, already_found, concepts_in_recursion):
if "#" in concepts_in_recursion[-2]:
# means that it's isaset concept
_fix_node(parsing_expression, concepts_in_recursion[:-1])
else:
break
else:
break
return concepts_in_recursion
def check_for_infinite_recursion(self, parsing_expression, already_found, in_recursion, only_first=False):
if isinstance(parsing_expression, ConceptExpression):
+9 -9
View File
@@ -7,7 +7,7 @@ from core.concept import ConceptParts, DEFINITION_TYPE_BNF, DEFINITION_TYPE_DEF
from core.sheerka.services.SheerkaExecute import ParserInput, SheerkaExecute
from core.tokenizer import TokenKind, Keywords
from parsers.BaseCustomGrammarParser import BaseCustomGrammarParser, SyntaxErrorNode
from parsers.BaseParser import Node, ErrorNode, NotInitializedNode, UnexpectedTokenErrorNode
from parsers.BaseParser import Node, ParsingError, NotInitializedNode, UnexpectedTokenParsingError
from parsers.BnfDefinitionParser import BnfDefinitionParser
@@ -17,7 +17,7 @@ class ParsingException(Exception):
@dataclass()
class DefConceptParserNode(Node):
class DefConceptParsingResult(Node):
"""
Base node for all default parser nodes
"""
@@ -25,12 +25,12 @@ class DefConceptParserNode(Node):
@dataclass()
class DefConceptParserErrorNode(DefConceptParserNode, ErrorNode):
class DefConceptParsingError(DefConceptParsingResult, ParsingError):
pass
@dataclass()
class CannotHandleErrorNode(DefConceptParserErrorNode):
class CannotHandleParsingError(DefConceptParsingError):
"""
The input is not recognized
"""
@@ -38,7 +38,7 @@ class CannotHandleErrorNode(DefConceptParserErrorNode):
@dataclass()
class NameNode(DefConceptParserNode):
class NameNode(DefConceptParsingResult):
def get_name(self):
name = ""
@@ -70,7 +70,7 @@ class NameNode(DefConceptParserNode):
@dataclass()
class DefConceptNode(DefConceptParserNode):
class DefConceptNode(DefConceptParsingResult):
name: NameNode = NotInitializedNode()
where: ReturnValueConcept = NotInitializedNode()
pre: ReturnValueConcept = NotInitializedNode()
@@ -92,7 +92,7 @@ class DefConceptNode(DefConceptParserNode):
@dataclass()
class IsaConceptNode(DefConceptParserNode):
class IsaConceptNode(DefConceptParsingResult):
concept: NameNode = NotInitializedNode()
set: NameNode = NotInitializedNode()
@@ -146,7 +146,7 @@ class DefConceptParser(BaseCustomGrammarParser):
"""
token = self.parser_input.token
if token.value != Keywords.DEF.value:
self.add_error(UnexpectedTokenErrorNode("'def' keyword not found.", token, [Keywords.DEF]))
self.add_error(UnexpectedTokenParsingError("'def' keyword not found.", token, [Keywords.DEF]))
return None
self.context.log("Keyword DEF found.", self.name)
@@ -168,7 +168,7 @@ class DefConceptParser(BaseCustomGrammarParser):
keywords_found.extend([t[0] for t in parts.values()]) # keep track of all keywords found
node = DefConceptNode(keywords_found)
# if first_token.type == TokenKind.EOF:
# return self.add_error(UnexpectedTokenErrorNode([first_token], "Unexpected end of file", [Keywords.CONCEPT]))
# return self.add_error(UnexpectedTokenParsingError([first_token], "Unexpected end of file", [Keywords.CONCEPT]))
# get the name
node.name = self.get_concept_name(parts[Keywords.CONCEPT])
@@ -16,7 +16,7 @@ class FormatRuleNode(Node):
format_ast: FormatAstNode = None
class FormatRuleParser(BaseCustomGrammarParser):
class DefFormatRuleParser(BaseCustomGrammarParser):
"""
Class that will parse formatting rules definitions
eg: when xxx print yyy
@@ -28,7 +28,7 @@ class FormatRuleParser(BaseCustomGrammarParser):
KEYWORDS_VALUES = [k.value for k in KEYWORDS]
def __init__(self, **kwargs):
BaseCustomGrammarParser.__init__(self, "FormatRule", 60)
BaseCustomGrammarParser.__init__(self, "DefFormatRule", 60)
def parse(self, context, parser_input: ParserInput):
"""
+9 -7
View File
@@ -4,8 +4,8 @@ from typing import List, Tuple, Callable
from core.builtin_concepts import BuiltinConcepts
from core.concept import Concept
from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import LexerError, TokenKind, Token
from parsers.BaseParser import Node, BaseParser, UnexpectedTokenErrorNode, UnexpectedEofNode, ErrorNode
from core.tokenizer import TokenKind, Token
from parsers.BaseParser import Node, BaseParser, UnexpectedTokenParsingError, UnexpectedEofParsingError, ParsingError
class ExprNode(Node):
@@ -19,7 +19,7 @@ class ExprNode(Node):
@dataclass()
class LeftPartNotFoundError(ErrorNode):
class LeftPartNotFoundError(ParsingError):
"""
When the expression starts with 'or' or 'and'
"""
@@ -161,6 +161,7 @@ class OrNode(ExprNode):
def __str__(self):
return " or ".join([str(p) for p in self.parts])
@dataclass()
class NotNode(ExprNode):
node: ExprNode
@@ -219,7 +220,7 @@ class ExpressionParser(BaseParser):
tree = self.parse_or()
token = self.parser_input.token
if token and token.type != TokenKind.EOF:
self.add_error(UnexpectedTokenErrorNode(f"Unexpected token '{token}'", token, []))
self.add_error(UnexpectedTokenParsingError(f"Unexpected token '{token}'", token, []))
value = self.get_return_value_body(context.sheerka, self.parser_input.as_text(), tree, tree)
@@ -241,7 +242,7 @@ class ExpressionParser(BaseParser):
self.parser_input.next_token()
expr = self.parse_and()
if expr is None:
self.add_error(UnexpectedEofNode("When parsing 'or'"))
self.add_error(UnexpectedEofParsingError("When parsing 'or'"))
return OrNode(*parts)
parts.append(expr)
token = self.parser_input.token
@@ -259,7 +260,7 @@ class ExpressionParser(BaseParser):
self.parser_input.next_token()
expr = self.parse_names()
if expr is None:
self.add_error(UnexpectedEofNode("When parsing 'and'"))
self.add_error(UnexpectedEofParsingError("When parsing 'and'"))
return AndNode(*parts)
parts.append(expr)
token = self.parser_input.token
@@ -282,7 +283,8 @@ class ExpressionParser(BaseParser):
expr = self.parse_or()
token = self.parser_input.token
if token.type != TokenKind.RPAR:
self.error_sink.append(UnexpectedTokenErrorNode(f"Unexpected token '{token}'", token, [TokenKind.RPAR]))
self.error_sink.append(
UnexpectedTokenParsingError(f"Unexpected token '{token}'", token, [TokenKind.RPAR]))
return expr
self.parser_input.next_token()
return expr
+16 -16
View File
@@ -9,7 +9,7 @@ from core.tokenizer import TokenKind, Token
from core.utils import get_n_clones
from parsers.SequenceNodeParser import SequenceNodeParser
from parsers.BaseNodeParser import SourceCodeNode, SourceCodeWithConceptNode, UnrecognizedTokensNode
from parsers.BaseParser import BaseParser, UnexpectedTokenErrorNode, UnexpectedEofNode, Node
from parsers.BaseParser import BaseParser, UnexpectedTokenParsingError, UnexpectedEofParsingError, Node
from parsers.BnfNodeParser import BnfNodeParser
from parsers.PythonWithConceptsParser import PythonWithConceptsParser
from parsers.RuleParser import RuleParser
@@ -191,9 +191,9 @@ class FunctionParser(BaseParser):
node = self.parse_function()
if self.parser_input.next_token():
self.add_error(UnexpectedTokenErrorNode("Only one function supported",
self.parser_input.token,
[TokenKind.EOF]))
self.add_error(UnexpectedTokenParsingError("Only one function supported",
self.parser_input.token,
[TokenKind.EOF]))
if self.has_error:
if node is None:
@@ -222,25 +222,25 @@ class FunctionParser(BaseParser):
start = self.parser_input.pos
token = self.parser_input.token
if token.type != TokenKind.IDENTIFIER:
self.add_error(UnexpectedTokenErrorNode(f"{token.repr_value} is not a identifier",
token,
[TokenKind.IDENTIFIER]))
self.add_error(UnexpectedTokenParsingError(f"{token.repr_value} is not a identifier",
token,
[TokenKind.IDENTIFIER]))
return None
if not self.parser_input.next_token():
self.add_error(UnexpectedEofNode(f"Unexpected EOF while parsing left parenthesis"))
self.add_error(UnexpectedEofParsingError(f"Unexpected EOF while parsing left parenthesis"))
return None
token = self.parser_input.token
if token.type != TokenKind.LPAR:
self.add_error(UnexpectedTokenErrorNode(f"{token.repr_value} is not a left parenthesis",
token,
[TokenKind.LPAR]))
self.add_error(UnexpectedTokenParsingError(f"{token.repr_value} is not a left parenthesis",
token,
[TokenKind.LPAR]))
return None
start_node = NamesNode(start, start + 1, self.parser_input.tokens[start:start + 2])
if not self.parser_input.next_token():
self.add_error(UnexpectedEofNode(f"Unexpected EOF after left parenthesis"))
self.add_error(UnexpectedEofParsingError(f"Unexpected EOF after left parenthesis"))
return FunctionNode(start_node, None, None)
params = self.parse_parameters()
@@ -249,9 +249,9 @@ class FunctionParser(BaseParser):
token = self.parser_input.token
if not token or token.type != TokenKind.RPAR:
self.add_error(UnexpectedTokenErrorNode(f"Right parenthesis not found",
token,
[TokenKind.RPAR]))
self.add_error(UnexpectedTokenParsingError(f"Right parenthesis not found",
token,
[TokenKind.RPAR]))
return FunctionNode(start_node, None, params)
return FunctionNode(start_node,
@@ -270,7 +270,7 @@ class FunctionParser(BaseParser):
token = self.parser_input.token
if token.type == TokenKind.EOF:
self.add_error(UnexpectedEofNode(f"Unexpected EOF while parsing parameters"))
self.add_error(UnexpectedEofParsingError(f"Unexpected EOF while parsing parameters"))
return None
if token.type == TokenKind.RPAR:
+4 -4
View File
@@ -6,7 +6,7 @@ import core.utils
from core.builtin_concepts import BuiltinConcepts
from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import TokenKind
from parsers.BaseParser import BaseParser, Node, ErrorNode
from parsers.BaseParser import BaseParser, Node, ParsingError
log = logging.getLogger(__name__)
@@ -20,7 +20,7 @@ def get_python_node(obj):
@dataclass()
class PythonErrorNode(ErrorNode):
class PythonErrorNode(ParsingError):
source: str
exception: Exception
@@ -29,7 +29,7 @@ class PythonErrorNode(ErrorNode):
@dataclass()
class ConceptDetected(ErrorNode):
class ConceptDetectedError(ParsingError):
name: str
@@ -138,7 +138,7 @@ class PythonParser(BaseParser):
if isinstance(tree, ast.Expression) and isinstance(tree.body, ast.Name):
if tree.body.id in tracker or context.sheerka.fast_resolve(tree.body.id, return_new=False) is not None:
context.log("It's a simple concept. Not for me.", self.name)
self.error_sink.append(ConceptDetected(tree.body.id))
self.error_sink.append(ConceptDetectedError(tree.body.id))
if self.has_error:
ret = sheerka.ret(
+7 -7
View File
@@ -2,16 +2,16 @@ from core.builtin_concepts import BuiltinConcepts
from core.rule import Rule, ACTION_TYPE_DEFERRED
from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import TokenKind
from parsers.BaseParser import BaseParser, ErrorNode, UnexpectedTokenErrorNode
from parsers.BaseParser import BaseParser, ParsingError, UnexpectedTokenParsingError
class RuleNotFound(ErrorNode):
class RuleNotFoundError(ParsingError):
def __init__(self, id_as_tuple):
self.key = id_as_tuple[0]
self.id = id_as_tuple[1]
def __repr__(self):
return f"RuleNotFound(id={self.id}, key={self.key}"
return f"RuleNotFoundError(id={self.id}, key={self.key}"
class RuleParser(BaseParser):
@@ -54,9 +54,9 @@ class RuleParser(BaseParser):
token = parser_input.token
if parser_input.next_token():
reason = UnexpectedTokenErrorNode("Only one rule supported",
parser_input.token,
[TokenKind.EOF])
reason = UnexpectedTokenParsingError("Only one rule supported",
parser_input.token,
[TokenKind.EOF])
return sheerka.ret(self.name,
False,
sheerka.new(BuiltinConcepts.NOT_FOR_ME, body=parser_input.as_text(), reason=reason))
@@ -76,7 +76,7 @@ class RuleParser(BaseParser):
return sheerka.ret(self.name,
False,
sheerka.new(BuiltinConcepts.ERROR,
body=[RuleNotFound(token.value)]))
body=[RuleNotFoundError(token.value)]))
body = sheerka.new(BuiltinConcepts.PARSER_RESULT,
parser=self,
source=parser_input.as_text(),
+5 -5
View File
@@ -7,7 +7,7 @@ from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import Tokenizer, TokenKind
from core.utils import strip_tokens, make_unique
from parsers.BaseNodeParser import BaseNodeParser, ConceptNode, UnrecognizedTokensNode, SourceCodeNode
from parsers.BaseParser import UnexpectedTokenErrorNode, ErrorNode
from parsers.BaseParser import UnexpectedTokenParsingError, ParsingError
from parsers.BnfNodeParser import BnfNodeParser
from parsers.SyaNodeParser import SyaNodeParser
@@ -15,14 +15,14 @@ PARSERS = [BnfNodeParser.NAME, SyaNodeParser.NAME, "Python"]
@dataclass()
class TokensNodeFound(ErrorNode):
class TokensNodeFoundError(ParsingError):
expected_tokens: list
def __eq__(self, other):
if id(other) == id(self):
return True
if not isinstance(other, UnexpectedTokenErrorNode):
if not isinstance(other, UnexpectedTokenParsingError):
return False
if self.message != other.message:
@@ -96,7 +96,7 @@ class AtomConceptParserHelper:
self.debug.append(token)
if self.expected_tokens[0] != token.strip_quote:
self.errors.append(UnexpectedTokenErrorNode(
self.errors.append(UnexpectedTokenParsingError(
f"Found '{token}' while expecting '{self.expected_tokens[0]}'",
token,
[self.expected_tokens[0]]))
@@ -186,7 +186,7 @@ class AtomConceptParserHelper:
forked.finalize()
if self.expected_tokens:
self.errors.append(TokensNodeFound(self.expected_tokens))
self.errors.append(TokensNodeFoundError(self.expected_tokens))
def clone(self):
clone = AtomConceptParserHelper(self.context)
+15 -15
View File
@@ -14,7 +14,7 @@ from core.tokenizer import Token, TokenKind, Tokenizer
from core.utils import get_n_clones, get_text_from_tokens, NextIdManager
from parsers.BaseNodeParser import UnrecognizedTokensNode, ConceptNode, SourceCodeNode, SyaAssociativity, \
SourceCodeWithConceptNode, BaseNodeParser
from parsers.BaseParser import ErrorNode
from parsers.BaseParser import ParsingError
PARSERS = ["Sequence", "Bnf", "Python"]
@@ -53,7 +53,7 @@ class DebugInfo:
return msg + f" => {self.action}"
class ParenthesisMismatchErrorNode(ErrorNode):
class ParenthesisMismatchError(ParsingError):
def __init__(self, error_int):
if isinstance(error_int, tuple):
@@ -79,7 +79,7 @@ class ParenthesisMismatchErrorNode(ErrorNode):
if id(self) == id(other):
return True
if not isinstance(other, ParenthesisMismatchErrorNode):
if not isinstance(other, ParenthesisMismatchError):
return False
return self.token_value == other.token_value and self.pos == other.pos
@@ -88,11 +88,11 @@ class ParenthesisMismatchErrorNode(ErrorNode):
return hash(self.pos)
def __repr__(self):
return f"ParenthesisMismatchErrorNode('{self.token_value}', {self.pos}"
return f"ParenthesisMismatchError('{self.token_value}', {self.pos}"
@dataclass()
class NoneAssociativeSequenceErrorNode(ErrorNode):
class NoneAssociativeSequenceError(ParsingError):
concept: Concept
first: int
second: int
@@ -100,7 +100,7 @@ class NoneAssociativeSequenceErrorNode(ErrorNode):
@dataclass()
class TooManyParametersFound(ErrorNode):
class TooManyParametersFoundError(ParsingError):
concept: Concept
pos: int # position of the concept
token: Token # token of the concept where the error was noticed
@@ -532,7 +532,7 @@ class InFixToPostFix:
# manage parenthesis that didn't find any match
if self._is_lpar(self.stack[-1]):
self._add_error(ParenthesisMismatchErrorNode(self.stack[-1]))
self._add_error(ParenthesisMismatchError(self.stack[-1]))
# The parameter must be part the current concept being parsed
assert len(self._concepts()) != 0 # sanity check
@@ -560,7 +560,7 @@ class InFixToPostFix:
if self.unrecognized_tokens.parenthesis_count > 0:
# parenthesis mismatch detected, do not try to resolve the unrecognized
self._add_error(ParenthesisMismatchErrorNode(self.unrecognized_tokens))
self._add_error(ParenthesisMismatchError(self.unrecognized_tokens))
self._put_to_out(self.unrecognized_tokens)
else:
# try to recognize concepts
@@ -676,7 +676,7 @@ class InFixToPostFix:
if stack.associativity == SyaAssociativity.No and current.associativity == SyaAssociativity.No:
self._add_error(
NoneAssociativeSequenceErrorNode(current.concept, stack_head.start, sya_parser_helper.start))
NoneAssociativeSequenceError(current.concept, stack_head.start, sya_parser_helper.start))
if current.associativity == SyaAssociativity.Left and current.precedence <= stack.precedence:
if self.debug_enabled:
@@ -719,7 +719,7 @@ class InFixToPostFix:
self.pop_stack_to_out()
if self._is_lpar(self.stack[-1]):
self._add_error(ParenthesisMismatchErrorNode(self.stack[-1]))
self._add_error(ParenthesisMismatchError(self.stack[-1]))
return False
# Manage concepts ending with long names
@@ -777,7 +777,7 @@ class InFixToPostFix:
if len(self.parameters_list) > len(current_concept.parameters_list_at_init):
# we have eaten the parameters expected between two expected tokens
# But there are some remaining parameters
self._add_error(TooManyParametersFound(
self._add_error(TooManyParametersFoundError(
current_concept.concept.concept,
current_concept.start,
token,
@@ -951,7 +951,7 @@ class InFixToPostFix:
# checks consistency if an lpar is found
if len(self.stack) == 0:
self._add_error(ParenthesisMismatchErrorNode((token, pos)))
self._add_error(ParenthesisMismatchError((token, pos)))
return None
if self._stack_isinstance(UnrecognizedTokensNode):
@@ -962,7 +962,7 @@ class InFixToPostFix:
start = i
break
else:
self._add_error(ParenthesisMismatchErrorNode((token, pos)))
self._add_error(ParenthesisMismatchError((token, pos)))
return None
source_code = self._make_source_code_with_concept(start, token, pos)
@@ -1073,7 +1073,7 @@ class InFixToPostFix:
if len(self.stack) == 0 and len(self.out) == 0:
# check for parenthesis mismatch
if self.unrecognized_tokens.parenthesis_count > 0:
self._add_error(ParenthesisMismatchErrorNode(self.unrecognized_tokens))
self._add_error(ParenthesisMismatchError(self.unrecognized_tokens))
return # no need to pop the buffer, as no concept is found
if self.debug_enabled:
@@ -1084,7 +1084,7 @@ class InFixToPostFix:
# validate parenthesis
if self._is_lpar(parser_helper) or self._is_rpar(parser_helper):
self._add_error(ParenthesisMismatchErrorNode(parser_helper))
self._add_error(ParenthesisMismatchError(parser_helper))
return None
self.manage_unrecognized()
+2 -2
View File
@@ -5,7 +5,7 @@ from core.builtin_concepts import BuiltinConcepts
from core.builtin_helpers import only_successful, parse_unrecognized, get_lexer_nodes, update_compiled
from parsers.SequenceNodeParser import SequenceNodeParser
from parsers.BaseNodeParser import ConceptNode, UnrecognizedTokensNode, SourceCodeNode, SourceCodeWithConceptNode
from parsers.BaseParser import BaseParser, ErrorNode
from parsers.BaseParser import BaseParser, ParsingError
from parsers.BnfNodeParser import BnfNodeParser
from parsers.SyaNodeParser import SyaNodeParser
@@ -18,7 +18,7 @@ PARSERS = ["EmptyString",
@dataclass()
class CannotParseNode(ErrorNode):
class CannotParseError(ParsingError):
unrecognized: UnrecognizedTokensNode
-21
View File
@@ -38,14 +38,8 @@ class SheerkaPromptCompleter(Completer):
for name, bound_method in sheerka.sheerka_methods.items():
self.builtins.append(self.get_completion_desc(name, bound_method.method, "builtin", ["context"]))
self.pipeable_builtins = []
for name, pipeable in self.sheerka.sheerka_pipeables.items():
self.pipeable_builtins.append(
self.get_completion_desc(name, pipeable.method, "builtin", ["context", "iterable"]))
self.exit_commands = [CompletionDesc(c, c, "command") for c in EXIT_COMMANDS]
self.globals = {k: v.method for k, v in self.sheerka.sheerka_methods.items()}
self.globals.update({k: v.method for k, v in self.sheerka.sheerka_pipeables.items()})
def get_completions(self, document, complete_event):
@@ -64,14 +58,6 @@ class SheerkaPromptCompleter(Completer):
yield from self.yield_completion_from_completion_desc(self.builtins, text)
return
if self.after_pipe(document.text, document.cursor_position):
if document.char_before_cursor == " ":
yield from self.yield_completion_from_completion_desc(self.pipeable_builtins, None)
else:
text = self.last_word(document.text, document.cursor_position)
yield from self.yield_completion_from_completion_desc(self.pipeable_builtins, text)
return
yield from self.yield_completion_from_completion_desc(self.builtins, text)
def get_completions_fom_jedi(self, document):
@@ -179,13 +165,6 @@ class SheerkaPromptCompleter(Completer):
return None
@staticmethod
def after_pipe(text, pos):
for i in range(pos)[::-1]:
if text[i] == "|":
return True
return False
@staticmethod
def last_word(text, pos, left_strip=True):
if pos == 0: