Fixed #55 : DefConceptParser: failed to recognize concept

Fixed #62 : DefConceptParser: parsing error
Fixed #64 : DefConceptParser: Failed to parse when there are too many concept keywords
Fixed #65 : DefConceptParser: Add auto_eval keyword
Fixed #66 : DefConceptParser: Add def_var keyword
Fixed #67 : Add get_errors()
This commit is contained in:
2021-04-13 15:15:17 +02:00
parent 81e67147e9
commit bef5f3208c
17 changed files with 838 additions and 235 deletions
+26 -10
View File
@@ -7,14 +7,23 @@ from parsers.BaseParser import UnexpectedEofParsingError, UnexpectedTokenParsing
from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
def get_tokens(items):
    """Tokenize each item and keep only the first token of each.

    EOF tokens are suppressed (yield_eof=False), so every item is expected
    to produce at least one real token.
    """
    tokens = []
    for item in items:
        first_token, *_ = Tokenizer(item, yield_eof=False)
        tokens.append(first_token)
    return tokens
class TestBaseCustomGrammarParser(TestUsingMemoryBasedSheerka):
@staticmethod
def compare_results(actual, expected, compare_str=False):
resolved_expected = {}
for k, v in expected.items():
tokens = list(Tokenizer(v, yield_eof=False))
resolved_expected[k] = [tokens[0]] + tokens[2:]
if isinstance(v, str):
# case like {Keywords.DEF_VAR: "def_var var1 def_var var2"}
tokens = list(Tokenizer(v, yield_eof=False))
resolved_expected[k] = [tokens[0]] + tokens[2:]
else:
# case like {Keywords.DEF_VAR: get_tokens(["def_var", "var1", "var2"])}
resolved_expected[k] = v
def get_better_representation(value):
better_repr = {}
@@ -23,6 +32,7 @@ class TestBaseCustomGrammarParser(TestUsingMemoryBasedSheerka):
better_repr[k] = [tokens[0].repr_value, value]
return better_repr
# it's easier to compare two list of string
actual_to_compare = get_better_representation(actual)
expected_to_compare = get_better_representation(resolved_expected)
@@ -67,6 +77,20 @@ func(a)
res = parser.get_parts(["when"])
self.compare_results(res, expected)
@pytest.mark.parametrize("text, allow_multiple, expected", [
    ("def_var var1 def_var var2", {}, {Keywords.DEF_VAR: "def_var var1 def_var var2"}),
    ("def_var var1 def_var var2", {"def_var"}, {Keywords.DEF_VAR: get_tokens(["def_var", "var1", "var2"])}),
    ("def_var x y z def_var var2", {"def_var"}, {Keywords.DEF_VAR: get_tokens(["def_var", "'x y z'", "var2"])}),
    ("def_var 'x y z' def_var var2", {"def_var"}, {Keywords.DEF_VAR: get_tokens(["def_var", "'x y z'", "var2"])}),
    ("def_var var1 def_var x y z def_var var2", {"def_var"},
     {Keywords.DEF_VAR: get_tokens(["def_var", "var1", "'x y z'", "var2"])}),
])
def test_i_can_get_parts_when_allow_multiple_is_set(self, text, allow_multiple, expected):
    """get_parts() honours allow_multiple: repeated keywords are merged when enabled."""
    sheerka, context, parser = self.init_parser(text)
    parts = parser.get_parts(["def_var"], allow_multiple=allow_multiple)
    self.compare_results(parts, expected)
@pytest.mark.parametrize("text", [
"",
"no keyword",
@@ -88,14 +112,6 @@ func(a)
"when",
[Keywords.PRINT])]
def test_i_can_detect_when_a_keyword_appears_several_times(self):
    """A keyword declared twice must yield exactly one syntax error in the sink."""
    sheerka, context, parser = self.init_parser("print hello when True print True")
    parser.get_parts(["print"])
    errors = parser.error_sink
    assert len(errors) == 1
    first_error = errors[0]
    assert isinstance(first_error, SyntaxErrorNode)
    assert first_error.message == "Too many 'print' declarations."
@pytest.mark.parametrize("text", [
"print",
"print ",