Added first version of DebugManager. Implemented draft of the rule engine

commit 315f8ea09b
parent cd066881b4
2020-11-20 13:41:45 +01:00
156 changed files with 8388 additions and 2852 deletions
+98 -6
@@ -2,8 +2,8 @@ from dataclasses import dataclass
import core.utils
import pytest
-from core.concept import ConceptParts, Concept
-from core.tokenizer import Token, TokenKind
+from core.concept import Concept
+from core.tokenizer import Token, TokenKind, Tokenizer, Keywords
@dataclass
@@ -15,6 +15,12 @@ class Obj:
    return hash((self.prop1, self.prop2))
@dataclass
class Obj2:
    prop1: object
    prop2: object
def get_tokens(lst):
    res = []
    for e in lst:
@@ -134,11 +140,25 @@ def test_i_can_product(a, b, expected):
])
def test_i_can_strip(input_as_list, expected_as_list):
-    actual = core.utils.strip_tokens(get_tokens(input_as_list))
+    actual = core.utils.strip_tokens(get_tokens(input_as_list))  # KSI 20201007 Why not use Tokenizer ?!! For perf ?
    expected = get_tokens(expected_as_list)
    assert actual == expected
@pytest.mark.parametrize("text, value, expected", [
("xxx=yyy", "=", 1),
("xxx", "=", -1),
("xxx = yyy", "=", 2),
("xxx = yyy", " = ", -1),
])
def test_i_can_index(text, value, expected):
    assert core.utils.index_tokens(Tokenizer(text), value) == expected
def test_i_can_manage_non_in_index_tokens():
    assert core.utils.index_tokens(None, "=") == -1
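These two tests fix the contract of index_tokens: the position of the first token whose value matches, -1 for a miss or a missing token stream, with whitespace runs counting as tokens of their own ("xxx = yyy" puts "=" at index 2). A minimal sketch consistent with those assertions, assuming tokens expose their raw text through a `value` attribute; the real core/utils.py code is not part of this hunk:

```python
def index_tokens(tokens, value):
    # First token whose value matches wins; -1 for no match or no tokens.
    if tokens is None:
        return -1
    for i, token in enumerate(tokens):
        if token.value == value:
            return i
    return -1
```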
def test_by_default_eof_is_not_stripped():
actual = core.utils.strip_tokens(get_tokens(["one", "two", " ", "\n", "<EOF>"]))
expected = get_tokens(["one", "two", " ", "\n", "<EOF>"])
@@ -176,6 +196,16 @@ def test_i_can_unstr_concept(text, expected_key, expected_id):
    assert i == expected_id
@pytest.mark.parametrize("text, expected_key, expected_id", [
("r:key:", "key", None),
("r:key|id:", "key", "id"),
])
def test_i_can_unstr_concept_rules(text, expected_key, expected_id):
    k, i = core.utils.unstr_concept(text, prefix="r:")
    assert k == expected_key
    assert i == expected_id
def test_i_can_str_concept():
assert core.utils.str_concept(("key", "id")) == "c:key|id:"
assert core.utils.str_concept((None, "id")) == "c:|id:"
@@ -186,10 +216,12 @@ def test_i_can_str_concept():
concept = Concept("foo").init_key()
assert core.utils.str_concept(concept) == "c:foo:"
concept.metadata.id = "1001"
concept.get_metadata().id = "1001"
assert core.utils.str_concept(concept) == "c:foo|1001:"
assert core.utils.str_concept(concept, drop_name=True) == "c:|1001:"
assert core.utils.str_concept(("key", "id"), prefix='r:') == "r:key|id:"
@pytest.mark.parametrize("text, expected", [
    (None, None),
@@ -198,7 +230,7 @@ def test_i_can_str_concept():
("xxx", None),
("xxx.", None),
("xxx.yyy", None),
("core.concept.ConceptParts.BODY", ConceptParts.BODY),
("core.tokenizer.Keywords.CONCEPT", Keywords.CONCEPT),
])
def test_i_can_decode_enum(text, expected):
    actual = core.utils.decode_enum(text)
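The table reads as: a dotted path naming a module, an enum class, and a member resolves to that member; anything unresolvable yields None. A sketch consistent with those cases, not necessarily how core.utils.decode_enum actually does it:

```python
import importlib

def decode_enum(text):
    # "core.tokenizer.Keywords.CONCEPT" -> Keywords.CONCEPT; unresolvable -> None.
    if not text:
        return None
    try:
        module_path, enum_name, member = text.rsplit(".", 2)
        enum_cls = getattr(importlib.import_module(module_path), enum_name)
        return enum_cls[member]
    except (ImportError, AttributeError, KeyError, ValueError):
        return None
```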
@@ -214,15 +246,24 @@ def test_encode_concept_key_id():
concept = Concept("foo").init_key()
assert core.utils.encode_concept(concept) == "__C__KEY_foo__ID_00None00__C__"
concept.metadata.id = "1001"
concept.get_metadata().id = "1001"
assert core.utils.encode_concept(concept) == "__C__KEY_foo__ID_1001__C__"
assert core.utils.encode_concept(("key", "id"), "R") == "__R__KEY_key__ID_id__R__"
assert core.utils.encode_concept((None, "id"), "R") == "__R__KEY_00None00__ID_id__R__"
assert core.utils.encode_concept(("key", None), "R") == "__R__KEY_key__ID_00None00__R__"
assert core.utils.encode_concept(("k + y", "id"), "R") == "__R__KEY_k000y__ID_id__R__"
def test_decode_concept_key_id():
assert core.utils.decode_concept("__C__KEY_key__ID_id__C__") == ("key", "id")
assert core.utils.decode_concept("__C__KEY_00None00__ID_id__C__") == (None, "id")
assert core.utils.decode_concept("__C__KEY_key__ID_00None00__C__") == ("key", None)
assert core.utils.decode_concept("__R__KEY_key__ID_id__R__", "R") == ("key", "id")
assert core.utils.decode_concept("__R__KEY_00None00__ID_id__R__", "R") == (None, "id")
assert core.utils.decode_concept("__R__KEY_key__ID_00None00__R__", "R") == ("key", None)
@pytest.mark.parametrize("a,b,expected", [
    ([], [], []),
@@ -246,3 +287,54 @@ def test_i_can_make_unique():
assert core.utils.make_unique(["a", "a", "b", "c", "c"]) == ["a", "b", "c"]
assert core.utils.make_unique([Obj("a", "b"), Obj("a", "c"), Obj("a", "b")]) == [Obj("a", "b"), Obj("a", "c")]
assert core.utils.make_unique([Obj("a", "b"), Obj("a", "c")], lambda o: o.prop1) == [Obj("a", "b")]
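All three assertions fit an order-preserving de-duplication where an optional key function chooses the identity and the first occurrence wins. A likely shape:

```python
def make_unique(items, key=None):
    # Order-preserving de-duplication; `key` picks what counts as "the same".
    seen, result = set(), []
    for item in items:
        ident = key(item) if key else item
        if ident not in seen:   # first occurrence wins
            seen.add(ident)
            result.append(item)
    return result
```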
@pytest.mark.parametrize("expression, bag, expected", [
("", {}, None),
(None, {}, None),
("a", {"a": 1}, 1),
("a.x", {"a.x": 1}, 1),
("a.prop1", {"a": Obj("prop1", "prop2")}, "prop1"),
("a.prop1.prop2.prop1", {"a": Obj2(Obj2("prop11", Obj2("prop121", "prop122")), "2")}, "prop121"),
("a.prop1.prop2.prop1", {"a": Obj2(Obj2("prop11", None), "2")}, None),
("a[1]", {"a": ['lst-first', 'lst-second']}, 'lst-second'),
("a['bar']", {"a": {'foo': 'dict-first', 'bar': 'dict-second'}}, 'dict-second'),
("a.prop1[0]", {"a": Obj2(['lst-first', 'lst-second'], None)}, 'lst-first'),
("a.prop2['bar']", {"a": Obj2(None, {'foo': 'dict-first', 'bar': 'dict-second'})}, 'dict-second'),
("a.bar", {"a": {'foo': 'dict-first', 'bar': 'dict-second'}}, 'dict-second'),
("a.prop2.bar", {"a": Obj2(None, {'foo': 'dict-first', 'bar': 'dict-second'})}, 'dict-second'),
])
def test_i_can_evaluate_expression(expression, bag, expected):
    assert core.utils.evaluate_expression(expression, bag) == expected
@pytest.mark.parametrize("expression, bag, expected_error, prop_name", [
("a", {}, NameError, "a"),
("a.prop3", {"a": Obj("prop1", "prop2")}, NameError, "prop3"),
])
def test_i_cannot_evaluate_expression(expression, bag, expected_error, prop_name):
    with pytest.raises(expected_error) as e:
        core.utils.evaluate_expression(expression, bag)
    assert e.value.args == (prop_name,)
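Between the happy-path table and the error cases, the evaluator's rules are fully pinned down: an exact bag key wins ("a.x"), dotted segments walk attributes with a dict fallback, [n]/['k'] subscripts index in, a None along the way short-circuits to None, and an unknown first name or attribute raises NameError carrying that name. A sketch under those rules; structure and helper names are assumptions:

```python
import ast
import re

_SEGMENT = re.compile(r"^(\w+)(?:\[(.+)\])?$")  # name plus optional [subscript]

def evaluate_expression(expression, bag):
    if not expression:
        return None
    if expression in bag:              # literal key such as "a.x" wins outright
        return bag[expression]
    current, first = None, True
    for segment in expression.split("."):
        name, subscript = _SEGMENT.match(segment).groups()
        if first:
            if name not in bag:
                raise NameError(name)
            current, first = bag[name], False
        elif current is None:          # short-circuit instead of raising
            return None
        elif isinstance(current, dict):
            current = current.get(name)        # "a.bar" falls back to a['bar']
        elif hasattr(current, name):
            current = getattr(current, name)
        else:
            raise NameError(name)
        if subscript is not None:
            current = current[ast.literal_eval(subscript)]
    return current
```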
@pytest.mark.parametrize("text, expected_text", [
("hello world", "hello world"),
("'hello' 'world'", "'hello' 'world'"),
("def concept a from", "def concept a from"),
("()[]{}1=1.5+-/*><&é", "()[]{}1=1.5+-/*><&é"),
("execute(c:concept_name:)", "execute(c:concept_name:)")
])
def test_i_can_get_text_from_tokens(text, expected_text):
    tokens = list(Tokenizer(text))
    assert core.utils.get_text_from_tokens(tokens) == expected_text
@pytest.mark.parametrize("text, custom, expected_text", [
("execute(c:concept_name:)", {TokenKind.CONCEPT: lambda t: f"__C__{t.value[0]}"}, "execute(__C__concept_name)")
])
def test_i_can_get_text_from_tokens_with_custom_switcher(text, custom, expected_text):
    tokens = list(Tokenizer(text))
    assert core.utils.get_text_from_tokens(tokens, custom) == expected_text
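The round-trip cases say the default rendering must reproduce the source text exactly, whitespace and all, while `custom` lets a per-TokenKind callback take over (here turning a concept token into its __C__-encoded key). A sketch assuming each Token keeps its raw source in a `text` attribute and its kind in `kind`; both attribute names are guesses, only `value` is confirmed by the tests:

```python
def get_text_from_tokens(tokens, custom=None):
    # Concatenate raw token text; a TokenKind-keyed callback overrides the default.
    custom = custom or {}
    parts = []
    for token in tokens:
        formatter = custom.get(token.kind)
        parts.append(formatter(token) if formatter else token.text)
    return "".join(parts)
```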