Fixed #18: Parsing and evaluating Python
This commit is contained in:
@@ -2,9 +2,9 @@ from dataclasses import dataclass
|
||||
|
||||
import pytest
|
||||
|
||||
from common.utils import decode_enum, get_class, to_dict, str_concept, unstr_concept
|
||||
from common.utils import decode_enum, dict_product, get_class, get_text_from_tokens, str_concept, to_dict, unstr_concept
|
||||
from helpers import get_concept
|
||||
from parsers.tokenizer import Keywords, Token, TokenKind
|
||||
from parsers.tokenizer import Keywords, Token, TokenKind, Tokenizer
|
||||
|
||||
|
||||
@dataclass
|
||||
@@ -120,3 +120,55 @@ def test_i_can_decode_enum(text, expected):
|
||||
])
|
||||
def test_i_can_to_dict(items, expected):
    """to_dict indexes the given items by the key extracted from each object.

    Here the key function reads ``prop1`` off every item; the resulting
    mapping must match the parametrized expectation exactly.
    """
    result = to_dict(items, lambda item: item.prop1)
    assert result == expected
|
||||
|
||||
|
||||
@pytest.mark.parametrize("text, expected_text", [
    ("hello world", "hello world"),
    ("'hello' 'world'", "'hello' 'world'"),
    ("def concept a from", "def concept a from"),
    ("()[]{}1=1.5+-/*><&é", "()[]{}1=1.5+-/*><&é"),
    ("execute(c:concept_name:)", "execute(c:concept_name:)"),
])
def test_i_can_get_text_from_tokens(text, expected_text):
    """Tokenizing a string and rendering the tokens back round-trips the text."""
    token_stream = list(Tokenizer(text))
    rendered = get_text_from_tokens(token_stream)
    assert rendered == expected_text
|
||||
|
||||
|
||||
@pytest.mark.parametrize("text, custom, expected_text", [
    (
        "execute(c:concept_name:)",
        {TokenKind.CONCEPT: lambda t: f"__C__{t.value[0]}"},
        "execute(__C__concept_name)",
    ),
])
def test_i_can_get_text_from_tokens_with_custom_switcher(text, custom, expected_text):
    """A custom switcher entry overrides the default rendering for that token kind."""
    rendered = get_text_from_tokens(list(Tokenizer(text)), custom)
    assert rendered == expected_text
|
||||
|
||||
|
||||
def test_i_can_track_tokens():
    """The tracker dict maps each custom-rendered name back to its source token.

    Uses a switcher that rewrites CONCEPT and RULE tokens, then checks the
    tracker holds exactly those three renamed tokens at the expected
    positions of the token stream.
    """
    source = "execute(c:name1: if r:#id: else c:name2:)"
    renderers = {
        TokenKind.CONCEPT: lambda t: f"__CONCEPT__{t.value[0]}",
        TokenKind.RULE: lambda t: f"__RULE__{t.value[1]}",
    }
    seen = {}
    stream = list(Tokenizer(source))

    get_text_from_tokens(stream, renderers, seen)

    assert len(seen) == 3
    # Stream layout: execute ( c:name1: if r:#id: else c:name2: ) — hence
    # the concept/rule tokens sit at indices 2, 6 and 10.
    assert seen["__CONCEPT__name1"] == stream[2]
    assert seen["__RULE__id"] == stream[6]
    assert seen["__CONCEPT__name2"] == stream[10]
|
||||
|
||||
|
||||
@pytest.mark.parametrize("a,b,expected", [
    # Empty inputs: one empty side yields the other side unchanged.
    ([], [], []),
    ([{"a": "a", "b": "b"}], [], [{"a": "a", "b": "b"}]),
    ([], [{"a": "a", "b": "b"}], [{"a": "a", "b": "b"}]),
    # One-by-many: the single dict is merged into each dict of the other list.
    ([{"a": "a", "b": "b"}], [{"d": "d1"}, {"d": "d2"}],
     [{"a": "a", "b": "b", "d": "d1"},
      {"a": "a", "b": "b", "d": "d2"}]),
    ([{"d": "d1"}, {"d": "d2"}], [{"a": "a", "b": "b"}],
     [{"a": "a", "b": "b", "d": "d1"},
      {"a": "a", "b": "b", "d": "d2"}]),
    ([{"a": "a", "b": "b"}], [{"d": "d", "e": "e"}],
     [{"a": "a", "b": "b", "d": "d", "e": "e"}]),
    # Many-by-many: full cartesian merge of both lists.
    ([{"a": "a"}, {"b": "b"}], [{"d": "d"}, {"e": "e"}],
     [{"a": "a", "d": "d"},
      {"a": "a", "e": "e"},
      {"b": "b", "d": "d"},
      {"b": "b", "e": "e"}]),
])
def test_dict_product(a, b, expected):
    """dict_product yields the pairwise merge of every dict in a with every dict in b."""
    result = dict_product(a, b)
    assert result == expected
|
||||
|
||||
Reference in New Issue
Block a user