from dataclasses import dataclass

import pytest

from common.utils import decode_enum, get_class, to_dict, str_concept, unstr_concept
from helpers import get_concept
from parsers.tokenizer import Keywords, Token, TokenKind


@dataclass
class Obj:
    prop1: str
    prop2: str

    def __hash__(self):
        # Hash both properties so the hash stays consistent with __eq__ below.
        return hash((self.prop1, self.prop2))

    def __eq__(self, other):
        if not isinstance(other, Obj):
            return False

        return self.prop1 == other.prop1 and self.prop2 == other.prop2


@dataclass
class Obj2:
    prop1: object
    prop2: object


def get_tokens(lst):
    """Turn a list of raw strings into Token objects: whitespace, newlines and
    the "<EOF>" marker map to their dedicated kinds, everything else becomes an
    identifier."""
    res = []
    for e in lst:
        if e == " ":
            res.append(Token(TokenKind.WHITESPACE, " ", 0, 0, 0))
        elif e == "\n":
            res.append(Token(TokenKind.NEWLINE, "\n", 0, 0, 0))
        elif e == "<EOF>":
            res.append(Token(TokenKind.EOF, "\n", 0, 0, 0))
        else:
            res.append(Token(TokenKind.IDENTIFIER, e, 0, 0, 0))

    return res


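# Sanity-check sketch for the helper above; it assumes Token instances compare
# by value (e.g. a dataclass or NamedTuple), which this repository may or may
# not guarantee -- an illustrative addition, not part of the original suite.
def test_get_tokens_maps_strings_to_token_kinds():
    expected = [
        Token(TokenKind.IDENTIFIER, "foo", 0, 0, 0),
        Token(TokenKind.WHITESPACE, " ", 0, 0, 0),
        Token(TokenKind.NEWLINE, "\n", 0, 0, 0),
        Token(TokenKind.EOF, "\n", 0, 0, 0),
    ]
    assert get_tokens(["foo", " ", "\n", "<EOF>"]) == expected

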
def test_i_can_get_class():
    # example of a class that should be resolvable from its dotted path
    create_parser_input = get_class("evaluators.CreateParserInput.CreateParserInput")

    assert isinstance(create_parser_input, type)


@pytest.mark.parametrize("text, expected_key, expected_id", [
|
||||
(None, None, None),
|
||||
(10, None, None),
|
||||
("", None, None),
|
||||
("xxx", None, None),
|
||||
("c:", None, None),
|
||||
("c:key", None, None),
|
||||
("c:key:", "key", None),
|
||||
("c:key#id", None, None),
|
||||
("c:key#id:", "key", "id"),
|
||||
("c:#id:", None, "id"),
|
||||
("c:key#:", "key", None),
|
||||
("c:key#id:x", None, None),
|
||||
("c:one: plus c:two:", None, None),
|
||||
("c:one#id: plus c:two:", None, None),
|
||||
])
|
||||
def test_i_can_unstr_concept(text, expected_key, expected_id):
|
||||
k, i = unstr_concept(text)
|
||||
assert k == expected_key
|
||||
assert i == expected_id
|
||||
|
||||
|
||||
@pytest.mark.parametrize("text, expected_key, expected_id", [
|
||||
("r:key:", "key", None),
|
||||
("r:key#id:", "key", "id"),
|
||||
])
|
||||
def test_i_can_unstr_concept_rules(text, expected_key, expected_id):
|
||||
k, i = unstr_concept(text, prefix="r:")
|
||||
assert k == expected_key
|
||||
assert i == expected_id
|
||||
|
||||
|
||||
def test_i_can_str_concept():
    assert str_concept(("key", "id")) == "c:key#id:"
    assert str_concept((None, "id")) == "c:#id:"
    assert str_concept(("key", None)) == "c:key:"
    assert str_concept((None, None)) == ""
    assert str_concept(("key", "id"), drop_name=True) == "c:#id:"

    concept = get_concept("foo")
    assert str_concept(concept) == "c:foo:"

    concept.get_metadata().id = "1001"
    assert str_concept(concept) == "c:foo#1001:"
    assert str_concept(concept, drop_name=True) == "c:#1001:"

    assert str_concept(("key", "id"), prefix="r:") == "r:key#id:"


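# Round-trip sketch: the cases above suggest str_concept and unstr_concept are
# inverses for a (key, id) pair, both for the default "c:" prefix and for a
# custom one. This only restates behaviour already covered above and is an
# illustrative addition, not part of the original suite.
def test_str_and_unstr_concept_round_trip():
    k, i = unstr_concept(str_concept(("key", "id")))
    assert (k, i) == ("key", "id")

    k, i = unstr_concept(str_concept(("key", "id"), prefix="r:"), prefix="r:")
    assert (k, i) == ("key", "id")

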
@pytest.mark.parametrize("text, expected", [
|
||||
(None, None),
|
||||
(10, None),
|
||||
("", None),
|
||||
("xxx", None),
|
||||
("xxx.", None),
|
||||
("xxx.yyy", None),
|
||||
("parsers.tokenizer.Keywords.CONCEPT", Keywords.CONCEPT),
|
||||
])
|
||||
def test_i_can_decode_enum(text, expected):
|
||||
actual = decode_enum(text)
|
||||
assert actual == expected
|
||||
|
||||
|
||||
@pytest.mark.parametrize("items, expected", [
|
||||
([], {}),
|
||||
([Obj("a", "1"), Obj("a", "2"), Obj("b", "3")], {"a": [Obj("a", "1"), Obj("a", "2")],
|
||||
"b": [Obj("b", "3")]}),
|
||||
])
|
||||
def test_i_can_to_dict(items, expected):
|
||||
assert to_dict(items, lambda obj: obj.prop1) == expected
|
||||
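# Illustrative extra case, assuming to_dict groups by an arbitrary key function
# exactly as it does for obj.prop1 above; not part of the original suite.
def test_to_dict_groups_by_other_key():
    items = [Obj("a", "1"), Obj("a", "2"), Obj("b", "3")]
    expected = {"1": [Obj("a", "1")], "2": [Obj("a", "2")], "3": [Obj("b", "3")]}
    assert to_dict(items, lambda obj: obj.prop2) == expected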