Refactored the sheerka class: split it into sub-handlers. Refactored the unit tests to use classes.
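The class-based test layout the message refers to is not visible in this hunk. Purely as an illustration (class and method names here are hypothetical, not taken from the commit), the function-style tests in the diff below would group into plain pytest classes along these lines:

import core.utils


class TestSysargToString:
    # Illustrative sketch only, not part of this commit. pytest collects any
    # class named Test* (with no __init__); the assertions mirror two of the
    # parametrized cases in the diff below.
    def test_empty_list_gives_empty_string(self):
        assert core.utils.sysarg_to_string([]) == ""

    def test_words_are_joined_with_spaces(self):
        assert core.utils.sysarg_to_string(["hello", "world"]) == "hello world"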
@@ -0,0 +1,145 @@
import core.utils
import pytest

from core.tokenizer import Token, TokenKind


@pytest.mark.parametrize("lst, as_string", [
    (None, ""),
    ([], ""),
    (["hello", "world"], "hello world"),
    # (["hello world", "my friend"], '"hello world" "my friend"')
])
def test_i_can_create_string_from_a_list(lst, as_string):
    assert core.utils.sysarg_to_string(lst) == as_string


def test_i_can_get_classes():
    classes = list(core.utils.get_classes("core.builtin_concepts"))
    error_concept = core.utils.get_class("core.builtin_concepts.ErrorConcept")
    return_value_concept = core.utils.get_class("core.builtin_concepts.ReturnValueConcept")

    assert len(classes) > 2
    assert error_concept in classes
    assert return_value_concept in classes


def test_i_can_get_base_classes():
    classes = list(core.utils.get_classes_from_package("parsers"))

    # example of classes that should be in the result
    base_parser = core.utils.get_class("parsers.BaseParser.BaseParser")
    default_parser = core.utils.get_class("parsers.DefaultParser.DefaultParser")
    exact_concept_parser = core.utils.get_class("parsers.ExactConceptParser.ExactConceptParser")
    python_parser = core.utils.get_class("parsers.PythonParser.PythonParser")
    node = core.utils.get_class("parsers.BaseParser.Node")
    def_concept_node = core.utils.get_class("parsers.DefaultParser.DefConceptNode")
    python_node = core.utils.get_class("parsers.PythonParser.PythonNode")

    assert base_parser in classes
    assert default_parser in classes
    assert exact_concept_parser in classes
    assert python_parser in classes
    assert node in classes
    assert def_concept_node in classes
    assert python_node in classes


def test_i_can_get_sub_classes():
    sub_classes = core.utils.get_sub_classes("parsers", "parsers.BaseParser.BaseParser")

    # example of classes that should be (or not) in the result
    base_parser = core.utils.get_class("parsers.BaseParser.BaseParser")
    default_parser = core.utils.get_class("parsers.DefaultParser.DefaultParser")
    exact_concept_parser = core.utils.get_class("parsers.ExactConceptParser.ExactConceptParser")
    python_parser = core.utils.get_class("parsers.PythonParser.PythonParser")
    concept_lexer_parser = core.utils.get_class("parsers.ConceptLexerParser.ConceptLexerParser")

    assert base_parser not in sub_classes
    assert default_parser in sub_classes
    assert exact_concept_parser in sub_classes
    assert python_parser in sub_classes
    assert concept_lexer_parser in sub_classes


@pytest.mark.parametrize("a, b, expected", [
    ([], [], []),
    ([], ['a'], ['a']),
    ([[]], ['a'], [['a']]),
    (['a'], [], ['a']),
    ([['a']], [], [['a']]),

    ([['a']], ['b'], [['a', 'b']]),
    ([['a'], ['b']], ['c'], [['a', 'c'], ['b', 'c']]),
    ([['a1', 'a2'], ['b1', 'b2', 'b3']], ['c'], [['a1', 'a2', 'c'], ['b1', 'b2', 'b3', 'c']]),
    ([[]], ['a', 'b'], [['a'], ['b']]),
    ([['a'], ['b']], ['c', 'd', 'e'], [['a', 'c'], ['b', 'c'], ['a', 'd'], ['b', 'd'], ['a', 'e'], ['b', 'e']]),
])
def test_i_can_product(a, b, expected):
    res = core.utils.product(a, b)
    assert res == expected


@pytest.mark.parametrize("input_as_list, expected_as_list", [
    ([" "], []),
    ([" ", "one"], ["one"]),
    (["one", " "], ["one"]),
    ([" ", "one", " "], ["one"]),

    (["\n", "one"], ["one"]),
    (["one", "\n"], ["one"]),
    (["\n", "one", "\n"], ["one"]),

    ([" ", "\n", "one"], ["one"]),
    (["one", " ", "\n"], ["one"]),
    ([" ", "\n", "one", " ", "\n"], ["one"]),

    (["\n", " ", "one"], ["one"]),
    (["one", "\n", " "], ["one"]),
    (["\n", " ", "one", "\n", " "], ["one"]),

    ([" ", "\n", " ", "one"], ["one"]),
    (["one", " ", "\n", " "], ["one"]),
    ([" ", "\n", " ", "one", " ", "\n", " "], ["one"]),

    (["\n", " ", "\n", "one"], ["one"]),
    (["one", "\n", " ", "\n"], ["one"]),
    (["\n", " ", "\n", "one", "\n", " ", "\n"], ["one"]),
])
def test_i_can_strip(input_as_list, expected_as_list):
    actual = core.utils.strip_tokens(get_tokens(input_as_list))
    expected = get_tokens(expected_as_list)
    assert actual == expected


def test_by_default_eof_is_not_stripped():
    actual = core.utils.strip_tokens(get_tokens(["one", "two", " ", "\n", "<EOF>"]))
    expected = get_tokens(["one", "two", " ", "\n", "<EOF>"])
    assert actual == expected


def test_i_can_strip_eof():
    actual = core.utils.strip_tokens(get_tokens(["one", "two", " ", "\n", "<EOF>"]), True)
    expected = get_tokens(["one", "two"])
    assert actual == expected


def test_i_can_escape():
    actual = core.utils.escape_char("hello 'world' my friend", "'")
    assert actual == "hello \\'world\\' my friend"


def get_tokens(lst):
    # Helper: build Token objects from the plain strings used in the
    # parametrized cases above (" " -> WHITESPACE, "\n" -> NEWLINE,
    # "<EOF>" -> EOF, anything else -> IDENTIFIER).
    res = []
    for e in lst:
        if e == " ":
            res.append(Token(TokenKind.WHITESPACE, " ", 0, 0, 0))
        elif e == "\n":
            res.append(Token(TokenKind.NEWLINE, "\n", 0, 0, 0))
        elif e == "<EOF>":
            res.append(Token(TokenKind.EOF, "\n", 0, 0, 0))
        else:
            res.append(Token(TokenKind.IDENTIFIER, e, 0, 0, 0))

    return res
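The product cases above pin the expected behaviour down fairly tightly: each element of b is appended to every list in a, with b driving the outer loop, and an empty side passes the other side through. core.utils.product itself is not part of this diff, so the following is only a sketch inferred from those cases, not the project's implementation:

def product(a, b):
    # Sketch inferred solely from the parametrized cases above; the real
    # core.utils.product may be implemented differently.
    if not a:
        return list(b)          # e.g. ([], ['a']) -> ['a']
    if not b:
        return list(a)          # e.g. (['a'], []) -> ['a']
    result = []
    for suffix in b:            # outer loop over b, matching the expected ordering
        for prefix in a:
            result.append(list(prefix) + [suffix])
    return result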
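Likewise, the strip tests characterise core.utils.strip_tokens as trimming leading and trailing WHITESPACE/NEWLINE tokens, keeping EOF unless the extra flag is passed. A sketch consistent with those cases follows; the parameter name and the token's kind attribute are assumptions, since the real implementation is not shown in this commit:

from core.tokenizer import TokenKind


def strip_tokens(tokens, strip_eof=False):
    # Sketch matching the test cases above, assuming the kind passed to
    # Token's constructor is exposed as token.kind and that the second
    # positional argument (True in test_i_can_strip_eof) is a boolean flag.
    skippable = {TokenKind.WHITESPACE, TokenKind.NEWLINE}
    if strip_eof:
        skippable = skippable | {TokenKind.EOF}
    result = list(tokens)
    while result and result[0].kind in skippable:
        result.pop(0)
    while result and result[-1].kind in skippable:
        result.pop()
    return result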
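Finally, the class-discovery helpers (get_class, get_classes, get_classes_from_package, get_sub_classes) are only exercised here, not defined. As used in these tests, get_class resolves a dotted "package.module.ClassName" path to a class object; a typical sketch of that behaviour, not necessarily the project's own code:

import importlib


def get_class(dotted_path):
    # "parsers.BaseParser.BaseParser" -> the BaseParser class object.
    module_path, _, class_name = dotted_path.rpartition(".")
    return getattr(importlib.import_module(module_path), class_name)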