"""Unit tests for ``core.utils``.

Covers: sysarg string building, class-discovery helpers, list products,
token stripping, character escaping, and decoding of the textual
representations of concepts and enums.
"""

import pytest

import core.utils
from core.concept import ConceptParts
from core.tokenizer import Token, TokenKind


def get_tokens(lst):
    """Turn a list of plain strings into ``Token`` objects for the strip tests.

    Mapping: a single space -> WHITESPACE, a newline -> NEWLINE, the empty
    string -> EOF, anything else -> IDENTIFIER.  Line/column/offset do not
    matter for these tests and are fixed to 0.
    """
    res = []
    for e in lst:
        if e == " ":
            res.append(Token(TokenKind.WHITESPACE, " ", 0, 0, 0))
        elif e == "\n":
            res.append(Token(TokenKind.NEWLINE, "\n", 0, 0, 0))
        elif e == "":
            # Bug fix: the EOF token used to be built with the value "\n"
            # (copy-paste from the NEWLINE branch above); EOF carries no text.
            # Safe here: every test compares two lists built by this same
            # helper, and test_by_default_eof_is_not_stripped shows
            # strip_tokens keys on the token kind, not its value.
            res.append(Token(TokenKind.EOF, "", 0, 0, 0))
        else:
            res.append(Token(TokenKind.IDENTIFIER, e, 0, 0, 0))
    return res


@pytest.mark.parametrize("lst, as_string", [
    (None, ""),
    ([], ""),
    (["hello", "world"], "hello world"),
    # (["hello world", "my friend"], '"hello world" "my friend"')
])
def test_i_can_create_string_from_a_list(lst, as_string):
    """None/empty input yields ""; otherwise items are space-joined."""
    assert core.utils.sysarg_to_string(lst) == as_string


def test_i_can_get_classes():
    """get_classes yields (at least) the known builtin concept classes."""
    classes = list(core.utils.get_classes("core.builtin_concepts"))
    error_concept = core.utils.get_class("core.builtin_concepts.ErrorConcept")
    return_value_concept = core.utils.get_class("core.builtin_concepts.ReturnValueConcept")
    assert len(classes) > 2
    assert error_concept in classes
    assert return_value_concept in classes


def test_i_can_get_base_classes():
    """get_classes_from_package discovers the classes of the ``parsers`` package."""
    classes = list(core.utils.get_classes_from_package("parsers"))
    # example of classes that should be in the result
    base_parser = core.utils.get_class("parsers.BaseParser.BaseParser")
    default_parser = core.utils.get_class("parsers.DefaultParser.DefaultParser")
    exact_concept_parser = core.utils.get_class("parsers.ExactConceptParser.ExactConceptParser")
    python_parser = core.utils.get_class("parsers.PythonParser.PythonParser")
    node = core.utils.get_class("parsers.BaseParser.Node")
    def_concept_node = core.utils.get_class("parsers.DefaultParser.DefConceptNode")
    python_node = core.utils.get_class("parsers.PythonParser.PythonNode")
    assert base_parser in classes
    assert default_parser in classes
    assert exact_concept_parser in classes
    assert python_parser in classes
    assert node in classes
    assert def_concept_node in classes
    assert python_node in classes


def test_i_can_get_sub_classes():
    """get_sub_classes returns strict subclasses only (the base itself is excluded)."""
    sub_classes = core.utils.get_sub_classes("parsers", "parsers.BaseParser.BaseParser")
    # example of classes that should be (or not) in the result
    base_parser = core.utils.get_class("parsers.BaseParser.BaseParser")
    default_parser = core.utils.get_class("parsers.DefaultParser.DefaultParser")
    exact_concept_parser = core.utils.get_class("parsers.ExactConceptParser.ExactConceptParser")
    python_parser = core.utils.get_class("parsers.PythonParser.PythonParser")
    concept_lexer_parser = core.utils.get_class("parsers.ConceptLexerParser.ConceptLexerParser")
    assert base_parser not in sub_classes
    assert default_parser in sub_classes
    assert exact_concept_parser in sub_classes
    assert python_parser in sub_classes
    assert concept_lexer_parser in sub_classes


@pytest.mark.parametrize("a,b, expected", [
    ([], [], []),
    ([], ['a'], ['a']),
    ([[]], ['a'], [['a']]),
    (['a'], [], ['a']),
    ([['a']], [], [['a']]),
    ([['a']], ['b'], [['a', 'b']]),
    ([['a'], ['b']], ['c'], [['a', 'c'], ['b', 'c']]),
    ([['a1', 'a2'], ['b1', 'b2', 'b3']], ['c'],
     [['a1', 'a2', 'c'], ['b1', 'b2', 'b3', 'c']]),
    ([[]], ['a', 'b'], [['a'], ['b']]),
    ([['a'], ['b']], ['c', 'd', 'e'],
     [['a', 'c'], ['b', 'c'], ['a', 'd'], ['b', 'd'], ['a', 'e'], ['b', 'e']]),
])
def test_i_can_product(a, b, expected):
    """product extends every sub-list of ``a`` with each element of ``b``."""
    res = core.utils.product(a, b)
    assert res == expected


@pytest.mark.parametrize("input_as_list, expected_as_list", [
    ([" "], []),
    ([" ", "one"], ["one"]),
    (["one", " "], ["one"]),
    ([" ", "one", " "], ["one"]),
    (["\n", "one"], ["one"]),
    (["one", "\n"], ["one"]),
    (["\n", "one", "\n"], ["one"]),
    ([" ", "\n", "one"], ["one"]),
    (["one", " ", "\n"], ["one"]),
    ([" ", "\n", "one", " ", "\n"], ["one"]),
    (["\n", " ", "one"], ["one"]),
    (["one", "\n", " "], ["one"]),
    (["\n", " ", "one", "\n", " "], ["one"]),
    ([" ", "\n", " ", "one"], ["one"]),
    (["one", " ", "\n", " "], ["one"]),
    ([" ", "\n", " ", "one", " ", "\n", " "], ["one"]),
    (["\n", " ", "\n", "one"], ["one"]),
    (["one", "\n", " ", "\n"], ["one"]),
    (["\n", " ", "\n", "one", "\n", " ", "\n"], ["one"]),
])
def test_i_can_strip(input_as_list, expected_as_list):
    """strip_tokens removes leading/trailing WHITESPACE and NEWLINE tokens."""
    actual = core.utils.strip_tokens(get_tokens(input_as_list))
    expected = get_tokens(expected_as_list)
    assert actual == expected


def test_by_default_eof_is_not_stripped():
    """Without the strip-EOF flag a trailing EOF (and what precedes it) stays."""
    actual = core.utils.strip_tokens(get_tokens(["one", "two", " ", "\n", ""]))
    expected = get_tokens(["one", "two", " ", "\n", ""])
    assert actual == expected


def test_i_can_strip_eof():
    """Passing ``True`` additionally strips the EOF token and trailing blanks."""
    actual = core.utils.strip_tokens(get_tokens(["one", "two", " ", "\n", ""]), True)
    expected = get_tokens(["one", "two"])
    assert actual == expected


def test_i_can_escape():
    """escape_char backslash-escapes every occurrence of the given character."""
    actual = core.utils.escape_char("hello 'world' my friend", "'")
    assert actual == "hello \\'world\\' my friend"


@pytest.mark.parametrize("text, expected_key, expected_id", [
    (None, None, None),
    (10, None, None),
    ("", None, None),
    ("xxx", None, None),
    (":c:", None, None),
    (":c:key", None, None),
    (":c:key:", "key", None),
    (":c:key:id", None, None),
    (":c:key:id:", "key", "id"),
])
def test_i_can_decode_concept_repr(text, expected_key, expected_id):
    """decode_concept parses ':c:key:' / ':c:key:id:'; anything else -> (None, None)."""
    k, i = core.utils.decode_concept(text)
    assert k == expected_key
    assert i == expected_id


@pytest.mark.parametrize("text, expected", [
    (None, None),
    (10, None),
    ("", None),
    ("xxx", None),
    ("xxx.", None),
    ("xxx.yyy", None),
    ("core.concept.ConceptParts.BODY", ConceptParts.BODY),
])
def test_i_can_decode_enum(text, expected):
    """decode_enum resolves a dotted enum member path, returning None on bad input."""
    actual = core.utils.decode_enum(text)
    assert actual == expected