Sheerka-Old/tests/core/test_ParserInput.py
kodjo 89e1f20975 Fixed #131 : Implement ExprToConditions
Fixed #130 : ArithmeticOperatorParser
Fixed #129 : python_wrapper : create_namespace
Fixed #128 : ExpressionParser: Cannot parse func(x) infixed concept 'xxx'
2021-10-13 16:06:57 +02:00

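"""Unit tests for ParserInput (core.sheerka.services.SheerkaExecute).

These tests cover: slicing the input with start/end, token iteration with and
without an <EOF> token, overriding that behaviour in reset(), building a
ParserInput from a pre-tokenized list, re-parsing the same text after reset(),
and single-token lookahead via the_token_after(). Note that the ParserInput
keyword is spelled yield_oef in this codebase, while Tokenizer uses yield_eof.
"""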

import pytest

from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import TokenKind, Tokenizer


@pytest.mark.parametrize("text, start, end, expected", [
    ("def concept a", None, None, "def concept a"),
    ("&é#(-è_çà)='string'", None, None, "&é#(-è_çà)='string'"),
    ("def concept a", 2, None, "concept a"),
    ("def concept a", 0, 2, "def concept"),
])
def test_i_can_use_parser_input(text, start, end, expected):
    parser_input = ParserInput(text, start=start, end=end).reset()
    assert parser_input.as_text() == expected


def test_i_can_get_the_next_token_when_yield_eof_is_activated():
    parser_input = ParserInput("def concept a from bnf 'xyz' as 'xyz'", yield_oef=True).reset()
    res = []
    parser_input.next_token()
    while True:
        res.append(f"{parser_input.token.repr_value}")
        if parser_input.token.type == TokenKind.EOF:
            break
        parser_input.next_token()
    expected = ['def', 'concept', 'a', 'from', 'bnf', "'xyz'", 'as', "'xyz'", '<EOF>']
    assert res == expected


def test_i_can_get_the_next_token_when_yield_eof_is_deactivated():
    parser_input = ParserInput("def concept a from bnf 'xyz' as 'xyz'", yield_oef=False).reset()
    res = []
    while parser_input.next_token():
        res.append(f"{parser_input.token.repr_value}")
    expected = ['def', 'concept', 'a', 'from', 'bnf', "'xyz'", 'as', "'xyz'"]
    assert res == expected


def test_i_can_get_the_next_token_when_start_and_end_are_provided():
    parser_input = ParserInput("def concept a concept name from bnf 'xyz' as 'xyz'", start=4, end=9).reset()
    res = []
    while parser_input.next_token(skip_whitespace=False):
        res.append(f"{parser_input.token.repr_value}")
    assert res == ['a', '<ws>', 'concept', '<ws>', 'name', '<ws>']


def test_i_can_get_next_token_when_yield_eof_is_false():
    parser_input = ParserInput("def concept a from bnf 'xyz' as 'xyz'", yield_oef=False).reset()
    res = []
    while parser_input.next_token():
        res.append(f"{parser_input.token.repr_value}")
    assert res == ['def', 'concept', 'a', 'from', 'bnf', "'xyz'", 'as', "'xyz'"]


def test_i_can_override_yield_oef_within_reset():
    parser_input = ParserInput("def concept a from bnf 'xyz' as 'xyz'", yield_oef=False).reset(yield_oef=True)
    res = []
    parser_input.next_token()
    while True:
        res.append(f"{parser_input.token.repr_value}")
        if parser_input.token.type == TokenKind.EOF:
            break
        parser_input.next_token()
    assert res == ['def', 'concept', 'a', 'from', 'bnf', "'xyz'", 'as', "'xyz'", "<EOF>"]
    assert not parser_input.yield_oef


# list_has_eof controls Tokenizer(yield_eof=...), parser_has_eof controls the
# ParserInput constructor, and reset_has_eof is passed to reset(); the test
# expects the reset() value alone to decide whether <EOF> is yielded.
@pytest.mark.parametrize("list_has_eof, parser_has_eof, reset_has_eof", [
    (True, True, True),
    (True, False, True),
    (False, True, True),
    (False, False, True),
    (True, True, False),
    (True, False, False),
    (False, True, False),
    (False, False, False),
])
def test_i_can_get_the_next_token_when_initialised_with_tokens(list_has_eof, parser_has_eof, reset_has_eof):
    tokens = list(Tokenizer(" def concept a as 'xyz' ", yield_eof=list_has_eof))
    parser_input = ParserInput(" def concept a as 'xyz' ", tokens, yield_oef=parser_has_eof).reset()
    parser_input.reset(reset_has_eof)
    res = []
    while parser_input.next_token():
        res.append(f"{parser_input.token.repr_value}")
    expected = ['def', 'concept', 'a', 'as', "'xyz'"]
    if reset_has_eof:
        expected.append("<EOF>")
    assert res == expected


def test_i_can_parse_twice():
    # Re-parsing the same text after reset() must yield the same tokens as a
    # freshly built ParserInput, whichever of the two drives the loop.
    text = """
    def concept a + b
    where isinstance(a, int) and isinstance(b, int)
    pre isinstance(a, int) and isinstance(b, int)
    post isinstance(res, int)
    as:
        def func(x,y):
            return x+y
        func(a,b)
    """
    p1 = ParserInput(text).reset()
    while p1.next_token():
        pass
    p1.reset()
    p2 = ParserInput(text).reset()
    while p1.next_token():
        p2.next_token()
        assert p1.token == p2.token
    p1.reset()
    p2 = ParserInput(text).reset()
    while p2.next_token():
        p1.next_token()
        assert p1.token == p2.token


@pytest.mark.parametrize("text, skip_whitespace, expected", [
    ("first second", True, "second"),
    ("first second", False, "<ws>"),
    ("first", True, "<EOF>"),
    ("first", False, "<EOF>"),
    ("first ", True, "<EOF>"),
    ("first ", False, "<ws>"),
    ("first:", True, ":"),
    ("first:", False, ":"),
])
def test_i_can_get_the_token_after(text, skip_whitespace, expected):
    parser_input = ParserInput(text).reset()
    parser_input.next_token()
    assert parser_input.the_token_after(skip_whitespace).repr_value == expected


def test_i_can_define_a_sub_part():
    text = "Hello Koffi the great guy."
    tokens = list(Tokenizer(text))
    parser_input = ParserInput(None, tokens, 2, 6)
    assert repr(parser_input) == "ParserInput(from_tokens'Koffi the great')"
    parser_input.reset()
    assert repr(parser_input) == "ParserInput(from_tokens'Koffi the great')"
    res = []
    while parser_input.next_token():
        res.append(f"{parser_input.token.repr_value}")
    assert res == ["Koffi", "the", "great"]
    assert repr(parser_input) == "ParserInput(from_tokens'Koffi the great')"


def test_i_can_define_parse_input_from_tokens():
    text = "Hello Koffi the great guy."
    tokens = list(Tokenizer(text))
    parser_input = ParserInput(None, tokens)
    assert repr(parser_input) == "ParserInput(from_tokens'Hello Koffi the great guy.')"
    parser_input.reset()
    assert repr(parser_input) == "ParserInput(from_tokens'Hello Koffi the great guy.')"
    res = []
    while parser_input.next_token():
        res.append(f"{parser_input.token.repr_value}")
    assert res == ["Hello", "Koffi", "the", "great", "guy", "."]
    assert repr(parser_input) == "ParserInput(from_tokens'Hello Koffi the great guy.')"