144 lines
4.6 KiB
Python
144 lines
4.6 KiB
Python
import pytest
|
|
from core.sheerka.services.SheerkaExecute import ParserInput
|
|
from core.tokenizer import Tokenizer, TokenKind
|
|
|
|
|
|
@pytest.mark.parametrize("text, start, end, expected", [
    ("def concept a", None, None, "def concept a"),
    ("&é#(-è_çà)='string'", None, None, "&é#(-è_çà)='string'"),
    ("def concept a", 2, None, "concept a"),
    ("def concept a", 0, 2, "def concept"),
])
def test_i_can_use_parser_input(text, start, end, expected):
    """A ParserInput renders its (optionally start/end-sliced) input back as text."""
    sliced = ParserInput(text, start=start, end=end).reset()
    assert sliced.as_text() == expected
|
|
|
|
|
|
def test_i_can_get_the_next_token_when_yield_eof_is_activated():
    """With yield_oef=True the stream terminates with an explicit <EOF> token."""
    stream = ParserInput("def concept a from bnf 'xyz' as 'xyz'", yield_oef=True).reset()

    seen = []
    stream.next_token()
    # Collect every token up to (but not including) EOF, advancing each pass.
    while stream.token.type != TokenKind.EOF:
        seen.append(f"{stream.token.repr_value}")
        stream.next_token()
    # The EOF token itself is part of the yielded sequence.
    seen.append(f"{stream.token.repr_value}")

    assert seen == ['def', 'concept', 'a', 'from', 'bnf', "'xyz'", 'as', "'xyz'", '<EOF>']
|
|
|
|
|
|
def test_i_can_get_the_next_token_when_yield_eof_is_deactivated():
    """With yield_oef=False iteration stops without an <EOF> token being emitted."""
    stream = ParserInput("def concept a from bnf 'xyz' as 'xyz'", yield_oef=False).reset()

    collected = []
    while stream.next_token():
        collected.append(f"{stream.token.repr_value}")

    assert collected == ['def', 'concept', 'a', 'from', 'bnf', "'xyz'", 'as', "'xyz'"]
|
|
|
|
|
|
def test_i_can_get_the_next_token_when_start_and_end_are_provided():
    """start/end restrict tokenisation to a sub-range; whitespace tokens are kept
    when skip_whitespace=False."""
    window = ParserInput(
        "def concept a concept name from bnf 'xyz' as 'xyz'", start=4, end=9
    ).reset()

    tokens = []
    while window.next_token(skip_whitespace=False):
        tokens.append(f"{window.token.repr_value}")

    assert tokens == ['a', '<ws>', 'concept', '<ws>', 'name', '<ws>']
|
|
|
|
|
|
def test_i_can_get_next_token_when_yield_eof_is_false():
    """No <EOF> token is produced when yield_oef is False.

    NOTE(review): this duplicates
    test_i_can_get_the_next_token_when_yield_eof_is_deactivated — consider
    removing one of the two.
    """
    stream = ParserInput("def concept a from bnf 'xyz' as 'xyz'", yield_oef=False).reset()

    values = []
    has_token = stream.next_token()
    while has_token:
        values.append(f"{stream.token.repr_value}")
        has_token = stream.next_token()

    assert values == ['def', 'concept', 'a', 'from', 'bnf', "'xyz'", 'as', "'xyz'"]
|
|
|
|
|
|
def test_i_can_override_yield_oef_within_reset():
    """reset(yield_oef=True) applies to that run; the instance flag stays False."""
    stream = ParserInput(
        "def concept a from bnf 'xyz' as 'xyz'", yield_oef=False
    ).reset(yield_oef=True)

    seen = []
    stream.next_token()
    while stream.token.type != TokenKind.EOF:
        seen.append(f"{stream.token.repr_value}")
        stream.next_token()
    seen.append(f"{stream.token.repr_value}")

    assert seen == ['def', 'concept', 'a', 'from', 'bnf', "'xyz'", 'as', "'xyz'", "<EOF>"]
    # The override was scoped to reset(); the stored attribute is still falsy.
    assert not stream.yield_oef
|
|
|
|
|
|
@pytest.mark.parametrize("list_has_eof, parser_has_eof, reset_has_eof", [
    (True, True, True),
    (True, False, True),
    (False, True, True),
    (False, False, True),
    (True, True, False),
    (True, False, False),
    (False, True, False),
    (False, False, False),
])
def test_i_can_get_the_next_token_when_initialised_with_tokens(list_has_eof, parser_has_eof, reset_has_eof):
    """Whatever EOF flags the token list and constructor carried, only the flag
    passed to the final reset() decides whether <EOF> is emitted."""
    pretokenised = list(Tokenizer(" def concept a as 'xyz' ", yield_eof=list_has_eof))
    stream = ParserInput(" def concept a as 'xyz' ", pretokenised, yield_oef=parser_has_eof).reset()
    # Second reset with a positional flag overrides the earlier settings.
    stream.reset(reset_has_eof)

    seen = []
    while stream.next_token():
        seen.append(f"{stream.token.repr_value}")

    expected = ['def', 'concept', 'a', 'as', "'xyz'"] + (["<EOF>"] if reset_has_eof else [])
    assert seen == expected
|
|
|
|
|
|
def test_i_can_parse_twice():
    """After exhausting and reset()-ing a parser, its tokens match those of a
    fresh parser over the same text — whichever of the two drives the loop."""
    text = """
def concept a + b
where isinstance(a, int) and isinstance(b, int)
pre isinstance(a, int) and isinstance(b, int)
post isinstance(res, int)
as:
def func(x,y):
return x+y
func(a,b)
"""

    # Exhaust the first parser completely once.
    first = ParserInput(text).reset()
    while first.next_token():
        pass

    # Re-run it against a fresh parser, first one driving.
    first.reset()
    second = ParserInput(text).reset()
    while first.next_token():
        second.next_token()
        assert first.token == second.token

    # And again with the fresh parser driving the loop.
    first.reset()
    second = ParserInput(text).reset()
    while second.next_token():
        first.next_token()
        assert first.token == second.token
|
|
|
|
|
|
@pytest.mark.parametrize("text, skip_whitespace, expected", [
    ("first second", True, "second"),
    ("first second", False, "<ws>"),
    ("first", True, "<EOF>"),
    ("first", False, "<EOF>"),
    ("first ", True, "<EOF>"),
    ("first ", False, "<ws>"),
    ("first:", True, ":"),
    ("first:", False, ":"),
])
def test_i_can_get_the_token_after(text, skip_whitespace, expected):
    """the_token_after() reports the token following the current one, honouring
    the skip_whitespace flag."""
    stream = ParserInput(text).reset()
    stream.next_token()
    lookahead = stream.the_token_after(skip_whitespace)
    assert lookahead.repr_value == expected
|