"""Tests for parsers.ParserInput: init, error detection, tokenization, and reset."""
import pytest

from parsers.ParserInput import ParserInput
from parsers.tokenizer import LexerError, TokenKind


def test_i_can_parser_input():
    """A well-formed input initializes cleanly and exposes its cursor bounds."""
    source = ParserInput("def concept a")

    assert source.init() is True
    # No lexer failure was recorded and the cursor sits before the first token.
    assert source.exception is None
    assert source.pos == -1
    assert source.end == 6


def test_i_can_detect_errors():
    """A malformed input (unterminated string) fails init() with a LexerError."""
    source = ParserInput('def concept "a')

    assert source.init() is False
    assert isinstance(source.exception, LexerError)


def test_can_as_text_and_track_tokens():
    """as_text() rewrites concept/rule tokens and records each one in the tracker."""
    source = ParserInput("execute(c:name1: if r:#id: else c:name2:)")
    source.init()

    renderers = {
        TokenKind.CONCEPT: lambda tok: f"__CONCEPT__{tok.value[0]}",
        TokenKind.RULE: lambda tok: f"__RULE__{tok.value[1]}",
    }
    seen = {}
    rendered = source.as_text(renderers, seen)

    assert rendered == "execute(__CONCEPT__name1 if __RULE__id else __CONCEPT__name2)"
    assert len(seen) == 3
    # Each substituted label maps back to the exact token object it replaced.
    expected_positions = {
        "__CONCEPT__name1": 2,
        "__RULE__id": 6,
        "__CONCEPT__name2": 10,
    }
    for label, index in expected_positions.items():
        assert seen[label] == source.all_tokens[index]


def test_i_must_call_init_before_call_as_text():
    """Calling as_text() before init() raises with an explicit message."""
    source = ParserInput("execute(c:name1: if r:#id: else c:name2:)")

    with pytest.raises(Exception) as excinfo:
        source.as_text()

    assert excinfo.value.args[0] == "You must call init() first !"


def test_i_can_get_next_token():
    """next_token() walks the stream, optionally surfacing whitespace tokens."""
    source = ParserInput("def concept a")
    source.init()

    # (skip_whitespace, expected kind, expected value) for each step.
    steps = [
        (True, TokenKind.IDENTIFIER, "def"),
        (True, TokenKind.IDENTIFIER, "concept"),
        (False, TokenKind.WHITESPACE, " "),
        (False, TokenKind.IDENTIFIER, "a"),
    ]
    for skip_ws, kind, value in steps:
        assert source.next_token(skip_whitespace=skip_ws) is True
        assert source.token.type == kind
        assert source.token.value == value

    # Stream exhausted: the call reports False and the current token is EOF.
    assert source.next_token() is False
    assert source.token.type == TokenKind.EOF


def test_next_after_eof_is_eof():
    """Once EOF is reached, every further next_token() call stays at EOF."""
    source = ParserInput("hi")
    source.init()

    assert source.next_token() is True
    assert source.token.type == TokenKind.IDENTIFIER
    assert source.token.value == "hi"

    # EOF is sticky: repeated calls keep failing and keep reporting EOF.
    for _ in range(3):
        assert source.next_token() is False
        assert source.token.type == TokenKind.EOF


def test_i_can_manage_blank_input():
    """Whitespace-only input goes straight to EOF when whitespace is skipped."""
    source = ParserInput(" ")
    source.init()

    assert source.next_token() is False
    assert source.token.type == TokenKind.EOF


def test_i_can_manage_blank_input_when_skip_whitespace_is_false():
    """Whitespace-only input surfaces its blank token before reaching EOF."""
    source = ParserInput(" ")
    source.init()

    assert source.next_token(skip_whitespace=False) is True
    token = source.token
    assert token.type == TokenKind.WHITESPACE
    assert token.value == " "

    assert source.next_token(skip_whitespace=False) is False
    assert source.token.type == TokenKind.EOF


def test_i_can_reset():
    """reset() rewinds the stream so it can be consumed again from the start."""
    source = ParserInput("hello world ")
    source.init()

    def drain():
        # Consume both identifiers, then confirm the stream is exhausted.
        for expected in ("hello", "world"):
            assert source.next_token() is True
            assert source.token.type == TokenKind.IDENTIFIER
            assert source.token.value == expected
        assert source.next_token() is False
        assert source.token.type == TokenKind.EOF

    drain()
    # Reading past EOF again still reports EOF.
    assert source.next_token() is False
    assert source.token.type == TokenKind.EOF

    source.reset()
    drain()


def test_i_can_parse_when_input_ends_by_white_space():
    """Trailing whitespace produces no spurious token before EOF."""
    source = ParserInput("hello world ")
    source.init()

    for expected in ("hello", "world"):
        assert source.next_token() is True
        assert source.token.type == TokenKind.IDENTIFIER
        assert source.token.value == expected

    assert source.next_token() is False
    assert source.token.type == TokenKind.EOF


def test_i_can_parse_when_input_starts_by_white_space():
    """Leading whitespace is skipped before the first real token."""
    source = ParserInput(" hello world")
    source.init()

    for expected in ("hello", "world"):
        assert source.next_token() is True
        assert source.token.type == TokenKind.IDENTIFIER
        assert source.token.value == expected

    assert source.next_token() is False
    assert source.token.type == TokenKind.EOF