First version of explain. Creating a new parser was the wrong approach; need to reimplement.
@@ -4,7 +4,7 @@ from core.tokenizer import Tokenizer, Token, TokenKind, LexerError, Keywords
 def test_i_can_tokenize():
     source = "+*-/{}[]() ,;:.?\n\n\r\r\r\nidentifier_0\t \t10.15 10 'string\n' \"another string\"=|&<>c:name:"
-    source += "$£€!_identifier°~_^\\`#"
+    source += "$£€!_identifier°~_^\\`==#"
     tokens = list(Tokenizer(source))
     assert tokens[0] == Token(TokenKind.PLUS, "+", 0, 1, 1)
     assert tokens[1] == Token(TokenKind.STAR, "*", 1, 1, 2)
@@ -52,9 +52,10 @@ def test_i_can_tokenize():
     assert tokens[43] == Token(TokenKind.CARAT, '^', 106, 6, 48)
     assert tokens[44] == Token(TokenKind.BACK_SLASH, '\\', 107, 6, 49)
     assert tokens[45] == Token(TokenKind.BACK_QUOTE, '`', 108, 6, 50)
-    assert tokens[46] == Token(TokenKind.HASH, '#', 109, 6, 51)
+    assert tokens[46] == Token(TokenKind.EQUALSEQUALS, '==', 109, 6, 51)
+    assert tokens[47] == Token(TokenKind.HASH, '#', 111, 6, 53)
 
-    assert tokens[47] == Token(TokenKind.EOF, '', 110, 6, 52)
+    assert tokens[48] == Token(TokenKind.EOF, '', 112, 6, 54)
 
 
 @pytest.mark.parametrize("text, expected", [
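For context on the change above: appending == before the trailing # means the tokenizer should now emit a single EQUALSEQUALS token, pushing the HASH and EOF tokens two positions later in the stream. The following is an illustrative sketch reconstructed from the assertions themselves, assuming the core.tokenizer API imported by this test module and the same (kind, value, index, line, column) Token layout; it is not taken from the tokenizer implementation.

from core.tokenizer import Tokenizer, Token, TokenKind

# Sketch: '==' should be lexed greedily as one EQUALSEQUALS token rather than
# two separate '=' tokens. Positions follow the 0-based index, 1-based
# line/column convention seen in the existing assertions.
tokens = list(Tokenizer("==#"))
assert tokens[0] == Token(TokenKind.EQUALSEQUALS, '==', 0, 1, 1)
assert tokens[1] == Token(TokenKind.HASH, '#', 2, 1, 3)
assert tokens[2] == Token(TokenKind.EOF, '', 3, 1, 4)  # EOF sits one position past the last character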
@@ -74,6 +75,19 @@ def test_i_can_tokenize_identifiers(text, expected):
     assert comparison == expected
 
 
+@pytest.mark.parametrize("text", [
+    "123abc",
+    "123",
+    "abc",
+    "abc123"
+])
+def test_i_can_parse_word(text):
+    tokens = list(Tokenizer(text, parse_word=True))
+    assert tokens[0].type == TokenKind.WORD
+    assert tokens[0].value == text
+    assert tokens[1].index == len(text)
+
+
 @pytest.mark.parametrize("text, message, error_text, index, line, column", [
     ("'string", "Missing Trailing quote", "'string", 7, 1, 8),
     ('"string', "Missing Trailing quote", '"string', 7, 1, 8),