Added keyword c:xxx: to express that we want the concept, not its body
+14 -7
@@ -3,7 +3,7 @@ from core.tokenizer import Tokenizer, Token, TokenKind, LexerError, Keywords
 
 
 def test_i_can_tokenize():
-    source = "+*-/{}[]() ,;:.?\n\n\r\r\r\nidentifier_0\t \t10.15 10 'string\n' \"another string\"=|&<>"
+    source = "+*-/{}[]() ,;:.?\n\n\r\r\r\nidentifier_0\t \t10.15 10 'string\n' \"another string\"=|&<>c:name:"
     tokens = list(Tokenizer(source))
     assert tokens[0] == Token(TokenKind.PLUS, "+", 0, 1, 1)
     assert tokens[1] == Token(TokenKind.STAR, "*", 1, 1, 2)
@@ -39,6 +39,9 @@ def test_i_can_tokenize():
     assert tokens[31] == Token(TokenKind.AMPER, '&', 78, 6, 20)
     assert tokens[32] == Token(TokenKind.LESS, '<', 79, 6, 21)
     assert tokens[33] == Token(TokenKind.GREATER, '>', 80, 6, 22)
+    assert tokens[34] == Token(TokenKind.CONCEPT, 'name', 81, 6, 23)
+
+    assert tokens[35] == Token(TokenKind.EOF, '', 88, 6, 30)
 
 
 @pytest.mark.parametrize("text, expected", [
@@ -58,15 +61,19 @@ def test_i_can_tokenize_identifiers(text, expected):
     assert comparison == expected
 
 
-@pytest.mark.parametrize("text, error_text, index, line, column", [
-    ("'string", "'string", 7, 1, 8),
-    ('"string', '"string', 7, 1, 8),
-    ('"a" + "string', '"string', 13, 1, 14),
-    ('"a"\n\n"string', '"string', 12, 3, 8),
+@pytest.mark.parametrize("text, message, error_text, index, line, column", [
+    ("'string", "Missing Trailing quote", "'string", 7, 1, 8),
+    ('"string', "Missing Trailing quote", '"string', 7, 1, 8),
+    ('"a" + "string', "Missing Trailing quote", '"string', 13, 1, 14),
+    ('"a"\n\n"string', "Missing Trailing quote", '"string', 12, 3, 8),
+    ("c::", "Context name not found", "", 2, 1, 3),
+    ("c:foo\nbar:", "New line is forbidden in concept name", "foo", 5, 1, 6),
+    ("c:foo", "Missing ending colon", "foo", 5, 1, 6)
 ])
-def test_i_can_detect_unfinished_strings(text, error_text, index, line, column):
+def test_i_can_detect_unfinished_strings(text, message, error_text, index, line, column):
     with pytest.raises(LexerError) as e:
         list(Tokenizer(text))
+    assert e.value.message == message
     assert e.value.text == error_text
     assert e.value.index == index
     assert e.value.line == line
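For context, here is a minimal standalone sketch of the c:xxx: scanning rule these new test cases pin down. The real implementation lives in core.tokenizer and is not part of this diff, so everything beyond the LexerError fields asserted above (message, text, index, line, column) is an assumption, including the hypothetical helper name scan_concept and its signature.

# Sketch only: core.tokenizer is not shown in this commit. The LexerError
# shape is taken from the test assertions above; the helper is hypothetical.

class LexerError(Exception):
    def __init__(self, message, text, index, line, column):
        super().__init__(message)
        self.message, self.text = message, text
        self.index, self.line, self.column = index, line, column


def scan_concept(source, index, line, column):
    # `index`/`column` point at the leading 'c' of "c:...:", just as the
    # expected CONCEPT token above does (index 81, column 23 for the 'c').
    i, col = index + 2, column + 2              # skip the "c:" prefix
    start = i
    while i < len(source) and source[i] not in (":", "\n"):
        i, col = i + 1, col + 1
    name = source[start:i]
    if i == len(source):
        raise LexerError("Missing ending colon", name, i, line, col)
    if source[i] == "\n":
        raise LexerError("New line is forbidden in concept name", name, i, line, col)
    if not name:
        raise LexerError("Context name not found", "", i, line, col)
    return name, i + 1, col + 1                 # consume the closing ':'

Fed the three malformed inputs from the parametrized test ("c::", "c:foo\nbar:", "c:foo"), this sketch raises exactly the (message, text, index, line, column) tuples asserted there, and on "c:name:" it yields the bare name without the surrounding markers, matching the CONCEPT token's text.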