Sheerka-Old/tests/sheerkaql/test_lexer.py
from contextlib import contextmanager

from ply import lex

from sheerkaql import lexer


def compare(a, b):
    """Field-by-field equality check for two LexToken instances."""
    return (a.type == b.type and a.value == b.value and
            a.lexpos == b.lexpos and a.lineno == b.lineno)


def token(token_type, value, pos, line):
    """Build a LexToken with the given type, value, position and line number."""
    t = lex.LexToken()
    t.type = token_type
    t.value = value
    t.lexpos = pos
    t.lineno = line
    return t


@contextmanager
def comparable_tokens():
    """Temporarily patch LexToken so tokens compare by value, not identity."""
    eq = lex.LexToken.__eq__
    ne = lex.LexToken.__ne__
    setattr(lex.LexToken, "__eq__", compare)
    setattr(lex.LexToken, "__ne__", lambda a, b: not compare(a, b))
    try:
        yield
    finally:
        # Restore the original operators even if the block raises.
        setattr(lex.LexToken, "__eq__", eq)
        setattr(lex.LexToken, "__ne__", ne)


class TestSheerkaQueryLanguageLexer:

    def test_context_manager(self):
        t1 = token("NAME", 'a', 0, 1)
        t2 = token("NAME", 'a', 0, 1)
        assert t1 != t2
        with comparable_tokens():
            assert t1 == t2

    def test_NAME(self):
        clex = lexer.Lexer()
        # A name cannot start with a digit, so '9a' lexes as NUMBER 9
        # followed by NAME 'a'.
        clex.input('a 9a')
        tokens = [token("NAME", 'a', 0, 1),
                  token("NUMBER", 9, 2, 1),
                  token("NAME", 'a', 3, 1)]
        with comparable_tokens():
            for t in tokens:
                assert next(clex) == t

    def test_STRING(self):
        clex = lexer.Lexer()
        clex.input("'asdf' \"asdf\" '\n'")
        tokens = [token("STRING", 'asdf', 0, 1),
                  token("STRING", 'asdf', 7, 1),
                  token("STRING", '\n', 14, 1)]
        with comparable_tokens():
            for t in tokens:
                assert next(clex) == t

    def test_HEX(self):
        clex = lexer.Lexer()
        clex.input("0xab 0xab")
        tokens = [token("NUMBER", 0xab, 0, 1),
                  token("NUMBER", 171, 5, 1)]
        with comparable_tokens():
            for t in tokens:
                assert next(clex) == t

    def test_FLOAT(self):
        clex = lexer.Lexer()
        clex.input("1.2 .2 2.3e4 .2 2.3e4")
        tokens = [token("NUMBER", 1.2, 0, 1), token("NUMBER", .2, 4, 1),
                  token("NUMBER", 2.3e4, 7, 1), token("NUMBER", .2, 13, 1),
                  token("NUMBER", 2.3e4, 16, 1)]
        with comparable_tokens():
            for t in tokens:
                assert next(clex) == t

    def test_OCT(self):
        clex = lexer.Lexer()
        clex.input("073 073 073")
        tokens = [token("NUMBER", 0o73, 0, 1),
                  token("NUMBER", 59, 4, 1),
                  token("NUMBER", 59, 8, 1)]
        with comparable_tokens():
            for t in tokens:
                assert next(clex) == t

    def test_INTEGER(self):
        clex = lexer.Lexer()
        clex.input("73 730 7")
        tokens = [token("NUMBER", 73, 0, 1),
                  token("NUMBER", 730, 3, 1),
                  token("NUMBER", 7, 7, 1)]
        with comparable_tokens():
            for t in tokens:
                assert next(clex) == t

    def test_KEYWORDS(self):
        # Every reserved word should lex to its dedicated keyword token type.
        for value, token_type in lexer.reserved.items():
            clex = lexer.Lexer()
            clex.input(value)
            tokens = [token(token_type, value, 0, 1)]
            with comparable_tokens():
                for t in tokens:
                    assert next(clex) == t

    def test_chrs(self):
        # Simple tokens are declared as string attributes named t_<TYPE> on
        # the Lexer; check that each one lexes to its own token type.
        for token_type, value in [(attr[2:], getattr(lexer.Lexer, attr))
                                  for attr in dir(lexer.Lexer)
                                  if attr[:2] == 't_' and
                                  isinstance(getattr(lexer.Lexer, attr), str) and
                                  attr[2:] != 'ignore']:
            if value[0] == '\\':
                # Drop the regex escape so the raw character is fed to the lexer.
                value = value[1:]
            clex = lexer.Lexer()
            clex.input(value)
            tokens = [token(token_type, value, 0, 1)]
            with comparable_tokens():
                for t in tokens:
                    assert next(clex) == t