Added keyword c:xxx: to express that we want the concept itself, not its evaluated body
This commit is contained in:
@@ -4,4 +4,14 @@ from core.tokenizer import Tokenizer, Token, TokenKind
|
||||
from parsers.BaseParser import BaseParser
|
||||
|
||||
|
||||
@pytest.mark.parametrize("text, expected_text", [
    # plain text round-trips unchanged through the tokenizer
    ("hello world", "hello world"),
    ("'hello' 'world'", "'hello' 'world'"),
    ("def concept a from", "def concept a from"),
    ("()[]{}1=1.5+-/*><&é", "()[]{}1=1.5+-/*><&é"),
    # a c:<name>: concept reference is rendered with __C__ markers
    ("execute(c:concept_name:)", "execute(__C__concept_name__C__)")
])
def test_i_can_get_text_from_tokens(text, expected_text):
    """Tokenizing a string and rendering the tokens back reproduces its text."""
    token_stream = list(Tokenizer(text))
    rendered = BaseParser.get_text_from_tokens(token_stream)
    assert rendered == expected_text
|
||||
|
||||
@@ -14,6 +14,10 @@ def get_context():
|
||||
return ExecutionContext("test", Event(), sheerka)
|
||||
|
||||
|
||||
def get_context_name(context):
    """Test helper injected into the evaluator: return *context*'s ``name`` attribute."""
    return getattr(context, "name")
|
||||
|
||||
|
||||
@pytest.mark.parametrize("ret_val, expected", [
|
||||
(ReturnValueConcept("some_name", True, ParserResultConcept(value=PythonNode("", None))), True),
|
||||
(ReturnValueConcept("some_name", True, ParserResultConcept(value="other thing")), False),
|
||||
@@ -57,32 +61,6 @@ def test_i_cannot_eval_simple_concept(concept):
|
||||
assert context.sheerka.isinstance(evaluated.value, BuiltinConcepts.NOT_FOR_ME)
|
||||
|
||||
|
||||
#
|
||||
# def test_i_can_eval_expression_that_references_concepts():
|
||||
# context = get_context()
|
||||
# context.sheerka.add_in_cache(Concept("foo"))
|
||||
#
|
||||
# parsed = PythonParser().parse(context, "foo")
|
||||
# evaluated = PythonEvaluator().eval(context, parsed)
|
||||
#
|
||||
# assert evaluated.status
|
||||
# assert evaluated.value == Concept("foo").init_key()
|
||||
#
|
||||
#
|
||||
# def test_i_can_eval_expression_that_references_concepts_with_body():
|
||||
# """
|
||||
# I can test expression with variables
|
||||
# :return:
|
||||
# """
|
||||
# context = get_context()
|
||||
# context.sheerka.add_in_cache(Concept("foo", body="2"))
|
||||
#
|
||||
# parsed = PythonParser().parse(context, "foo")
|
||||
# evaluated = PythonEvaluator().eval(context, parsed)
|
||||
#
|
||||
# assert evaluated.status
|
||||
# assert evaluated.value == 2
|
||||
|
||||
def test_i_can_eval_expression_with_that_references_concepts():
|
||||
"""
|
||||
I can test modules with variables
|
||||
@@ -127,24 +105,24 @@ def test_i_can_eval_module_with_that_references_concepts_with_body():
|
||||
assert evaluated.status
|
||||
assert evaluated.value == 2
|
||||
|
||||
#
|
||||
# def test_i_can_eval_concept_with_props():
|
||||
# context = get_context()
|
||||
# context.sheerka.add_in_cache(Concept("foo").set_prop("prop", "'a'"))
|
||||
#
|
||||
# parsed = PythonParser().parse(context, "foo")
|
||||
# evaluated = PythonEvaluator().eval(context, parsed)
|
||||
#
|
||||
# assert evaluated.status
|
||||
# assert evaluated.value == Concept("foo").set_prop("prop", "a").init_key() # evaluated version of foo
|
||||
#
|
||||
#
|
||||
# def test_i_cannot_eval_when_body_references_unknown_concept():
|
||||
# context = get_context()
|
||||
# context.sheerka.add_in_cache(Concept("foo", body="bar"))
|
||||
#
|
||||
# parsed = PythonParser().parse(context, "foo")
|
||||
# evaluated = PythonEvaluator().eval(context, parsed)
|
||||
#
|
||||
# assert not evaluated.status
|
||||
# assert context.sheerka.isinstance(evaluated.value, BuiltinConcepts.ERROR)
|
||||
|
||||
def test_i_can_eval_concept_token():
    """A c:<name>: token hands the concept itself to the callee, not its body."""
    context = get_context()
    context.sheerka.add_in_cache(Concept("foo", body="2"))

    # c:foo: passes the concept object, so get_context_name sees .name == "foo"
    parsed = PythonParser().parse(context, "get_context_name(c:foo:)")
    evaluator = PythonEvaluator()
    evaluator.locals["get_context_name"] = get_context_name
    result = evaluator.eval(context, parsed)

    assert result.status
    assert result.value == "foo"

    # sanity check: a bare reference resolves to the body value 2, which has no .name
    parsed = PythonParser().parse(context, "get_context_name(foo)")
    evaluator = PythonEvaluator()
    evaluator.locals["get_context_name"] = get_context_name
    result = evaluator.eval(context, parsed)

    assert not result.status
    assert result.body.body.args[0] == "'int' object has no attribute 'name'"
|
||||
|
||||
@@ -55,3 +55,15 @@ def test_i_can_detect_error():
|
||||
assert isinstance(res.value, ParserResultConcept)
|
||||
assert isinstance(res.value.value[0], PythonErrorNode)
|
||||
assert isinstance(res.value.value[0].exception, SyntaxError)
|
||||
|
||||
|
||||
def test_i_can_parse_a_concept():
    """A c:<name>: token parses into a PythonNode whose AST uses the __C__ marker form."""
    source = "c:concept_name: + 1"

    res = PythonParser().parse(get_context(), source)

    assert res
    # the node keeps the original text while the AST is built over the mangled identifier
    expected = PythonNode(
        "c:concept_name: + 1",
        ast.parse("__C__concept_name__C__+1", mode="eval"))
    assert res.value.value == expected
|
||||
|
||||
+14
-7
@@ -3,7 +3,7 @@ from core.tokenizer import Tokenizer, Token, TokenKind, LexerError, Keywords
|
||||
|
||||
|
||||
def test_i_can_tokenize():
|
||||
source = "+*-/{}[]() ,;:.?\n\n\r\r\r\nidentifier_0\t \t10.15 10 'string\n' \"another string\"=|&<>"
|
||||
source = "+*-/{}[]() ,;:.?\n\n\r\r\r\nidentifier_0\t \t10.15 10 'string\n' \"another string\"=|&<>c:name:"
|
||||
tokens = list(Tokenizer(source))
|
||||
assert tokens[0] == Token(TokenKind.PLUS, "+", 0, 1, 1)
|
||||
assert tokens[1] == Token(TokenKind.STAR, "*", 1, 1, 2)
|
||||
@@ -39,6 +39,9 @@ def test_i_can_tokenize():
|
||||
assert tokens[31] == Token(TokenKind.AMPER, '&', 78, 6, 20)
|
||||
assert tokens[32] == Token(TokenKind.LESS, '<', 79, 6, 21)
|
||||
assert tokens[33] == Token(TokenKind.GREATER, '>', 80, 6, 22)
|
||||
assert tokens[34] == Token(TokenKind.CONCEPT, 'name', 81, 6, 23)
|
||||
|
||||
assert tokens[35] == Token(TokenKind.EOF, '', 88, 6, 30)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("text, expected", [
|
||||
@@ -58,15 +61,19 @@ def test_i_can_tokenize_identifiers(text, expected):
|
||||
assert comparison == expected
|
||||
|
||||
|
||||
@pytest.mark.parametrize("text, error_text, index, line, column", [
|
||||
("'string", "'string", 7, 1, 8),
|
||||
('"string', '"string', 7, 1, 8),
|
||||
('"a" + "string', '"string', 13, 1, 14),
|
||||
('"a"\n\n"string', '"string', 12, 3, 8),
|
||||
@pytest.mark.parametrize("text, message, error_text, index, line, column", [
|
||||
("'string", "Missing Trailing quote", "'string", 7, 1, 8),
|
||||
('"string', "Missing Trailing quote", '"string', 7, 1, 8),
|
||||
('"a" + "string', "Missing Trailing quote", '"string', 13, 1, 14),
|
||||
('"a"\n\n"string', "Missing Trailing quote", '"string', 12, 3, 8),
|
||||
("c::", "Context name not found", "", 2, 1, 3),
|
||||
("c:foo\nbar:", "New line is forbidden in concept name", "foo", 5, 1, 6),
|
||||
("c:foo", "Missing ending colon", "foo", 5, 1, 6)
|
||||
])
|
||||
def test_i_can_detect_unfinished_strings(text, error_text, index, line, column):
|
||||
def test_i_can_detect_unfinished_strings(text, message, error_text, index, line, column):
|
||||
with pytest.raises(LexerError) as e:
|
||||
list(Tokenizer(text))
|
||||
assert e.value.message == message
|
||||
assert e.value.text == error_text
|
||||
assert e.value.index == index
|
||||
assert e.value.line == line
|
||||
|
||||
Reference in New Issue
Block a user