Added first version of console autocompletion
This commit is contained in:
@@ -0,0 +1,87 @@
|
||||
from core.sheerka.services.SheerkaFunctionsParametersHistory import SheerkaFunctionsParametersHistory, \
|
||||
FunctionParametersObj
|
||||
|
||||
from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
|
||||
|
||||
|
||||
class TestSheerkaFunctionsParametersHistory(TestUsingMemoryBasedSheerka):
    """Tests for SheerkaFunctionsParametersHistory.

    The service records, per function and per parameter position, every
    literal value seen together with a usage counter, and can return the
    values for one position sorted by frequency.
    """

    def test_i_can_add_a_parameter_value(self):
        sheerka, context = self.init_concepts(cache_only=False)
        service = SheerkaFunctionsParametersHistory(sheerka).initialize()

        # Record one value for each of three parameter positions.
        service.record_function_parameter(context, "function", 1, "10")
        service.record_function_parameter(context, "function", 2, "True")
        service.record_function_parameter(context, "function", 3, "'string value'")

        expected_params = {
            1: [('10', 1)],
            2: [('True', 1)],
            3: [("'string value'", 1)],
        }
        assert service.cache.copy() == {
            "function": FunctionParametersObj(
                context.event.get_digest(),
                "function",
                expected_params,
            )
        }

        # and i can serialize
        sheerka.cache_manager.commit(context)
        from_db = sheerka.sdp.get(SheerkaFunctionsParametersHistory.FUNCTIONS_PARAMETERS_ENTRY, "function")
        assert from_db.event_id == context.event.get_digest()
        assert from_db.name == "function"
        assert from_db.params == expected_params

    def test_i_can_add_the_same_value_multiple_times(self):
        sheerka, context = self.init_concepts(cache_only=True)
        service = SheerkaFunctionsParametersHistory(sheerka)

        # "20" is recorded twice for position 1: its counter must reach 2.
        service.record_function_parameter(context, "function", 1, "10")
        service.record_function_parameter(context, "function", 1, "20")
        service.record_function_parameter(context, "function", 2, "True")
        service.record_function_parameter(context, "function", 1, "20")

        assert service.cache.copy() == {
            "function": FunctionParametersObj(
                context.event.get_digest(),
                "function",
                {
                    1: [('10', 1), ('20', 2)],
                    2: [('True', 1)],
                },
            )
        }

    def test_i_can_specify_parameter_in_any_order(self):
        sheerka, context = self.init_concepts()
        service = SheerkaFunctionsParametersHistory(sheerka)

        # Positions 3 and 2 are recorded out of order; position 1 is never set.
        service.record_function_parameter(context, "function", 3, "'string value'")
        service.record_function_parameter(context, "function", 2, "True")

        assert service.cache.copy() == {
            "function": FunctionParametersObj(
                context.event.get_digest(),
                "function",
                {
                    2: [('True', 1)],
                    3: [("'string value'", 1)],
                },
            )
        }

    def test_no_value_is_managed(self):
        sheerka, context = self.init_concepts()
        service = SheerkaFunctionsParametersHistory(sheerka)

        # no entry for the function
        assert service.get_function_parameters("function", 2) == []

        # no entry for the parameter number
        service.record_function_parameter(context, "function", 1, "'string value'")
        assert service.get_function_parameters("function", 2) == []

    def test_i_can_get_sorted_parameters(self):
        sheerka, context = self.init_concepts()
        service = SheerkaFunctionsParametersHistory(sheerka)

        # "True" is seen twice, "'string value'" once: most frequent first.
        service.record_function_parameter(context, "function", 2, "'string value'")
        service.record_function_parameter(context, "function", 2, "True")
        service.record_function_parameter(context, "function", 2, "True")

        assert service.get_function_parameters("function", 2) == ["True", "'string value'"]
||||
@@ -187,6 +187,27 @@ class EvaluatorAllSuppressFooEntry(EvaluatorAllWithPriority):
|
||||
return None
|
||||
|
||||
|
||||
class EvaluatorOneDoNotModifyExecutionFlow(EvaluatorOneWithPriority):
    """
    Checks that when eval() returns the initial return_value the execution
    flow is left untouched: the "new" return_value is not added and the old
    one is not removed.
    """

    def __init__(self):
        # Name and priority are fixed for this test evaluator.
        super().__init__("do_no_modify_flow", 50)

    def matches(self, context, return_value):
        # Parent call keeps the base-class bookkeeping (debug trace) intact.
        super().matches(context, return_value)
        return context.sheerka.isinstance(return_value.body, "foo")

    def eval(self, context, return_value):
        super().eval(context, return_value)
        # The return_value may be mutated in place, but the SAME object must
        # be returned so the engine detects "no replacement happened".
        return_value.parents = [BaseTest.tretval(context.sheerka, Concept("ERROR"))]
        return return_value
|
||||
|
||||
|
||||
class TestSheerkaExecuteEvaluators(TestUsingMemoryBasedSheerka):
|
||||
|
||||
@classmethod
|
||||
@@ -422,3 +443,45 @@ class TestSheerkaExecuteEvaluators(TestUsingMemoryBasedSheerka):
|
||||
"__EVALUATION [0] init_multiple - init_evaluator - target=['foo', 'bar', 'baz']",
|
||||
'__EVALUATION [0] init_multiple - eval - target=baz',
|
||||
]
|
||||
|
||||
def test_return_value_is_not_removed_if_same_as_input(self):
|
||||
"""
|
||||
In this test, EvaluatorOneDoNotModifyExecutionFlow returns the initial return_value
|
||||
So the initial entries are not modified
|
||||
:return:
|
||||
"""
|
||||
sheerka = self.get_sheerka()
|
||||
sheerka.evaluators = [EvaluatorOneDoNotModifyExecutionFlow]
|
||||
|
||||
entries = [self.tretval(sheerka, Concept("foo"))]
|
||||
Out.debug_out = []
|
||||
res = sheerka.execute(self.get_context(sheerka), entries, [BuiltinConcepts.EVALUATION])
|
||||
|
||||
assert Out.debug_out == [
|
||||
'__EVALUATION [0] do_no_modify_flow - matches - target=foo',
|
||||
'__EVALUATION [0] do_no_modify_flow - eval - target=foo',
|
||||
]
|
||||
|
||||
assert res == entries
|
||||
|
||||
def test_new_return_value_is_added_and_old_return_value_is_removed(self):
|
||||
"""
|
||||
In this test EvaluatorOneModifyFoo modifies 'foo' into 'bar'
|
||||
So the new return_value (with 'bar' is added) and the old one (with 'foo') is removed
|
||||
:return:
|
||||
"""
|
||||
sheerka = self.get_sheerka()
|
||||
sheerka.evaluators = [EvaluatorOneModifyFoo]
|
||||
|
||||
entries = [self.tretval(sheerka, Concept("foo"))]
|
||||
Out.debug_out = []
|
||||
res = sheerka.execute(self.get_context(sheerka), entries, [BuiltinConcepts.EVALUATION])
|
||||
|
||||
assert Out.debug_out == [
|
||||
'__EVALUATION [0] modifyFoo - matches - target=foo',
|
||||
'__EVALUATION [0] modifyFoo - eval - target=foo',
|
||||
'__EVALUATION [1] modifyFoo - matches - target=bar',
|
||||
]
|
||||
|
||||
# check that 'foo' is no longer in res, but 'bar' is added
|
||||
assert res == [self.tretval(sheerka, Concept("bar"))]
|
||||
|
||||
@@ -4,7 +4,7 @@ from core.tokenizer import Tokenizer, Token, TokenKind, LexerError, Keywords
|
||||
|
||||
def test_i_can_tokenize():
|
||||
source = "+*-/{}[]() ,;:.?\n\n\r\r\r\nidentifier_0\t \t10.15 10 'string\n' \"another string\"=|&<>c:name:"
|
||||
source += "$£€!_identifier°~_^\\`==#__var__10"
|
||||
source += "$£€!_identifier°~_^\\`==#__var__10r/regex\nregex/"
|
||||
tokens = list(Tokenizer(source))
|
||||
assert tokens[0] == Token(TokenKind.PLUS, "+", 0, 1, 1)
|
||||
assert tokens[1] == Token(TokenKind.STAR, "*", 1, 1, 2)
|
||||
@@ -33,30 +33,31 @@ def test_i_can_tokenize():
|
||||
assert tokens[24] == Token(TokenKind.NUMBER, "10", 47, 5, 23)
|
||||
assert tokens[25] == Token(TokenKind.WHITESPACE, " ", 49, 5, 25)
|
||||
assert tokens[26] == Token(TokenKind.STRING, "'string\n'", 50, 5, 26)
|
||||
assert tokens[27] == Token(TokenKind.WHITESPACE, " ", 59, 6, 1)
|
||||
assert tokens[28] == Token(TokenKind.STRING, '"another string"', 60, 6, 2)
|
||||
assert tokens[29] == Token(TokenKind.EQUALS, '=', 76, 6, 18)
|
||||
assert tokens[30] == Token(TokenKind.VBAR, '|', 77, 6, 19)
|
||||
assert tokens[31] == Token(TokenKind.AMPER, '&', 78, 6, 20)
|
||||
assert tokens[32] == Token(TokenKind.LESS, '<', 79, 6, 21)
|
||||
assert tokens[33] == Token(TokenKind.GREATER, '>', 80, 6, 22)
|
||||
assert tokens[34] == Token(TokenKind.CONCEPT, ('name', None), 81, 6, 23)
|
||||
assert tokens[35] == Token(TokenKind.DOLLAR, '$', 88, 6, 30)
|
||||
assert tokens[36] == Token(TokenKind.STERLING, '£', 89, 6, 31)
|
||||
assert tokens[37] == Token(TokenKind.EURO, '€', 90, 6, 32)
|
||||
assert tokens[38] == Token(TokenKind.EMARK, '!', 91, 6, 33)
|
||||
assert tokens[39] == Token(TokenKind.IDENTIFIER, '_identifier', 92, 6, 34)
|
||||
assert tokens[40] == Token(TokenKind.DEGREE, '°', 103, 6, 45)
|
||||
assert tokens[41] == Token(TokenKind.TILDE, '~', 104, 6, 46)
|
||||
assert tokens[42] == Token(TokenKind.UNDERSCORE, '_', 105, 6, 47)
|
||||
assert tokens[43] == Token(TokenKind.CARAT, '^', 106, 6, 48)
|
||||
assert tokens[44] == Token(TokenKind.BACK_SLASH, '\\', 107, 6, 49)
|
||||
assert tokens[45] == Token(TokenKind.BACK_QUOTE, '`', 108, 6, 50)
|
||||
assert tokens[46] == Token(TokenKind.EQUALSEQUALS, '==', 109, 6, 51)
|
||||
assert tokens[47] == Token(TokenKind.HASH, '#', 111, 6, 53)
|
||||
assert tokens[48] == Token(TokenKind.VAR_DEF, '__var__10', 112, 6, 54)
|
||||
assert tokens[27] == Token(TokenKind.WHITESPACE, " ", 59, 6, 2)
|
||||
assert tokens[28] == Token(TokenKind.STRING, '"another string"', 60, 6, 3)
|
||||
assert tokens[29] == Token(TokenKind.EQUALS, '=', 76, 6, 19)
|
||||
assert tokens[30] == Token(TokenKind.VBAR, '|', 77, 6, 20)
|
||||
assert tokens[31] == Token(TokenKind.AMPER, '&', 78, 6, 21)
|
||||
assert tokens[32] == Token(TokenKind.LESS, '<', 79, 6, 22)
|
||||
assert tokens[33] == Token(TokenKind.GREATER, '>', 80, 6, 23)
|
||||
assert tokens[34] == Token(TokenKind.CONCEPT, ('name', None), 81, 6, 24)
|
||||
assert tokens[35] == Token(TokenKind.DOLLAR, '$', 88, 6, 31)
|
||||
assert tokens[36] == Token(TokenKind.STERLING, '£', 89, 6, 32)
|
||||
assert tokens[37] == Token(TokenKind.EURO, '€', 90, 6, 33)
|
||||
assert tokens[38] == Token(TokenKind.EMARK, '!', 91, 6, 34)
|
||||
assert tokens[39] == Token(TokenKind.IDENTIFIER, '_identifier', 92, 6, 35)
|
||||
assert tokens[40] == Token(TokenKind.DEGREE, '°', 103, 6, 46)
|
||||
assert tokens[41] == Token(TokenKind.TILDE, '~', 104, 6, 47)
|
||||
assert tokens[42] == Token(TokenKind.UNDERSCORE, '_', 105, 6, 48)
|
||||
assert tokens[43] == Token(TokenKind.CARAT, '^', 106, 6, 49)
|
||||
assert tokens[44] == Token(TokenKind.BACK_SLASH, '\\', 107, 6, 50)
|
||||
assert tokens[45] == Token(TokenKind.BACK_QUOTE, '`', 108, 6, 51)
|
||||
assert tokens[46] == Token(TokenKind.EQUALSEQUALS, '==', 109, 6, 52)
|
||||
assert tokens[47] == Token(TokenKind.HASH, '#', 111, 6, 54)
|
||||
assert tokens[48] == Token(TokenKind.VAR_DEF, '__var__10', 112, 6, 55)
|
||||
assert tokens[49] == Token(TokenKind.REGEX, '/regex\nregex/', 121, 6, 64)
|
||||
|
||||
assert tokens[49] == Token(TokenKind.EOF, '', 121, 6, 63)
|
||||
assert tokens[50] == Token(TokenKind.EOF, '', 135, 7, 7)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("text, expected", [
|
||||
@@ -121,30 +122,29 @@ def test_i_can_detect_tokenizer_errors(text, message, error_text, index, line, c
|
||||
assert e.value.column == column
|
||||
|
||||
|
||||
# NOTE(review): this span contained BOTH the pre- and post-change versions of
# the parametrize list, signature and eat_string() call merged together (a
# broken diff render). Only the updated version — which adds the
# expected_column value and the third return value of eat_string() — is kept.
@pytest.mark.parametrize("text, expected_text, expected_newlines, expected_column", [
    ("'foo'", "'foo'", 0, 6),
    ('"foo"', '"foo"', 0, 6),
    ("'foo\nbar'", "'foo\nbar'", 1, 5),
    ("'foo\rbar'", "'foo\rbar'", 0, 10),
    ("'foo\n\rbar'", "'foo\n\rbar'", 1, 6),
    ("'foo\r\nbar'", "'foo\r\nbar'", 1, 5),
    ("'foo\n\nbar'", "'foo\n\nbar'", 2, 5),
    ("'foo\r\n\n\rbar'", "'foo\r\n\n\rbar'", 2, 6),
    ("'\nfoo\nbar\n'", "'\nfoo\nbar\n'", 3, 2),
    ("'\n\rfoo\r\n'", "'\n\rfoo\r\n'", 2, 2),
    (r"'foo\'bar'", r"'foo\'bar'", 0, 11),
    (r'"foo\"bar"', r'"foo\"bar"', 0, 11),
    ('"foo"bar"', '"foo"', 0, 6),
    ("'foo'bar'", "'foo'", 0, 6),
])
def test_i_can_parse_strings(text, expected_text, expected_newlines, expected_column):
    """eat_string() consumes a quoted string starting at index 0 and returns
    the text found, the number of newlines crossed, and the resulting column.
    """
    lexer = Tokenizer(text)
    text_found, nb_of_newlines, column_index = lexer.eat_string(0, 1, 1)

    assert text_found == expected_text
    assert nb_of_newlines == expected_newlines
    assert column_index == expected_column
|
||||
|
||||
|
||||
@pytest.mark.parametrize("text", [
|
||||
|
||||
Reference in New Issue
Block a user