Added first version of console autocompletion
This commit is contained in:
@@ -3,6 +3,8 @@
|
|||||||
test: clean
|
test: clean
|
||||||
py.test tests
|
py.test tests
|
||||||
|
|
||||||
|
freeze-req:
|
||||||
|
pip freeze > requirements.txt
|
||||||
|
|
||||||
clean:
|
clean:
|
||||||
rm -rf build
|
rm -rf build
|
||||||
|
|||||||
@@ -1,12 +1,9 @@
|
|||||||
import getopt
|
import getopt
|
||||||
import sys
|
import sys
|
||||||
from os import path
|
|
||||||
import click
|
|
||||||
import core.utils
|
import core.utils
|
||||||
from core.sheerka.Sheerka import Sheerka
|
from core.sheerka.Sheerka import Sheerka
|
||||||
from prompt_toolkit import prompt
|
from repl.SheerkaPrompt import SheerkaPrompt
|
||||||
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
|
|
||||||
from prompt_toolkit.history import FileHistory
|
|
||||||
|
|
||||||
|
|
||||||
def usage():
|
def usage():
|
||||||
@@ -29,7 +26,7 @@ def main(argv):
|
|||||||
return True
|
return True
|
||||||
if o in ('-d', "--debug"):
|
if o in ('-d', "--debug"):
|
||||||
debug = True
|
debug = True
|
||||||
if o in ('-l', '-logger'):
|
if o in ('-l', '--logger'):
|
||||||
loggers.add(a)
|
loggers.add(a)
|
||||||
if o in ('-i', '--interactive'):
|
if o in ('-i', '--interactive'):
|
||||||
interactive = True
|
interactive = True
|
||||||
@@ -39,30 +36,10 @@ def main(argv):
|
|||||||
|
|
||||||
sheerka = Sheerka(debug=debug, loggers=loggers)
|
sheerka = Sheerka(debug=debug, loggers=loggers)
|
||||||
sheerka.initialize()
|
sheerka.initialize()
|
||||||
history_file = path.abspath(path.join(path.expanduser("~"), ".sheerka", "history.txt"))
|
|
||||||
|
|
||||||
if interactive:
|
if interactive:
|
||||||
while True:
|
result = SheerkaPrompt(sheerka).run()
|
||||||
try:
|
sys.exit(result)
|
||||||
_in = prompt('sheerka> ',
|
|
||||||
history=FileHistory(history_file),
|
|
||||||
auto_suggest=AutoSuggestFromHistory(),
|
|
||||||
)
|
|
||||||
if _in in ("exit", "quit", "bye"):
|
|
||||||
print("Take care.")
|
|
||||||
break
|
|
||||||
|
|
||||||
if _in == '__':
|
|
||||||
_in = click.edit()
|
|
||||||
|
|
||||||
result = sheerka.evaluate_user_input(_in)
|
|
||||||
sheerka.print(result)
|
|
||||||
except KeyboardInterrupt:
|
|
||||||
continue
|
|
||||||
except EOFError:
|
|
||||||
print("EOFError...")
|
|
||||||
sys.exit(3)
|
|
||||||
sys.exit(0)
|
|
||||||
else:
|
else:
|
||||||
_in = core.utils.sysarg_to_string(args)
|
_in = core.utils.sysarg_to_string(args)
|
||||||
result = sheerka.evaluate_user_input(_in)
|
result = sheerka.evaluate_user_input(_in)
|
||||||
|
|||||||
@@ -4,8 +4,10 @@ attrs==19.3.0
|
|||||||
click==7.1.1
|
click==7.1.1
|
||||||
fs==2.4.11
|
fs==2.4.11
|
||||||
gprof2dot==2017.9.19
|
gprof2dot==2017.9.19
|
||||||
|
jedi==0.17.0
|
||||||
more-itertools==7.2.0
|
more-itertools==7.2.0
|
||||||
packaging==19.2
|
packaging==19.2
|
||||||
|
parso==0.7.0
|
||||||
pluggy==0.13.0
|
pluggy==0.13.0
|
||||||
prompt-toolkit==3.0.5
|
prompt-toolkit==3.0.5
|
||||||
py==1.8.0
|
py==1.8.0
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ from sdp.sheerkaDataProvider import SheerkaDataProvider, Event
|
|||||||
|
|
||||||
BASE_NODE_PARSER_CLASS = "parsers.BaseNodeParser.BaseNodeParser"
|
BASE_NODE_PARSER_CLASS = "parsers.BaseNodeParser.BaseNodeParser"
|
||||||
CONCEPTS_FILE = "_concepts.txt"
|
CONCEPTS_FILE = "_concepts.txt"
|
||||||
|
EXIT_COMMANDS = ("quit", "exit", "bye")
|
||||||
|
|
||||||
|
|
||||||
class Sheerka(Concept):
|
class Sheerka(Concept):
|
||||||
|
|||||||
@@ -0,0 +1,31 @@
|
|||||||
|
from core.builtin_concepts import BuiltinConcepts
|
||||||
|
from core.sheerka.services.sheerka_service import BaseService
|
||||||
|
|
||||||
|
|
||||||
|
class SheerkaAdmin(BaseService):
|
||||||
|
NAME = "Admin"
|
||||||
|
|
||||||
|
def __init__(self, sheerka):
|
||||||
|
super().__init__(sheerka)
|
||||||
|
|
||||||
|
def initialize(self):
|
||||||
|
self.sheerka.bind_service_method(self.caches_names)
|
||||||
|
self.sheerka.bind_service_method(self.cache)
|
||||||
|
|
||||||
|
def caches_names(self):
|
||||||
|
"""
|
||||||
|
Returns the name of all the caches
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
return list(self.sheerka.cache_manager.caches.keys())
|
||||||
|
|
||||||
|
def cache(self, name):
|
||||||
|
"""
|
||||||
|
Returns the content of a cache
|
||||||
|
:param name:
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
if name not in self.sheerka.cache_manager.caches:
|
||||||
|
return self.sheerka.new(BuiltinConcepts.NOT_FOUND, body={"cache": name})
|
||||||
|
|
||||||
|
return self.sheerka.cache_manager.caches[name].cache.copy()
|
||||||
@@ -130,6 +130,12 @@ class SheerkaComparisonManager(BaseService):
|
|||||||
return self._inner_add_comparison(comparison_obj)
|
return self._inner_add_comparison(comparison_obj)
|
||||||
|
|
||||||
def get_partition(self, prop_name, comparison_context="#"):
|
def get_partition(self, prop_name, comparison_context="#"):
|
||||||
|
"""
|
||||||
|
Returns the equivalent classes for the property, using the comparison_context
|
||||||
|
:param prop_name:
|
||||||
|
:param comparison_context:
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
weighted_concept = self.get_concepts_weights(prop_name, comparison_context)
|
weighted_concept = self.get_concepts_weights(prop_name, comparison_context)
|
||||||
|
|
||||||
return self._get_partition(weighted_concept)
|
return self._get_partition(weighted_concept)
|
||||||
|
|||||||
@@ -306,9 +306,17 @@ class SheerkaExecute(BaseService):
|
|||||||
|
|
||||||
result = evaluator.eval(sub_context, item)
|
result = evaluator.eval(sub_context, item)
|
||||||
if result is None:
|
if result is None:
|
||||||
|
# match() was successful but nothing was done in eval
|
||||||
|
# most of the time, it's because checks made in eval were unsuccessful
|
||||||
debug_result.append({"input": item, "return_value": None})
|
debug_result.append({"input": item, "return_value": None})
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
if id(result) == id(item):
|
||||||
|
# eval was successful, but we don't want to alter the processing flow
|
||||||
|
debug_result.append({"input": item, "return_value": item})
|
||||||
|
continue
|
||||||
|
|
||||||
|
# otherwise, item will be removed and replaced by result
|
||||||
to_delete.append(item)
|
to_delete.append(item)
|
||||||
if isinstance(result, list):
|
if isinstance(result, list):
|
||||||
evaluated_items.extend(result)
|
evaluated_items.extend(result)
|
||||||
@@ -319,6 +327,8 @@ class SheerkaExecute(BaseService):
|
|||||||
evaluator=evaluator)
|
evaluator=evaluator)
|
||||||
result = self.sheerka.ret("sheerka.process", False, error, parents=[item])
|
result = self.sheerka.ret("sheerka.process", False, error, parents=[item])
|
||||||
evaluated_items.append(result)
|
evaluated_items.append(result)
|
||||||
|
|
||||||
|
# TODO: Add a validation to make sure that item is somewhere in return_value.parents
|
||||||
debug_result.append({"input": item, "return_value": result})
|
debug_result.append({"input": item, "return_value": result})
|
||||||
else:
|
else:
|
||||||
debug_result.append({"input": item, "return_value": NO_MATCH})
|
debug_result.append({"input": item, "return_value": NO_MATCH})
|
||||||
|
|||||||
@@ -0,0 +1,78 @@
|
|||||||
|
from dataclasses import dataclass
|
||||||
|
from operator import itemgetter
|
||||||
|
from typing import Tuple, Dict, List
|
||||||
|
|
||||||
|
from cache.Cache import Cache
|
||||||
|
from core.sheerka.services.sheerka_service import BaseService, ServiceObj
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class FunctionParametersObj(ServiceObj):
|
||||||
|
name: str
|
||||||
|
params: Dict[int, List[Tuple[str, int]]]
|
||||||
|
#
|
||||||
|
# params = {
|
||||||
|
# 1 : [('value 1', 1), ('value 2', 5)],
|
||||||
|
# 3 : [('value 3', 5)]
|
||||||
|
#
|
||||||
|
# the key is the number of the parameter
|
||||||
|
# the values are a tuple of (value seen, number of time this value is seen)
|
||||||
|
|
||||||
|
|
||||||
|
class SheerkaFunctionsParametersHistory(BaseService):
|
||||||
|
"""
|
||||||
|
This service remembers the parameters expected by functions
|
||||||
|
"""
|
||||||
|
|
||||||
|
NAME = "FunctionsParametersHistory"
|
||||||
|
FUNCTIONS_PARAMETERS_ENTRY = "FunctionsParametersHistory:Functions"
|
||||||
|
|
||||||
|
def __init__(self, sheerka):
|
||||||
|
super().__init__(sheerka)
|
||||||
|
self.cache = Cache(max_size=1024, default=lambda k: self.sheerka.sdp.get(self.FUNCTIONS_PARAMETERS_ENTRY, k))
|
||||||
|
|
||||||
|
def initialize(self):
|
||||||
|
self.sheerka.cache_manager.register_cache(self.FUNCTIONS_PARAMETERS_ENTRY, self.cache, True, True)
|
||||||
|
return self
|
||||||
|
|
||||||
|
def record_function_parameter(self, context, func_name: str, param_number: int, param_value: str):
|
||||||
|
"""
|
||||||
|
Declare a new value to the parameter and function
|
||||||
|
:param context:
|
||||||
|
:param func_name:
|
||||||
|
:param param_number:
|
||||||
|
:param param_value:
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
old = self.cache.get(func_name)
|
||||||
|
if old is not None:
|
||||||
|
if param_number in old.params:
|
||||||
|
lst = old.params[param_number]
|
||||||
|
for i, value in enumerate(lst): # value is a tuple (param_value, counter)
|
||||||
|
if value[0] == param_value:
|
||||||
|
lst[i] = (param_value, value[1] + 1)
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
lst.append((param_value, 1))
|
||||||
|
else:
|
||||||
|
old.params[param_number] = [(param_value, 1)]
|
||||||
|
self.cache.put(func_name, old)
|
||||||
|
else:
|
||||||
|
obj = FunctionParametersObj(context.event.get_digest(), func_name, {param_number: [(param_value, 1)]})
|
||||||
|
self.cache.put(func_name, obj)
|
||||||
|
|
||||||
|
def get_function_parameters(self, func_name: str, param_number: int):
|
||||||
|
"""
|
||||||
|
Return the list of param
|
||||||
|
:param func_name:
|
||||||
|
:param param_number:
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
values = self.cache.get(func_name)
|
||||||
|
if values is None:
|
||||||
|
return []
|
||||||
|
|
||||||
|
if param_number not in values.params:
|
||||||
|
return []
|
||||||
|
|
||||||
|
return [item[0] for item in sorted(values.params[param_number], key=itemgetter(1), reverse=True)]
|
||||||
@@ -3,7 +3,7 @@ from dataclasses import dataclass
|
|||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class ServiceObj:
|
class ServiceObj:
|
||||||
event_id: str # event where the object is created / modified
|
event_id: str # digest of the event where the object is created / modified
|
||||||
|
|
||||||
|
|
||||||
class BaseService:
|
class BaseService:
|
||||||
|
|||||||
+19
-12
@@ -49,6 +49,7 @@ class TokenKind(Enum):
|
|||||||
WORD = "word"
|
WORD = "word"
|
||||||
EQUALSEQUALS = "=="
|
EQUALSEQUALS = "=="
|
||||||
VAR_DEF = "__var__"
|
VAR_DEF = "__var__"
|
||||||
|
REGEX = "r'xxx' or r\"xxx\" or r:xxx: or r|xxx| or r/xxx/"
|
||||||
|
|
||||||
|
|
||||||
@dataclass()
|
@dataclass()
|
||||||
@@ -322,6 +323,12 @@ class Tokenizer:
|
|||||||
yield Token(TokenKind.CONCEPT, (name, id), self.i, self.line, self.column)
|
yield Token(TokenKind.CONCEPT, (name, id), self.i, self.line, self.column)
|
||||||
self.i += length + 2
|
self.i += length + 2
|
||||||
self.column += length + 2
|
self.column += length + 2
|
||||||
|
elif c == "r" and self.i + 1 < self.text_len and self.text[self.i + 1] in "'\":|/":
|
||||||
|
string, newlines, column_index = self.eat_string(self.i + 1, self.line, self.column)
|
||||||
|
yield Token(TokenKind.REGEX, string, self.i, self.line, self.column) # quotes are kept
|
||||||
|
self.i += len(string) + 1
|
||||||
|
self.column = column_index # 1 if newlines > 0 else self.column + len(string)
|
||||||
|
self.line += newlines
|
||||||
elif self.parse_word and (c.isalpha() or c.isdigit()):
|
elif self.parse_word and (c.isalpha() or c.isdigit()):
|
||||||
word = self.eat_word(self.i)
|
word = self.eat_word(self.i)
|
||||||
yield Token(TokenKind.WORD, word, self.i, self.line, self.column)
|
yield Token(TokenKind.WORD, word, self.i, self.line, self.column)
|
||||||
@@ -340,10 +347,10 @@ class Tokenizer:
|
|||||||
self.i += len(number)
|
self.i += len(number)
|
||||||
self.column += len(number)
|
self.column += len(number)
|
||||||
elif c == "'" or c == '"':
|
elif c == "'" or c == '"':
|
||||||
string, newlines = self.eat_string(self.i, self.line, self.column)
|
string, newlines, column_index = self.eat_string(self.i, self.line, self.column)
|
||||||
yield Token(TokenKind.STRING, string, self.i, self.line, self.column) # quotes are kept
|
yield Token(TokenKind.STRING, string, self.i, self.line, self.column) # quotes are kept
|
||||||
self.i += len(string)
|
self.i += len(string)
|
||||||
self.column = 1 if newlines > 0 else self.column + len(string)
|
self.column = column_index # 1 if newlines > 0 else self.column + len(string)
|
||||||
self.line += newlines
|
self.line += newlines
|
||||||
elif c == "_":
|
elif c == "_":
|
||||||
yield Token(TokenKind.UNDERSCORE, "_", self.i, self.line, self.column)
|
yield Token(TokenKind.UNDERSCORE, "_", self.i, self.line, self.column)
|
||||||
@@ -445,21 +452,20 @@ class Tokenizer:
|
|||||||
quote = self.text[start_index]
|
quote = self.text[start_index]
|
||||||
result = self.text[start_index]
|
result = self.text[start_index]
|
||||||
lines_count = 0
|
lines_count = 0
|
||||||
|
column_index = start_column + 1
|
||||||
|
|
||||||
i = start_index + 1
|
i = start_index + 1
|
||||||
escape = False
|
escape = False
|
||||||
newline = None
|
#newline = None
|
||||||
while i < self.text_len:
|
while i < self.text_len:
|
||||||
c = self.text[i]
|
c = self.text[i]
|
||||||
result += c
|
result += c
|
||||||
i += 1
|
i += 1
|
||||||
|
column_index += 1
|
||||||
|
|
||||||
if newline:
|
if c == "\n":
|
||||||
lines_count += 1
|
lines_count += 1
|
||||||
newline = c if c == newline else None
|
column_index = 1
|
||||||
else:
|
|
||||||
if c == "\r" or c == "\n":
|
|
||||||
newline = c
|
|
||||||
|
|
||||||
if c == "\\":
|
if c == "\\":
|
||||||
escape = True
|
escape = True
|
||||||
@@ -468,15 +474,16 @@ class Tokenizer:
|
|||||||
else:
|
else:
|
||||||
escape = False
|
escape = False
|
||||||
|
|
||||||
# add trailing new line if needed
|
# # add trailing new line if needed
|
||||||
if newline:
|
# if newline:
|
||||||
lines_count += 1
|
# lines_count += 1
|
||||||
|
# column_index = 1
|
||||||
|
|
||||||
if result[-1] != quote:
|
if result[-1] != quote:
|
||||||
raise LexerError("Missing Trailing quote", result, i, start_line + lines_count,
|
raise LexerError("Missing Trailing quote", result, i, start_line + lines_count,
|
||||||
1 if lines_count > 0 else start_column + len(result))
|
1 if lines_count > 0 else start_column + len(result))
|
||||||
|
|
||||||
return result, lines_count
|
return result, lines_count, column_index
|
||||||
|
|
||||||
def eat_word(self, start):
|
def eat_word(self, start):
|
||||||
result = self.text[start]
|
result = self.text[start]
|
||||||
|
|||||||
@@ -13,6 +13,10 @@ class ConceptEvaluator(OneReturnValueEvaluator):
|
|||||||
NAME = "Concept"
|
NAME = "Concept"
|
||||||
|
|
||||||
def __init__(self, return_body=False):
|
def __init__(self, return_body=False):
|
||||||
|
"""
|
||||||
|
|
||||||
|
:param return_body: if True force the body of the concept
|
||||||
|
"""
|
||||||
super().__init__(self.NAME, [BuiltinConcepts.EVALUATION], 50)
|
super().__init__(self.NAME, [BuiltinConcepts.EVALUATION], 50)
|
||||||
self.return_body = return_body
|
self.return_body = return_body
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1,110 @@
|
|||||||
|
from collections import namedtuple
|
||||||
|
|
||||||
|
import parso
|
||||||
|
from core.builtin_concepts import BuiltinConcepts
|
||||||
|
from core.sheerka.ExecutionContext import ExecutionContext
|
||||||
|
from core.sheerka.services.SheerkaFunctionsParametersHistory import SheerkaFunctionsParametersHistory
|
||||||
|
from evaluators.BaseEvaluator import OneReturnValueEvaluator
|
||||||
|
from evaluators.PythonEvaluator import PythonEvaluator
|
||||||
|
from parso.python.tree import Name, PythonNode, Operator
|
||||||
|
|
||||||
|
func_found = namedtuple("func_found", "name params")
|
||||||
|
|
||||||
|
|
||||||
|
class UpdateFunctionsParametersEvaluator(OneReturnValueEvaluator):
|
||||||
|
"""
|
||||||
|
This evaluator scans all successful PythonEvaluator results
|
||||||
|
It then records the parameters of every functions found.
|
||||||
|
For example, if the PythonEvaluator successfully evaluated
|
||||||
|
foo(1, 'string') + bar(3.14)
|
||||||
|
the parameters 1 and 'string' will be recorded for the function 'foo'
|
||||||
|
and the parameter 3.14 will be recorded for the function 'bar'
|
||||||
|
|
||||||
|
These records will later be used as input for auto-completion
|
||||||
|
"""
|
||||||
|
NAME = "UpdateFunctionsParameters"
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__(self.NAME, [BuiltinConcepts.AFTER_EVALUATION], 79)
|
||||||
|
|
||||||
|
def matches(self, context, return_value):
|
||||||
|
"""
|
||||||
|
True if the return value is the successful result of PythonEvaluator
|
||||||
|
:param context:
|
||||||
|
:param return_value:
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
return return_value.status and return_value.who == context.sheerka.get_evaluator_name(PythonEvaluator.NAME)
|
||||||
|
|
||||||
|
def eval(self, context: ExecutionContext, return_value):
|
||||||
|
sheerka = context.sheerka
|
||||||
|
params_record_service = sheerka.services[SheerkaFunctionsParametersHistory.NAME]
|
||||||
|
|
||||||
|
if not return_value.parents:
|
||||||
|
return self.add_not_found_error(sheerka, return_value)
|
||||||
|
|
||||||
|
for parent in return_value.parents:
|
||||||
|
if parent.who == sheerka.get_parser_name("Python") and \
|
||||||
|
parent.status and \
|
||||||
|
sheerka.isinstance(parent.body, BuiltinConcepts.PARSER_RESULT):
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
return self.add_not_found_error(sheerka, return_value)
|
||||||
|
|
||||||
|
source = parent.body.source
|
||||||
|
parsed = parso.parse(source)
|
||||||
|
self.process_functions(context, params_record_service, parsed)
|
||||||
|
return return_value
|
||||||
|
|
||||||
|
def process_functions(self, context, service, node):
|
||||||
|
if not hasattr(node, "children"):
|
||||||
|
return
|
||||||
|
|
||||||
|
if (func := self.get_function(node)) is not None:
|
||||||
|
for i, p in enumerate(func.params):
|
||||||
|
service.record_function_parameter(context, func.name, i, p)
|
||||||
|
|
||||||
|
function_params = node.children[1].children[1]
|
||||||
|
if hasattr(function_params, "children"):
|
||||||
|
for child in function_params.children: # function parameters
|
||||||
|
self.process_functions(context, service, child)
|
||||||
|
else:
|
||||||
|
for child in node.children:
|
||||||
|
self.process_functions(context, service, child)
|
||||||
|
|
||||||
|
def add_not_found_error(self, sheerka, return_value):
|
||||||
|
error = sheerka.ret(self.name,
|
||||||
|
False,
|
||||||
|
sheerka.new(BuiltinConcepts.NOT_FOUND, body="source code"),
|
||||||
|
parents=[return_value])
|
||||||
|
if return_value.parents is None:
|
||||||
|
return_value.parents = [error]
|
||||||
|
else:
|
||||||
|
return_value.parents.append(error)
|
||||||
|
|
||||||
|
return return_value
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_function(node):
|
||||||
|
if len(node.children) == 2 and \
|
||||||
|
isinstance(node.children[0], Name) and \
|
||||||
|
isinstance(node.children[1], PythonNode) and \
|
||||||
|
node.children[1].type == "trailer" and \
|
||||||
|
len(node.children[1].children) >= 2 and \
|
||||||
|
isinstance(node.children[1].children[0], Operator) and \
|
||||||
|
node.children[1].children[0].value == "(" and \
|
||||||
|
isinstance(node.children[1].children[-1], Operator) and \
|
||||||
|
node.children[1].children[-1].value == ")":
|
||||||
|
name = node.children[0].value
|
||||||
|
if len(node.children[1].children) == 2:
|
||||||
|
params = []
|
||||||
|
else:
|
||||||
|
params_nodes = node.children[1].children[1]
|
||||||
|
if hasattr(params_nodes, "children"):
|
||||||
|
params = [p.get_code().strip() for p in params_nodes.children if not isinstance(p, Operator)]
|
||||||
|
|
||||||
|
else:
|
||||||
|
params = [params_nodes.get_code().strip()]
|
||||||
|
return func_found(name=name, params=params)
|
||||||
|
else:
|
||||||
|
return None
|
||||||
@@ -0,0 +1,40 @@
|
|||||||
|
from os import path
|
||||||
|
|
||||||
|
import click
|
||||||
|
from core.sheerka.Sheerka import EXIT_COMMANDS
|
||||||
|
from prompt_toolkit import prompt
|
||||||
|
from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
|
||||||
|
from prompt_toolkit.history import FileHistory
|
||||||
|
from repl.SheerkaPromptCompleter import SheerkaPromptCompleter
|
||||||
|
|
||||||
|
|
||||||
|
class SheerkaPrompt:
|
||||||
|
def __init__(self, sheerka):
|
||||||
|
self.sheerka = sheerka
|
||||||
|
self.history_file = path.abspath(path.join(path.expanduser("~"), ".sheerka", "history.txt"))
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
_in = prompt('sheerka> ',
|
||||||
|
history=FileHistory(self.history_file),
|
||||||
|
auto_suggest=AutoSuggestFromHistory(),
|
||||||
|
completer=SheerkaPromptCompleter(self.sheerka),
|
||||||
|
# lexer=PygmentsLexer(PythonLexer)
|
||||||
|
)
|
||||||
|
if _in in EXIT_COMMANDS:
|
||||||
|
print("Take care.")
|
||||||
|
break
|
||||||
|
|
||||||
|
if _in == '__':
|
||||||
|
_in = click.edit()
|
||||||
|
|
||||||
|
result = self.sheerka.evaluate_user_input(_in)
|
||||||
|
self.sheerka.print(result)
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
continue
|
||||||
|
except EOFError:
|
||||||
|
print("EOFError...")
|
||||||
|
return 3
|
||||||
|
|
||||||
|
return 0
|
||||||
@@ -0,0 +1,323 @@
|
|||||||
|
# some part of code are taken from
|
||||||
|
# https://github.com/prompt-toolkit/ptpython/blob/89017ba158ed1d95319233fa5aedf3931c3b8b77/ptpython/utils.py#L45
|
||||||
|
import inspect
|
||||||
|
import re
|
||||||
|
from dataclasses import dataclass
|
||||||
|
|
||||||
|
from core.sheerka.Sheerka import EXIT_COMMANDS
|
||||||
|
from core.sheerka.services.SheerkaFunctionsParametersHistory import SheerkaFunctionsParametersHistory
|
||||||
|
from core.tokenizer import Tokenizer, TokenKind, LexerError
|
||||||
|
from prompt_toolkit.completion import Completer, Completion
|
||||||
|
|
||||||
|
NAME = re.compile(r'[a-zA-Z0-9_\.]*[a-zA-Z_]')
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class FuncFound:
|
||||||
|
"""
|
||||||
|
Class used when inside a function
|
||||||
|
"""
|
||||||
|
name: str # name of the function
|
||||||
|
index: int # index in text
|
||||||
|
paren_index: int # index of the left parenthesis
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class CompletionDesc:
|
||||||
|
text: str
|
||||||
|
display: str
|
||||||
|
meta_display: str
|
||||||
|
|
||||||
|
|
||||||
|
class SheerkaPromptCompleter(Completer):
|
||||||
|
|
||||||
|
def __init__(self, sheerka):
|
||||||
|
self.sheerka = sheerka
|
||||||
|
self.params_history_service = self.sheerka.services[SheerkaFunctionsParametersHistory.NAME]
|
||||||
|
self.builtins = []
|
||||||
|
for name, bound_method in sheerka.sheerka_methods.items():
|
||||||
|
self.builtins.append(self.get_completion_desc(name, bound_method, "builtin", ["context"]))
|
||||||
|
|
||||||
|
self.pipeable_builtins = []
|
||||||
|
for name, pipeable in self.sheerka.sheerka_pipeables.items():
|
||||||
|
self.pipeable_builtins.append(self.get_completion_desc(name, pipeable, "builtin", ["context", "iterable"]))
|
||||||
|
|
||||||
|
self.exit_commands = [CompletionDesc(c, c, "command") for c in EXIT_COMMANDS]
|
||||||
|
self.globals = self.sheerka.sheerka_methods.copy()
|
||||||
|
self.globals.update(self.sheerka.sheerka_pipeables)
|
||||||
|
|
||||||
|
def get_locals(self):
|
||||||
|
return self.sheerka.sheerka_methods
|
||||||
|
|
||||||
|
def get_completions(self, document, complete_event):
|
||||||
|
|
||||||
|
text = document.text_before_cursor
|
||||||
|
|
||||||
|
if func_found := self.inside_function(document.text, document.cursor_position):
|
||||||
|
param_number, comma_index = self.get_param_number(text[func_found.paren_index + 1:])
|
||||||
|
values = self.params_history_service.get_function_parameters(func_found.name, param_number)
|
||||||
|
as_custom_desc = [CompletionDesc(v, v, "history") for v in values]
|
||||||
|
param_text = text[func_found.paren_index + comma_index + 2:].lstrip()
|
||||||
|
yield from self.yield_completion_from_completion_desc(as_custom_desc, param_text)
|
||||||
|
return
|
||||||
|
|
||||||
|
if " " not in text:
|
||||||
|
yield from self.yield_completion_from_completion_desc(self.exit_commands, text)
|
||||||
|
yield from self.yield_completion_from_completion_desc(self.builtins, text)
|
||||||
|
return
|
||||||
|
|
||||||
|
if self.after_pipe(document.text, document.cursor_position):
|
||||||
|
if document.char_before_cursor == " ":
|
||||||
|
yield from self.yield_completion_from_completion_desc(self.pipeable_builtins, None)
|
||||||
|
else:
|
||||||
|
text = self.last_word(document.text, document.cursor_position)
|
||||||
|
yield from self.yield_completion_from_completion_desc(self.pipeable_builtins, text)
|
||||||
|
return
|
||||||
|
|
||||||
|
yield from self.yield_completion_from_completion_desc(self.builtins, text)
|
||||||
|
|
||||||
|
def get_completions_fom_jedi(self, document):
|
||||||
|
script = self.get_jedi_script_from_document(document, self.globals, self.globals)
|
||||||
|
if script:
|
||||||
|
try:
|
||||||
|
completions = script.complete()
|
||||||
|
except TypeError:
|
||||||
|
# Issue #9: bad syntax causes completions() to fail in jedi.
|
||||||
|
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/9
|
||||||
|
pass
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
# Issue #43: UnicodeDecodeError on OpenBSD
|
||||||
|
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/43
|
||||||
|
pass
|
||||||
|
except AttributeError:
|
||||||
|
# Jedi issue #513: https://github.com/davidhalter/jedi/issues/513
|
||||||
|
pass
|
||||||
|
except ValueError:
|
||||||
|
# Jedi issue: "ValueError: invalid \x escape"
|
||||||
|
pass
|
||||||
|
except KeyError:
|
||||||
|
# Jedi issue: "KeyError: u'a_lambda'."
|
||||||
|
# https://github.com/jonathanslenders/ptpython/issues/89
|
||||||
|
pass
|
||||||
|
except IOError:
|
||||||
|
# Jedi issue: "IOError: No such file or directory."
|
||||||
|
# https://github.com/jonathanslenders/ptpython/issues/71
|
||||||
|
pass
|
||||||
|
except AssertionError:
|
||||||
|
# In jedi.parser.__init__.py: 227, in remove_last_newline,
|
||||||
|
# the assertion "newline.value.endswith('\n')" can fail.
|
||||||
|
pass
|
||||||
|
except SystemError:
|
||||||
|
# In jedi.api.helpers.py: 144, in get_stack_at_position
|
||||||
|
# raise SystemError("This really shouldn't happen. There's a bug in Jedi.")
|
||||||
|
pass
|
||||||
|
except NotImplementedError:
|
||||||
|
# See: https://github.com/jonathanslenders/ptpython/issues/223
|
||||||
|
pass
|
||||||
|
except Exception:
|
||||||
|
# Supress all other Jedi exceptions.
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
for c in completions:
|
||||||
|
yield Completion(
|
||||||
|
c.name_with_symbols,
|
||||||
|
len(c.complete) - len(c.name_with_symbols),
|
||||||
|
display=c.name_with_symbols,
|
||||||
|
)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def yield_completion_from_completion_desc(definitions, text):
|
||||||
|
for completion_desc in definitions:
|
||||||
|
if text is None or text == "":
|
||||||
|
yield Completion(completion_desc.text,
|
||||||
|
0,
|
||||||
|
display=completion_desc.display,
|
||||||
|
display_meta=completion_desc.meta_display)
|
||||||
|
elif completion_desc.text.startswith(text):
|
||||||
|
yield Completion(completion_desc.text,
|
||||||
|
-len(text),
|
||||||
|
display=completion_desc.display,
|
||||||
|
display_meta=completion_desc.meta_display)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_completion_desc(name, function, meta_display, skip_params):
|
||||||
|
|
||||||
|
function_name = name + "("
|
||||||
|
signature = inspect.signature(function)
|
||||||
|
params_count = len([p for p in signature.parameters if p not in skip_params])
|
||||||
|
|
||||||
|
if params_count == 0:
|
||||||
|
function_name += ")"
|
||||||
|
return CompletionDesc(function_name, name, meta_display)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def inside_function(text, pos):
|
||||||
|
bracket_count = 0
|
||||||
|
for i in range(pos)[::-1]:
|
||||||
|
# look for an opening parenthesis that does not match a closing one
|
||||||
|
if text[i] == "(":
|
||||||
|
bracket_count += 1
|
||||||
|
elif text[i] == ")":
|
||||||
|
bracket_count -= 1
|
||||||
|
|
||||||
|
if bracket_count > 0:
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
return None # nothing found, return false
|
||||||
|
|
||||||
|
paren_index = i
|
||||||
|
|
||||||
|
while i > 1:
|
||||||
|
# eat the whitespaces
|
||||||
|
if text[i - 1] == " ":
|
||||||
|
i -= 1
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
|
||||||
|
m = NAME.match(text[:i][::-1])
|
||||||
|
func_name = m.group(0)[::-1]
|
||||||
|
return FuncFound(func_name, i - len(func_name), paren_index) if m else None
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def after_pipe(text, pos):
|
||||||
|
for i in range(pos)[::-1]:
|
||||||
|
if text[i] == "|":
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def last_word(text, pos, left_strip=True):
|
||||||
|
if pos == 0:
|
||||||
|
return ""
|
||||||
|
|
||||||
|
start = pos - 1 if text[pos - 1] == " " else pos
|
||||||
|
if start < 0:
|
||||||
|
return ""
|
||||||
|
|
||||||
|
for i in range(start)[::-1]:
|
||||||
|
if text[i] == " ":
|
||||||
|
return text[i:pos].lstrip() if left_strip else text[i:pos]
|
||||||
|
|
||||||
|
return text[:pos].lstrip() if left_strip else text[:pos]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_param_number(text):
|
||||||
|
if text == "":
|
||||||
|
return 0, -1
|
||||||
|
|
||||||
|
tokens = Tokenizer(text)
|
||||||
|
param_number = 0
|
||||||
|
stop_counting = 0
|
||||||
|
last_comma_index = -1
|
||||||
|
try:
|
||||||
|
for token in tokens:
|
||||||
|
if token.type == TokenKind.COMMA and stop_counting == 0:
|
||||||
|
param_number += 1
|
||||||
|
last_comma_index = token.index
|
||||||
|
if token.type == TokenKind.LPAR:
|
||||||
|
stop_counting += 1
|
||||||
|
if token.type == TokenKind.RPAR:
|
||||||
|
stop_counting -= 1
|
||||||
|
except LexerError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
return param_number, last_comma_index
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_jedi_script_from_document(document, globals, locals):
|
||||||
|
import jedi # We keep this import in-line, to improve start-up time.
|
||||||
|
|
||||||
|
# Importing Jedi is 'slow'.
|
||||||
|
|
||||||
|
try:
|
||||||
|
return jedi.Interpreter(
|
||||||
|
document.text,
|
||||||
|
column=document.cursor_position_col,
|
||||||
|
line=document.cursor_position_row + 1,
|
||||||
|
path="input-text",
|
||||||
|
namespaces=[locals, globals],
|
||||||
|
)
|
||||||
|
except ValueError:
|
||||||
|
# Invalid cursor position.
|
||||||
|
# ValueError('`column` parameter is not in a valid range.')
|
||||||
|
return None
|
||||||
|
except AttributeError:
|
||||||
|
# Workaround for #65: https://github.com/jonathanslenders/python-prompt-toolkit/issues/65
|
||||||
|
# See also: https://github.com/davidhalter/jedi/issues/508
|
||||||
|
return None
|
||||||
|
except IndexError:
|
||||||
|
# Workaround Jedi issue #514: for https://github.com/davidhalter/jedi/issues/514
|
||||||
|
return None
|
||||||
|
except KeyError:
|
||||||
|
# Workaroud for a crash when the input is "u'", the start of a unicode string.
|
||||||
|
return None
|
||||||
|
except Exception:
|
||||||
|
# Workaround for: https://github.com/jonathanslenders/ptpython/issues/91
|
||||||
|
return None
|
||||||
|
# def find_backwards(
|
||||||
|
# self,
|
||||||
|
# sub: str,
|
||||||
|
# in_current_line: bool = False,
|
||||||
|
# ignore_case: bool = False,
|
||||||
|
# count: int = 1,
|
||||||
|
# ) -> Optional[int]:
|
||||||
|
# """
|
||||||
|
# Find `text` before the cursor, return position relative to the cursor
|
||||||
|
# position. Return `None` if nothing was found.
|
||||||
|
# :param count: Find the n-th occurrence.
|
||||||
|
# """
|
||||||
|
# if in_current_line:
|
||||||
|
# before_cursor = self.current_line_before_cursor[::-1]
|
||||||
|
# else:
|
||||||
|
# before_cursor = self.text_before_cursor[::-1]
|
||||||
|
#
|
||||||
|
# flags = re.IGNORECASE if ignore_case else 0
|
||||||
|
# iterator = re.finditer(re.escape(sub[::-1]), before_cursor, flags)
|
||||||
|
#
|
||||||
|
# try:
|
||||||
|
# for i, match in enumerate(iterator):
|
||||||
|
# if i + 1 == count:
|
||||||
|
# return -match.start(0) - len(sub)
|
||||||
|
# except StopIteration:
|
||||||
|
# pass
|
||||||
|
# return None
|
||||||
|
|
||||||
|
# def find(
|
||||||
|
# self,
|
||||||
|
# sub: str,
|
||||||
|
# in_current_line: bool = False,
|
||||||
|
# include_current_position: bool = False,
|
||||||
|
# ignore_case: bool = False,
|
||||||
|
# count: int = 1,
|
||||||
|
# ) -> Optional[int]:
|
||||||
|
# """
|
||||||
|
# Find `text` after the cursor, return position relative to the cursor
|
||||||
|
# position. Return `None` if nothing was found.
|
||||||
|
# :param count: Find the n-th occurrence.
|
||||||
|
# """
|
||||||
|
# assert isinstance(ignore_case, bool)
|
||||||
|
#
|
||||||
|
# if in_current_line:
|
||||||
|
# text = self.current_line_after_cursor
|
||||||
|
# else:
|
||||||
|
# text = self.text_after_cursor
|
||||||
|
#
|
||||||
|
# if not include_current_position:
|
||||||
|
# if len(text) == 0:
|
||||||
|
# return None # (Otherwise, we always get a match for the empty string.)
|
||||||
|
# else:
|
||||||
|
# text = text[1:]
|
||||||
|
#
|
||||||
|
# flags = re.IGNORECASE if ignore_case else 0
|
||||||
|
# iterator = re.finditer(re.escape(sub), text, flags)
|
||||||
|
#
|
||||||
|
# try:
|
||||||
|
# for i, match in enumerate(iterator):
|
||||||
|
# if i + 1 == count:
|
||||||
|
# if include_current_position:
|
||||||
|
# return match.start(0)
|
||||||
|
# else:
|
||||||
|
# return match.start(0) + 1
|
||||||
|
# except StopIteration:
|
||||||
|
# pass
|
||||||
|
# return None
|
||||||
@@ -0,0 +1,260 @@
|
|||||||
|
# Take from standard library rlcompleter module
|
||||||
|
|
||||||
|
# Changes:
|
||||||
|
|
||||||
|
# - Removed all readline specific stuff. Added logic to split words.
|
||||||
|
|
||||||
|
# - Renamed Completer to DirCompleter (for compatibility with
|
||||||
|
# prompt_toolkit.Completer)
|
||||||
|
|
||||||
|
# - Removed _callable_postfix (code that adds '(' to the completions) because
|
||||||
|
# I don't like it.
|
||||||
|
|
||||||
|
# - Made all completions case insensitive
|
||||||
|
|
||||||
|
# - Compile a regular expression
|
||||||
|
|
||||||
|
# - Prevent things like 1. from completing int attributes
|
||||||
|
|
||||||
|
# 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and
|
||||||
|
# the Individual or Organization ("Licensee") accessing and otherwise using Python
|
||||||
|
# 3.6.0 software in source or binary form and its associated documentation.
|
||||||
|
#
|
||||||
|
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
|
||||||
|
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
|
||||||
|
# analyze, test, perform and/or display publicly, prepare derivative works,
|
||||||
|
# distribute, and otherwise use Python 3.6.0 alone or in any derivative
|
||||||
|
# version, provided, however, that PSF's License Agreement and PSF's notice of
|
||||||
|
# copyright, i.e., "Copyright © 2001-2017 Python Software Foundation; All Rights
|
||||||
|
# Reserved" are retained in Python 3.6.0 alone or in any derivative version
|
||||||
|
# prepared by Licensee.
|
||||||
|
#
|
||||||
|
# 3. In the event Licensee prepares a derivative work that is based on or
|
||||||
|
# incorporates Python 3.6.0 or any part thereof, and wants to make the
|
||||||
|
# derivative work available to others as provided herein, then Licensee hereby
|
||||||
|
# agrees to include in any such work a brief summary of the changes made to Python
|
||||||
|
# 3.6.0.
|
||||||
|
#
|
||||||
|
# 4. PSF is making Python 3.6.0 available to Licensee on an "AS IS" basis.
|
||||||
|
# PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF
|
||||||
|
# EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR
|
||||||
|
# WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE
|
||||||
|
# USE OF PYTHON 3.6.0 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
|
||||||
|
#
|
||||||
|
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 3.6.0
|
||||||
|
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
|
||||||
|
# MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 3.6.0, OR ANY DERIVATIVE
|
||||||
|
# THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
|
||||||
|
#
|
||||||
|
# 6. This License Agreement will automatically terminate upon a material breach of
|
||||||
|
# its terms and conditions.
|
||||||
|
#
|
||||||
|
# 7. Nothing in this License Agreement shall be deemed to create any relationship
|
||||||
|
# of agency, partnership, or joint venture between PSF and Licensee. This License
|
||||||
|
# Agreement does not grant permission to use PSF trademarks or trade name in a
|
||||||
|
# trademark sense to endorse or promote products or services of Licensee, or any
|
||||||
|
# third party.
|
||||||
|
#
|
||||||
|
# 8. By copying, installing or otherwise using Python 3.6.0, Licensee agrees
|
||||||
|
# to be bound by the terms and conditions of this License Agreement.
|
||||||
|
|
||||||
|
|
||||||
|
"""Word completion for GNU readline.
|
||||||
|
|
||||||
|
The completer completes keywords, built-ins and globals in a selectable
|
||||||
|
namespace (which defaults to __main__); when completing NAME.NAME..., it
|
||||||
|
evaluates (!) the expression up to the last dot and completes its attributes.
|
||||||
|
|
||||||
|
It's very cool to do "import sys" type "sys.", hit the completion key (twice),
|
||||||
|
and see the list of names defined by the sys module!
|
||||||
|
|
||||||
|
Tip: to use the tab key as the completion key, call
|
||||||
|
|
||||||
|
readline.parse_and_bind("tab: complete")
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
|
||||||
|
- Exceptions raised by the completer function are *ignored* (and generally cause
|
||||||
|
the completion to fail). This is a feature -- since readline sets the tty
|
||||||
|
device in raw (or cbreak) mode, printing a traceback wouldn't work well
|
||||||
|
without some complicated hoopla to save, reset and restore the tty state.
|
||||||
|
|
||||||
|
- The evaluation of the NAME.NAME... form may cause arbitrary application
|
||||||
|
defined code to be executed if an object with a __getattr__ hook is found.
|
||||||
|
Since it is the responsibility of the application (or the user) to enable this
|
||||||
|
feature, I consider this an acceptable risk. More complicated expressions
|
||||||
|
(e.g. function calls or indexing operations) are *not* evaluated.
|
||||||
|
|
||||||
|
- When the original stdin is not a tty device, GNU readline is never
|
||||||
|
used, and this module (and the readline module) are silently inactive.
|
||||||
|
|
||||||
|
"""
|
||||||
|
import builtins
|
||||||
|
import re
|
||||||
|
|
||||||
|
# import __main__
|
||||||
|
|
||||||
|
__all__ = ["DirCompleter"]
|
||||||
|
|
||||||
|
|
||||||
|
class DirCompleter:
|
||||||
|
def __init__(self, namespace=None):
|
||||||
|
"""Create a new completer for the command line.
|
||||||
|
|
||||||
|
Completer([namespace]) -> completer instance.
|
||||||
|
|
||||||
|
If unspecified, the default namespace where completions are performed
|
||||||
|
is __main__ (technically, __main__.__dict__). Namespaces should be
|
||||||
|
given as dictionaries.
|
||||||
|
|
||||||
|
Completer instances should be used as the completion mechanism of
|
||||||
|
readline via the set_completer() call:
|
||||||
|
|
||||||
|
readline.set_completer(Completer(my_namespace).complete)
|
||||||
|
"""
|
||||||
|
|
||||||
|
if namespace and not isinstance(namespace, dict):
|
||||||
|
raise TypeError('namespace must be a dictionary')
|
||||||
|
|
||||||
|
# Don't bind to namespace quite yet, but flag whether the user wants a
|
||||||
|
# specific namespace or to use __main__.__dict__. This will allow us
|
||||||
|
# to bind to __main__.__dict__ at completion time, not now.
|
||||||
|
if namespace is None:
|
||||||
|
self.use_main_ns = 1
|
||||||
|
else:
|
||||||
|
self.use_main_ns = 0
|
||||||
|
self.namespace = namespace
|
||||||
|
|
||||||
|
NAME = re.compile(r'[a-zA-Z0-9_\.]*[a-zA-Z_]')
|
||||||
|
|
||||||
|
def complete(self, text, state):
|
||||||
|
"""Return the next possible completion for 'text'.
|
||||||
|
|
||||||
|
This is called successively with state == 0, 1, 2, ... until it
|
||||||
|
returns None. The completion should begin with 'text'.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if text.startswith('import ') or text.startswith('from '):
|
||||||
|
return None
|
||||||
|
|
||||||
|
m = self.NAME.match(text[::-1])
|
||||||
|
if not m:
|
||||||
|
return None
|
||||||
|
|
||||||
|
text = m.group(0)[::-1]
|
||||||
|
# if self.use_main_ns:
|
||||||
|
# self.namespace = __main__.__dict__
|
||||||
|
|
||||||
|
if not text.strip():
|
||||||
|
return None
|
||||||
|
|
||||||
|
if state == 0:
|
||||||
|
if "." in text:
|
||||||
|
self.matches = self.attr_matches(text)
|
||||||
|
else:
|
||||||
|
self.matches = self.global_matches(text)
|
||||||
|
try:
|
||||||
|
return self.matches[state]
|
||||||
|
except IndexError:
|
||||||
|
return None
|
||||||
|
|
||||||
|
def global_matches(self, text):
|
||||||
|
"""Compute matches when text is a simple name.
|
||||||
|
|
||||||
|
Return a list of all keywords, built-in functions and names currently
|
||||||
|
defined in self.namespace that match.
|
||||||
|
|
||||||
|
"""
|
||||||
|
import keyword
|
||||||
|
matches = []
|
||||||
|
seen = {"__builtins__"}
|
||||||
|
lower_text = text.lower()
|
||||||
|
n = len(text)
|
||||||
|
for word in keyword.kwlist:
|
||||||
|
lower_word = word.lower()
|
||||||
|
if lower_word[:n] == text:
|
||||||
|
seen.add(word)
|
||||||
|
if word in {'finally', 'try'}:
|
||||||
|
word = word + ':'
|
||||||
|
elif word not in {'False', 'None', 'True',
|
||||||
|
'break', 'continue', 'pass',
|
||||||
|
'else'}:
|
||||||
|
word = word + ' '
|
||||||
|
matches.append(word)
|
||||||
|
for nspace in [self.namespace, builtins.__dict__]:
|
||||||
|
for word, val in nspace.items():
|
||||||
|
lower_word = word.lower()
|
||||||
|
if lower_word[:n] == lower_text and word not in seen:
|
||||||
|
seen.add(word)
|
||||||
|
matches.append(word)
|
||||||
|
return matches
|
||||||
|
|
||||||
|
ATTRIBUTE = re.compile(r"(\w+(\.\w+)*)\.(\w*)")
|
||||||
|
|
||||||
|
def attr_matches(self, text):
|
||||||
|
"""Compute matches when text contains a dot.
|
||||||
|
|
||||||
|
Assuming the text is of the form NAME.NAME....[NAME], and is
|
||||||
|
evaluable in self.namespace, it will be evaluated and its attributes
|
||||||
|
(as revealed by dir()) are used as possible completions. (For class
|
||||||
|
instances, class members are also considered.)
|
||||||
|
|
||||||
|
WARNING: this can still invoke arbitrary C code, if an object
|
||||||
|
with a __getattr__ hook is evaluated.
|
||||||
|
|
||||||
|
"""
|
||||||
|
m = self.ATTRIBUTE.match(text)
|
||||||
|
if not m:
|
||||||
|
return []
|
||||||
|
expr, attr = m.group(1, 3)
|
||||||
|
lower_attr = attr.lower()
|
||||||
|
try:
|
||||||
|
thisobject = eval(expr, self.namespace)
|
||||||
|
except Exception:
|
||||||
|
# Try to get a case insensitive version
|
||||||
|
for i in self.namespace:
|
||||||
|
if i.lower() == expr.lower():
|
||||||
|
expr = i
|
||||||
|
thisobject = eval(i, self.namespace)
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
return []
|
||||||
|
|
||||||
|
# get the content of the object, except __builtins__
|
||||||
|
words = set(dir(thisobject))
|
||||||
|
words.discard("__builtins__")
|
||||||
|
|
||||||
|
if hasattr(thisobject, '__class__'):
|
||||||
|
words.add('__class__')
|
||||||
|
words.update(get_class_members(thisobject.__class__))
|
||||||
|
matches = []
|
||||||
|
n = len(attr)
|
||||||
|
if attr == '':
|
||||||
|
noprefix = '_'
|
||||||
|
elif attr == '_':
|
||||||
|
noprefix = '__'
|
||||||
|
else:
|
||||||
|
noprefix = None
|
||||||
|
while True:
|
||||||
|
for word in words:
|
||||||
|
lower_word = word.lower()
|
||||||
|
if (lower_word[:n] == lower_attr and
|
||||||
|
not (noprefix and word[:n + 1] == noprefix)):
|
||||||
|
match = "%s.%s" % (expr, word)
|
||||||
|
matches.append(match)
|
||||||
|
if matches or not noprefix:
|
||||||
|
break
|
||||||
|
if noprefix == '_':
|
||||||
|
noprefix = '__'
|
||||||
|
else:
|
||||||
|
noprefix = None
|
||||||
|
matches.sort()
|
||||||
|
return matches
|
||||||
|
|
||||||
|
|
||||||
|
def get_class_members(klass):
|
||||||
|
ret = dir(klass)
|
||||||
|
if hasattr(klass, '__bases__'):
|
||||||
|
for base in klass.__bases__:
|
||||||
|
ret = ret + get_class_members(base)
|
||||||
|
return ret
|
||||||
+2
-2
@@ -87,10 +87,10 @@ class BaseTest:
|
|||||||
return sheerka.ret(who, True, obj)
|
return sheerka.ret(who, True, obj)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def pretval(concept, source=None, parser="parsers.name"):
|
def pretval(concept, source=None, parser="parsers.name", who="some_name"):
|
||||||
"""ParserResult ret_val (p stands for ParserResult)"""
|
"""ParserResult ret_val (p stands for ParserResult)"""
|
||||||
return ReturnValueConcept(
|
return ReturnValueConcept(
|
||||||
"some_name",
|
who,
|
||||||
True,
|
True,
|
||||||
ParserResultConcept(parser=parser,
|
ParserResultConcept(parser=parser,
|
||||||
source=source or concept.name,
|
source=source or concept.name,
|
||||||
|
|||||||
@@ -0,0 +1,87 @@
|
|||||||
|
from core.sheerka.services.SheerkaFunctionsParametersHistory import SheerkaFunctionsParametersHistory, \
|
||||||
|
FunctionParametersObj
|
||||||
|
|
||||||
|
from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
|
||||||
|
|
||||||
|
|
||||||
|
class TestSheerkaFunctionsParametersHistory(TestUsingMemoryBasedSheerka):
|
||||||
|
def test_i_can_add_a_parameter_value(self):
|
||||||
|
sheerka, context = self.init_concepts(cache_only=False)
|
||||||
|
service = SheerkaFunctionsParametersHistory(sheerka).initialize()
|
||||||
|
|
||||||
|
service.record_function_parameter(context, "function", 1, "10")
|
||||||
|
service.record_function_parameter(context, "function", 2, "True")
|
||||||
|
service.record_function_parameter(context, "function", 3, "'string value'")
|
||||||
|
|
||||||
|
assert service.cache.copy() == {"function": FunctionParametersObj(
|
||||||
|
context.event.get_digest(),
|
||||||
|
"function",
|
||||||
|
{
|
||||||
|
1: [('10', 1)],
|
||||||
|
2: [('True', 1)],
|
||||||
|
3: [("'string value'", 1)]
|
||||||
|
})}
|
||||||
|
|
||||||
|
# and i can serialize
|
||||||
|
sheerka.cache_manager.commit(context)
|
||||||
|
from_db = sheerka.sdp.get(SheerkaFunctionsParametersHistory.FUNCTIONS_PARAMETERS_ENTRY, "function")
|
||||||
|
assert from_db.event_id == context.event.get_digest()
|
||||||
|
assert from_db.name == "function"
|
||||||
|
assert from_db.params == {
|
||||||
|
1: [('10', 1)],
|
||||||
|
2: [('True', 1)],
|
||||||
|
3: [("'string value'", 1)]
|
||||||
|
}
|
||||||
|
|
||||||
|
def test_i_can_add_the_same_value_multiple_times(self):
|
||||||
|
sheerka, context = self.init_concepts(cache_only=True)
|
||||||
|
service = SheerkaFunctionsParametersHistory(sheerka)
|
||||||
|
|
||||||
|
service.record_function_parameter(context, "function", 1, "10")
|
||||||
|
service.record_function_parameter(context, "function", 1, "20")
|
||||||
|
service.record_function_parameter(context, "function", 2, "True")
|
||||||
|
service.record_function_parameter(context, "function", 1, "20")
|
||||||
|
|
||||||
|
assert service.cache.copy() == {"function": FunctionParametersObj(
|
||||||
|
context.event.get_digest(),
|
||||||
|
"function",
|
||||||
|
{
|
||||||
|
1: [('10', 1), ('20', 2)],
|
||||||
|
2: [('True', 1)],
|
||||||
|
})}
|
||||||
|
|
||||||
|
def test_i_can_specify_parameter_in_any_order(self):
|
||||||
|
sheerka, context = self.init_concepts()
|
||||||
|
service = SheerkaFunctionsParametersHistory(sheerka)
|
||||||
|
|
||||||
|
service.record_function_parameter(context, "function", 3, "'string value'")
|
||||||
|
service.record_function_parameter(context, "function", 2, "True")
|
||||||
|
|
||||||
|
assert service.cache.copy() == {"function": FunctionParametersObj(
|
||||||
|
context.event.get_digest(),
|
||||||
|
"function",
|
||||||
|
{
|
||||||
|
2: [('True', 1)],
|
||||||
|
3: [("'string value'", 1)]
|
||||||
|
})}
|
||||||
|
|
||||||
|
def test_no_value_is_managed(self):
|
||||||
|
sheerka, context = self.init_concepts()
|
||||||
|
service = SheerkaFunctionsParametersHistory(sheerka)
|
||||||
|
|
||||||
|
# no entry for the function
|
||||||
|
assert service.get_function_parameters("function", 2) == []
|
||||||
|
|
||||||
|
# no entry for the parameter number
|
||||||
|
service.record_function_parameter(context, "function", 1, "'string value'")
|
||||||
|
assert service.get_function_parameters("function", 2) == []
|
||||||
|
|
||||||
|
def test_i_can_get_sorted_parameters(self):
|
||||||
|
sheerka, context = self.init_concepts()
|
||||||
|
service = SheerkaFunctionsParametersHistory(sheerka)
|
||||||
|
|
||||||
|
service.record_function_parameter(context, "function", 2, "'string value'")
|
||||||
|
service.record_function_parameter(context, "function", 2, "True")
|
||||||
|
service.record_function_parameter(context, "function", 2, "True")
|
||||||
|
|
||||||
|
assert service.get_function_parameters("function", 2) == ["True", "'string value'"]
|
||||||
@@ -187,6 +187,27 @@ class EvaluatorAllSuppressFooEntry(EvaluatorAllWithPriority):
|
|||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
class EvaluatorOneDoNotModifyExecutionFlow(EvaluatorOneWithPriority):
|
||||||
|
"""
|
||||||
|
To test that when eval() returns the initial return_value, the execution flow is not modified
|
||||||
|
ie : the new return_value is not added and the old one is not removed
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__("do_no_modify_flow", 50)
|
||||||
|
|
||||||
|
def matches(self, context, return_value):
|
||||||
|
super().matches(context, return_value)
|
||||||
|
return context.sheerka.isinstance(return_value.body, "foo")
|
||||||
|
|
||||||
|
def eval(self, context, return_value):
|
||||||
|
super().eval(context, return_value)
|
||||||
|
|
||||||
|
# I can modify the return_value, but I must return it
|
||||||
|
return_value.parents = [BaseTest.tretval(context.sheerka, Concept("ERROR"))]
|
||||||
|
return return_value
|
||||||
|
|
||||||
|
|
||||||
class TestSheerkaExecuteEvaluators(TestUsingMemoryBasedSheerka):
|
class TestSheerkaExecuteEvaluators(TestUsingMemoryBasedSheerka):
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
@@ -422,3 +443,45 @@ class TestSheerkaExecuteEvaluators(TestUsingMemoryBasedSheerka):
|
|||||||
"__EVALUATION [0] init_multiple - init_evaluator - target=['foo', 'bar', 'baz']",
|
"__EVALUATION [0] init_multiple - init_evaluator - target=['foo', 'bar', 'baz']",
|
||||||
'__EVALUATION [0] init_multiple - eval - target=baz',
|
'__EVALUATION [0] init_multiple - eval - target=baz',
|
||||||
]
|
]
|
||||||
|
|
||||||
|
def test_return_value_is_not_removed_if_same_as_input(self):
|
||||||
|
"""
|
||||||
|
In this test, EvaluatorOneDoNotModifyExecutionFlow returns the initial return_value
|
||||||
|
So the initial entries are not modified
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
sheerka = self.get_sheerka()
|
||||||
|
sheerka.evaluators = [EvaluatorOneDoNotModifyExecutionFlow]
|
||||||
|
|
||||||
|
entries = [self.tretval(sheerka, Concept("foo"))]
|
||||||
|
Out.debug_out = []
|
||||||
|
res = sheerka.execute(self.get_context(sheerka), entries, [BuiltinConcepts.EVALUATION])
|
||||||
|
|
||||||
|
assert Out.debug_out == [
|
||||||
|
'__EVALUATION [0] do_no_modify_flow - matches - target=foo',
|
||||||
|
'__EVALUATION [0] do_no_modify_flow - eval - target=foo',
|
||||||
|
]
|
||||||
|
|
||||||
|
assert res == entries
|
||||||
|
|
||||||
|
def test_new_return_value_is_added_and_old_return_value_is_removed(self):
|
||||||
|
"""
|
||||||
|
In this test EvaluatorOneModifyFoo modifies 'foo' into 'bar'
|
||||||
|
So the new return_value (with 'bar' is added) and the old one (with 'foo') is removed
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
sheerka = self.get_sheerka()
|
||||||
|
sheerka.evaluators = [EvaluatorOneModifyFoo]
|
||||||
|
|
||||||
|
entries = [self.tretval(sheerka, Concept("foo"))]
|
||||||
|
Out.debug_out = []
|
||||||
|
res = sheerka.execute(self.get_context(sheerka), entries, [BuiltinConcepts.EVALUATION])
|
||||||
|
|
||||||
|
assert Out.debug_out == [
|
||||||
|
'__EVALUATION [0] modifyFoo - matches - target=foo',
|
||||||
|
'__EVALUATION [0] modifyFoo - eval - target=foo',
|
||||||
|
'__EVALUATION [1] modifyFoo - matches - target=bar',
|
||||||
|
]
|
||||||
|
|
||||||
|
# check that 'foo' is no longer in res, but 'bar' is added
|
||||||
|
assert res == [self.tretval(sheerka, Concept("bar"))]
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ from core.tokenizer import Tokenizer, Token, TokenKind, LexerError, Keywords
|
|||||||
|
|
||||||
def test_i_can_tokenize():
|
def test_i_can_tokenize():
|
||||||
source = "+*-/{}[]() ,;:.?\n\n\r\r\r\nidentifier_0\t \t10.15 10 'string\n' \"another string\"=|&<>c:name:"
|
source = "+*-/{}[]() ,;:.?\n\n\r\r\r\nidentifier_0\t \t10.15 10 'string\n' \"another string\"=|&<>c:name:"
|
||||||
source += "$£€!_identifier°~_^\\`==#__var__10"
|
source += "$£€!_identifier°~_^\\`==#__var__10r/regex\nregex/"
|
||||||
tokens = list(Tokenizer(source))
|
tokens = list(Tokenizer(source))
|
||||||
assert tokens[0] == Token(TokenKind.PLUS, "+", 0, 1, 1)
|
assert tokens[0] == Token(TokenKind.PLUS, "+", 0, 1, 1)
|
||||||
assert tokens[1] == Token(TokenKind.STAR, "*", 1, 1, 2)
|
assert tokens[1] == Token(TokenKind.STAR, "*", 1, 1, 2)
|
||||||
@@ -33,30 +33,31 @@ def test_i_can_tokenize():
|
|||||||
assert tokens[24] == Token(TokenKind.NUMBER, "10", 47, 5, 23)
|
assert tokens[24] == Token(TokenKind.NUMBER, "10", 47, 5, 23)
|
||||||
assert tokens[25] == Token(TokenKind.WHITESPACE, " ", 49, 5, 25)
|
assert tokens[25] == Token(TokenKind.WHITESPACE, " ", 49, 5, 25)
|
||||||
assert tokens[26] == Token(TokenKind.STRING, "'string\n'", 50, 5, 26)
|
assert tokens[26] == Token(TokenKind.STRING, "'string\n'", 50, 5, 26)
|
||||||
assert tokens[27] == Token(TokenKind.WHITESPACE, " ", 59, 6, 1)
|
assert tokens[27] == Token(TokenKind.WHITESPACE, " ", 59, 6, 2)
|
||||||
assert tokens[28] == Token(TokenKind.STRING, '"another string"', 60, 6, 2)
|
assert tokens[28] == Token(TokenKind.STRING, '"another string"', 60, 6, 3)
|
||||||
assert tokens[29] == Token(TokenKind.EQUALS, '=', 76, 6, 18)
|
assert tokens[29] == Token(TokenKind.EQUALS, '=', 76, 6, 19)
|
||||||
assert tokens[30] == Token(TokenKind.VBAR, '|', 77, 6, 19)
|
assert tokens[30] == Token(TokenKind.VBAR, '|', 77, 6, 20)
|
||||||
assert tokens[31] == Token(TokenKind.AMPER, '&', 78, 6, 20)
|
assert tokens[31] == Token(TokenKind.AMPER, '&', 78, 6, 21)
|
||||||
assert tokens[32] == Token(TokenKind.LESS, '<', 79, 6, 21)
|
assert tokens[32] == Token(TokenKind.LESS, '<', 79, 6, 22)
|
||||||
assert tokens[33] == Token(TokenKind.GREATER, '>', 80, 6, 22)
|
assert tokens[33] == Token(TokenKind.GREATER, '>', 80, 6, 23)
|
||||||
assert tokens[34] == Token(TokenKind.CONCEPT, ('name', None), 81, 6, 23)
|
assert tokens[34] == Token(TokenKind.CONCEPT, ('name', None), 81, 6, 24)
|
||||||
assert tokens[35] == Token(TokenKind.DOLLAR, '$', 88, 6, 30)
|
assert tokens[35] == Token(TokenKind.DOLLAR, '$', 88, 6, 31)
|
||||||
assert tokens[36] == Token(TokenKind.STERLING, '£', 89, 6, 31)
|
assert tokens[36] == Token(TokenKind.STERLING, '£', 89, 6, 32)
|
||||||
assert tokens[37] == Token(TokenKind.EURO, '€', 90, 6, 32)
|
assert tokens[37] == Token(TokenKind.EURO, '€', 90, 6, 33)
|
||||||
assert tokens[38] == Token(TokenKind.EMARK, '!', 91, 6, 33)
|
assert tokens[38] == Token(TokenKind.EMARK, '!', 91, 6, 34)
|
||||||
assert tokens[39] == Token(TokenKind.IDENTIFIER, '_identifier', 92, 6, 34)
|
assert tokens[39] == Token(TokenKind.IDENTIFIER, '_identifier', 92, 6, 35)
|
||||||
assert tokens[40] == Token(TokenKind.DEGREE, '°', 103, 6, 45)
|
assert tokens[40] == Token(TokenKind.DEGREE, '°', 103, 6, 46)
|
||||||
assert tokens[41] == Token(TokenKind.TILDE, '~', 104, 6, 46)
|
assert tokens[41] == Token(TokenKind.TILDE, '~', 104, 6, 47)
|
||||||
assert tokens[42] == Token(TokenKind.UNDERSCORE, '_', 105, 6, 47)
|
assert tokens[42] == Token(TokenKind.UNDERSCORE, '_', 105, 6, 48)
|
||||||
assert tokens[43] == Token(TokenKind.CARAT, '^', 106, 6, 48)
|
assert tokens[43] == Token(TokenKind.CARAT, '^', 106, 6, 49)
|
||||||
assert tokens[44] == Token(TokenKind.BACK_SLASH, '\\', 107, 6, 49)
|
assert tokens[44] == Token(TokenKind.BACK_SLASH, '\\', 107, 6, 50)
|
||||||
assert tokens[45] == Token(TokenKind.BACK_QUOTE, '`', 108, 6, 50)
|
assert tokens[45] == Token(TokenKind.BACK_QUOTE, '`', 108, 6, 51)
|
||||||
assert tokens[46] == Token(TokenKind.EQUALSEQUALS, '==', 109, 6, 51)
|
assert tokens[46] == Token(TokenKind.EQUALSEQUALS, '==', 109, 6, 52)
|
||||||
assert tokens[47] == Token(TokenKind.HASH, '#', 111, 6, 53)
|
assert tokens[47] == Token(TokenKind.HASH, '#', 111, 6, 54)
|
||||||
assert tokens[48] == Token(TokenKind.VAR_DEF, '__var__10', 112, 6, 54)
|
assert tokens[48] == Token(TokenKind.VAR_DEF, '__var__10', 112, 6, 55)
|
||||||
|
assert tokens[49] == Token(TokenKind.REGEX, '/regex\nregex/', 121, 6, 64)
|
||||||
|
|
||||||
assert tokens[49] == Token(TokenKind.EOF, '', 121, 6, 63)
|
assert tokens[50] == Token(TokenKind.EOF, '', 135, 7, 7)
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("text, expected", [
|
@pytest.mark.parametrize("text, expected", [
|
||||||
@@ -121,30 +122,29 @@ def test_i_can_detect_tokenizer_errors(text, message, error_text, index, line, c
|
|||||||
assert e.value.column == column
|
assert e.value.column == column
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("text, expected_text, expected_newlines", [
|
@pytest.mark.parametrize("text, expected_text, expected_newlines, expected_column", [
|
||||||
("'foo'", "'foo'", 0),
|
("'foo'", "'foo'", 0, 6),
|
||||||
('"foo"', '"foo"', 0),
|
('"foo"', '"foo"', 0, 6),
|
||||||
("'foo\rbar'", "'foo\rbar'", 1),
|
("'foo\nbar'", "'foo\nbar'", 1, 5),
|
||||||
("'foo\nbar'", "'foo\nbar'", 1),
|
("'foo\rbar'", "'foo\rbar'", 0, 10),
|
||||||
("'foo\n\rbar'", "'foo\n\rbar'", 1),
|
("'foo\n\rbar'", "'foo\n\rbar'", 1, 6),
|
||||||
("'foo\r\nbar'", "'foo\r\nbar'", 1),
|
("'foo\r\nbar'", "'foo\r\nbar'", 1, 5),
|
||||||
("'foo\r\rbar'", "'foo\r\rbar'", 2),
|
("'foo\n\nbar'", "'foo\n\nbar'", 2, 5),
|
||||||
("'foo\n\nbar'", "'foo\n\nbar'", 2),
|
("'foo\r\n\n\rbar'", "'foo\r\n\n\rbar'", 2, 6),
|
||||||
("'foo\r\n\n\rbar'", "'foo\r\n\n\rbar'", 2),
|
("'\nfoo\nbar\n'", "'\nfoo\nbar\n'", 3, 2),
|
||||||
("'\rfoo\rbar\r'", "'\rfoo\rbar\r'", 3),
|
("'\n\rfoo\r\n'", "'\n\rfoo\r\n'", 2, 2),
|
||||||
("'\nfoo\nbar\n'", "'\nfoo\nbar\n'", 3),
|
(r"'foo\'bar'", r"'foo\'bar'", 0, 11),
|
||||||
("'\n\rfoo\r\n'", "'\n\rfoo\r\n'", 2),
|
(r'"foo\"bar"', r'"foo\"bar"', 0, 11),
|
||||||
(r"'foo\'bar'", r"'foo\'bar'", 0),
|
('"foo"bar"', '"foo"', 0, 6),
|
||||||
(r'"foo\"bar"', r'"foo\"bar"', 0),
|
("'foo'bar'", "'foo'", 0, 6),
|
||||||
('"foo"bar"', '"foo"', 0),
|
|
||||||
("'foo'bar'", "'foo'", 0),
|
|
||||||
])
|
])
|
||||||
def test_i_can_parse_strings(text, expected_text, expected_newlines):
|
def test_i_can_parse_strings(text, expected_text, expected_newlines, expected_column):
|
||||||
lexer = Tokenizer(text)
|
lexer = Tokenizer(text)
|
||||||
text_found, nb_of_newlines = lexer.eat_string(0, 1, 1)
|
text_found, nb_of_newlines, column_index = lexer.eat_string(0, 1, 1)
|
||||||
|
|
||||||
assert nb_of_newlines == expected_newlines
|
|
||||||
assert text_found == expected_text
|
assert text_found == expected_text
|
||||||
|
assert nb_of_newlines == expected_newlines
|
||||||
|
assert column_index == expected_column
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.parametrize("text", [
|
@pytest.mark.parametrize("text", [
|
||||||
|
|||||||
@@ -0,0 +1,68 @@
|
|||||||
|
import pytest
|
||||||
|
from core.builtin_concepts import ReturnValueConcept, BuiltinConcepts
|
||||||
|
from core.concept import Concept
|
||||||
|
from core.sheerka.services.SheerkaFunctionsParametersHistory import SheerkaFunctionsParametersHistory
|
||||||
|
from evaluators.UpdateFunctionsParametersEvaluator import UpdateFunctionsParametersEvaluator
|
||||||
|
from parsers.PythonParser import PythonNode
|
||||||
|
|
||||||
|
from tests.BaseTest import BaseTest
|
||||||
|
from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
|
||||||
|
|
||||||
|
r = ReturnValueConcept
|
||||||
|
|
||||||
|
|
||||||
|
class TestUpdateFunctionsParametersEvaluator(TestUsingMemoryBasedSheerka):
    """Tests for UpdateFunctionsParametersEvaluator.

    Covers: which return values the evaluator matches, the failure path when
    the originating parser's source code cannot be found, and recording of
    observed function-call parameters into the history service.
    """

    @pytest.mark.parametrize("return_value, expected_result", [
        (r("evaluators.Python", True), True),
        (r("evaluators.Python", False), False),
        (r("other_name", True), False),
        (r("other_name", False), False),
    ])
    def test_i_can_match(self, return_value, expected_result):
        # Only successful results coming from the Python evaluator should match.
        _, context = self.init_concepts()

        under_test = UpdateFunctionsParametersEvaluator()

        assert under_test.matches(context, return_value) == expected_result

    @pytest.mark.parametrize("return_value", [
        r("evaluators.Python", True),
        r("evaluators.Python", True, parents=[]),
        r("evaluators.Python", True, parents=[BaseTest.pretval(Concept(), who="notPythonParser")]),
    ])
    def test_i_cannot_eval_if_original_parser_is_not_found(self, return_value):
        sheerka, context = self.init_concepts()
        under_test = UpdateFunctionsParametersEvaluator()

        result = under_test.eval(context, return_value)

        # The evaluator hands back the same return value, annotated with a
        # failed NOT_FOUND parent describing the missing "source code".
        assert result == return_value
        failure = result.parents[-1]
        assert not failure.status
        assert sheerka.isinstance(failure.body, BuiltinConcepts.NOT_FOUND)
        assert failure.body.body == "source code"

    @pytest.mark.parametrize("source, func_name, param_number, expected", [
        ("func()", "func", 0, []),
        ("func(10)", "func", 0, ["10"]),
        ("func(10, True, 'some string')", "func", 0, ["10"]),
        ("func(10, True, 'some string')", "func", 1, ["True"]),
        ("func(10, True, 'some string')", "func", 2, ["'some string'"]),
        ("func1(10) | func2(20)", "func2", 0, ["20"]),
        ("func1(10, func2(20), 'string')", "func1", 0, ["10"]),
        ("func1(10, func2(20), 'string')", "func1", 1, ["func2(20)"]),
        ("func1(10, func2(20), 'string')", "func1", 2, ["'string'"]),
        ("func1(10, func2(20), 'string')", "func2", 0, ["20"]),
    ])
    def test_i_can_record_functions_parameters(self, source, func_name, param_number, expected):
        sheerka, context = self.init_concepts()

        # Build a return value whose parent is a parser result carrying the source.
        parsed = self.pretval(PythonNode(source), who="parsers.Python", source=source)
        return_value = r("evaluators.Python", True, parents=[parsed])
        under_test = UpdateFunctionsParametersEvaluator()

        result = under_test.eval(context, return_value)

        assert result == return_value

        # Evaluation must have pushed each call's parameters into the history service.
        history = sheerka.services[SheerkaFunctionsParametersHistory.NAME]
        assert history.get_function_parameters(func_name, param_number) == expected
@@ -0,0 +1,157 @@
|
|||||||
|
import pytest
|
||||||
|
from core.sheerka.services.SheerkaFunctionsParametersHistory import SheerkaFunctionsParametersHistory
|
||||||
|
from prompt_toolkit.completion import CompleteEvent
|
||||||
|
from prompt_toolkit.document import Document
|
||||||
|
from repl.SheerkaPromptCompleter import SheerkaPromptCompleter, FuncFound
|
||||||
|
|
||||||
|
from tests.TestUsingMemoryBasedSheerka import TestUsingMemoryBasedSheerka
|
||||||
|
|
||||||
|
|
||||||
|
class TestSheerkaPromptCompleter(TestUsingMemoryBasedSheerka):
    """Tests for SheerkaPromptCompleter.

    Covers completion of builtins, commands, pipeable functions and recorded
    function parameters, plus the static text-analysis helpers
    (``after_pipe``, ``last_word``, ``inside_function``, ``get_param_number``).
    """

    def _completions_by_display(self, sheerka, text):
        """Run the completer on *text* and index the results by display text."""
        document = Document(text)
        completions = SheerkaPromptCompleter(sheerka).get_completions(document, CompleteEvent())
        return {c.display_text: c for c in completions}

    @staticmethod
    def _assert_completion(as_dict, display, text, meta):
        """Assert one completion entry exists with the expected insert text and meta."""
        assert display in as_dict
        completion = as_dict[display]
        assert completion.text == text
        assert completion.display_text == display
        assert completion.display_meta_text == meta

    def test_i_can_complete_with_builtins(self):
        sheerka = self.get_sheerka()

        as_dict = self._completions_by_display(sheerka, "get")

        # Builtins with parameters complete with an open paren; niladic ones with "()".
        self._assert_completion(as_dict, "get_partition", "get_partition(", "builtin")
        self._assert_completion(as_dict, "get_results", "get_results()", "builtin")

    def test_i_can_complete_with_commands(self):
        sheerka = self.get_sheerka()

        as_dict = self._completions_by_display(sheerka, "q")

        self._assert_completion(as_dict, "quit", "quit", "command")

    def test_i_can_complete_with_pipeable(self):
        sheerka = self.get_sheerka()

        as_dict = self._completions_by_display(sheerka, "| ")

        self._assert_completion(as_dict, "first", "first()", "builtin")
        self._assert_completion(as_dict, "filter", "filter(", "builtin")

    def test_i_can_complete_with_pipeable_when_starting_to_write(self):
        sheerka = self.get_sheerka()

        as_dict = self._completions_by_display(sheerka, "| f")

        self._assert_completion(as_dict, "first", "first()", "builtin")
        self._assert_completion(as_dict, "filter", "filter(", "builtin")

    @pytest.mark.parametrize("text, expected", [
        ("func(", ["10", "20", "30"]),
        ("func(1", ["10"]),
        ("func( 1", ["10"]),
        ("func( 10, ", ["'hello'"]),
        ("func( 10, v", []),
        ("func( 10, 'hel", ["'hello'"]),
        ('func( 10, "hel', []),
        ("func('hell,,', func2(2,4), 'w", ["'world'"]),
    ])
    def test_i_can_complete_function_parameters(self, text, expected):
        sheerka = self.get_sheerka()
        context = self.get_context(sheerka)
        # Seed the history service with previously-seen parameter values for "func".
        history = sheerka.services[SheerkaFunctionsParametersHistory.NAME]
        history.record_function_parameter(context, "func", 0, "10")
        history.record_function_parameter(context, "func", 0, "20")
        history.record_function_parameter(context, "func", 0, "30")
        history.record_function_parameter(context, "func", 1, "'hello'")
        history.record_function_parameter(context, "func", 2, "'world'")

        document = Document(text)
        completions = SheerkaPromptCompleter(sheerka).get_completions(document, CompleteEvent())

        assert [c.display_text for c in completions] == expected

    @pytest.mark.parametrize("text, pos, expected", [
        ("", 0, False),
        ("foo", 3, False),
        ("|", 1, True),
        ("xxx | foo", 9, True),
        ("xxx | foo", 5, True),
        ("xxx | foo", 4, False),
    ])
    def test_after_pipe(self, text, pos, expected):
        assert SheerkaPromptCompleter.after_pipe(text, pos) == expected

    @pytest.mark.parametrize("text, pos, expected", [
        ("", 0, ""),
        ("foo", 3, "foo"),
        ("foo ", 4, "foo "),
        ("foo", 2, "fo"),
        ("foo bar", 7, "bar"),
        ("foo bar", 4, "foo "),
    ])
    def test_last_word(self, text, pos, expected):
        assert SheerkaPromptCompleter.last_word(text, pos) == expected

    @pytest.mark.parametrize("text, pos, expected", [
        ("", 0, None),
        ("foo", 3, None),
        ("foo(", 4, FuncFound("foo", 0, 3)),
        ("foo(a, ", 7, FuncFound("foo", 0, 3)),
        ("foo( a , ", 9, FuncFound("foo", 0, 3)),
        ("foo(bar)", 8, None),
        ("foo(bar)", 7, FuncFound("foo", 0, 3)),
        ("foo()", 5, None),
        ("foo()", 4, FuncFound("foo", 0, 3)),
        ("xxx foo(", 8, FuncFound("foo", 4, 7)),
        ("foo (", 5, FuncFound("foo", 0, 4)),
        ("foo (", 6, FuncFound("foo", 0, 5)),
    ])
    def test_inside_function(self, text, pos, expected):
        assert SheerkaPromptCompleter.inside_function(text, pos) == expected

    # NOTE(review): one duplicated case ("foo, ", 1, 3) was removed — it ran
    # the identical scenario twice and added no coverage.
    @pytest.mark.parametrize("text, expected_param_number, expected_comma_index", [
        ("", 0, -1),
        ("foo", 0, -1),
        ("foo, ", 1, 3),
        ("foo, 'he,llo', ", 2, 13),
        ("foo, (he,llo), ", 2, 13),
        ("foo, (he,llo ", 1, 3),
        ("foo, 'he,llo ", 1, 3),
    ])
    def test_get_param_number(self, text, expected_param_number, expected_comma_index):
        assert SheerkaPromptCompleter.get_param_number(text) == (expected_param_number, expected_comma_index)
||||||
Reference in New Issue
Block a user