Added first version of console autocompletion

This commit is contained in:
2020-06-09 22:26:47 +02:00
parent d7573f095f
commit af3a3ffe92
23 changed files with 1314 additions and 88 deletions
+2 -1
View File
@@ -19,6 +19,7 @@ from sdp.sheerkaDataProvider import SheerkaDataProvider, Event
# Dotted import path of the default node-parser class — presumably resolved
# dynamically at runtime; TODO confirm against the loader code.
BASE_NODE_PARSER_CLASS = "parsers.BaseNodeParser.BaseNodeParser"
# File holding the persisted concepts list — NOTE(review): assumed, verify usage.
CONCEPTS_FILE = "_concepts.txt"
# Console commands that end the interactive session.
EXIT_COMMANDS = ("quit", "exit", "bye")
class Sheerka(Concept):
@@ -893,4 +894,4 @@ class Sheerka(Concept):
logging.basicConfig(format=log_format, level=log_level, handlers=[console_handler])
logging.addLevelName(logging.ERROR, "\033[1;41m%s\033[1;0m" % logging.getLevelName(logging.ERROR))
# uncomment the following line to enable colors
#logging.StreamHandler.emit = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
# logging.StreamHandler.emit = add_coloring_to_emit_ansi(logging.StreamHandler.emit)
+31
View File
@@ -0,0 +1,31 @@
from core.builtin_concepts import BuiltinConcepts
from core.sheerka.services.sheerka_service import BaseService
class SheerkaAdmin(BaseService):
    """Administrative service exposing cache-inspection commands."""

    NAME = "Admin"

    def __init__(self, sheerka):
        super().__init__(sheerka)

    def initialize(self):
        # Register both inspection methods as callable service commands.
        for method in (self.caches_names, self.cache):
            self.sheerka.bind_service_method(method)

    def caches_names(self):
        """
        Returns the name of all the caches
        :return:
        """
        return [*self.sheerka.cache_manager.caches]

    def cache(self, name):
        """
        Returns the content of a cache
        :param name:
        :return:
        """
        caches = self.sheerka.cache_manager.caches
        if name in caches:
            return caches[name].cache.copy()
        return self.sheerka.new(BuiltinConcepts.NOT_FOUND, body={"cache": name})
@@ -130,6 +130,12 @@ class SheerkaComparisonManager(BaseService):
return self._inner_add_comparison(comparison_obj)
def get_partition(self, prop_name, comparison_context="#"):
    """
    Returns the equivalent classes for the property, using the comparison_context
    :param prop_name:
    :param comparison_context:
    :return:
    """
    return self._get_partition(
        self.get_concepts_weights(prop_name, comparison_context))
@@ -306,9 +306,17 @@ class SheerkaExecute(BaseService):
result = evaluator.eval(sub_context, item)
if result is None:
# match() was successful but nothing was done in eval
# most of the time, it's because checks made in eval were unsuccessful
debug_result.append({"input": item, "return_value": None})
continue
if id(result) == id(item):
# eval was successful, but we don't want to alter the processing flow
debug_result.append({"input": item, "return_value": item})
continue
# otherwise, item will be removed and replaced by result
to_delete.append(item)
if isinstance(result, list):
evaluated_items.extend(result)
@@ -319,6 +327,8 @@ class SheerkaExecute(BaseService):
evaluator=evaluator)
result = self.sheerka.ret("sheerka.process", False, error, parents=[item])
evaluated_items.append(result)
# TODO: Add a validation to make sure that item is somewhere in return_value.parents
debug_result.append({"input": item, "return_value": result})
else:
debug_result.append({"input": item, "return_value": NO_MATCH})
@@ -0,0 +1,78 @@
from dataclasses import dataclass
from operator import itemgetter
from typing import Tuple, Dict, List
from cache.Cache import Cache
from core.sheerka.services.sheerka_service import BaseService, ServiceObj
@dataclass
class FunctionParametersObj(ServiceObj):
    """
    Observed parameter values for a single function.

    ``params`` maps a parameter position to the list of
    ``(value_seen, times_seen)`` tuples recorded for it, e.g.::

        params = {
            1: [('value 1', 1), ('value 2', 5)],
            3: [('value 3', 5)],
        }
    """
    name: str
    params: Dict[int, List[Tuple[str, int]]]
class SheerkaFunctionsParametersHistory(BaseService):
    """
    This service remembers the parameters expected by functions
    """

    NAME = "FunctionsParametersHistory"
    FUNCTIONS_PARAMETERS_ENTRY = "FunctionsParametersHistory:Functions"

    def __init__(self, sheerka):
        super().__init__(sheerka)
        # On a cache miss, fall back to the persistent data provider entry.
        self.cache = Cache(
            max_size=1024,
            default=lambda k: self.sheerka.sdp.get(self.FUNCTIONS_PARAMETERS_ENTRY, k))

    def initialize(self):
        self.sheerka.cache_manager.register_cache(
            self.FUNCTIONS_PARAMETERS_ENTRY, self.cache, True, True)
        return self

    def record_function_parameter(self, context, func_name: str, param_number: int, param_value: str):
        """
        Declare a new value to the parameter and function
        :param context:
        :param func_name:
        :param param_number:
        :param param_value:
        :return:
        """
        record = self.cache.get(func_name)
        if record is None:
            # First sighting of this function: start a fresh record.
            record = FunctionParametersObj(context.event.get_digest(), func_name,
                                           {param_number: [(param_value, 1)]})
        else:
            seen = record.params.setdefault(param_number, [])
            for index, (value, count) in enumerate(seen):
                if value == param_value:
                    # Known value: bump its occurrence counter in place.
                    seen[index] = (value, count + 1)
                    break
            else:
                seen.append((param_value, 1))
        self.cache.put(func_name, record)

    def get_function_parameters(self, func_name: str, param_number: int):
        """
        Return the list of param
        :param func_name:
        :param param_number:
        :return:
        """
        record = self.cache.get(func_name)
        if record is None or param_number not in record.params:
            return []
        # Most frequently seen values first.
        ranked = sorted(record.params[param_number], key=itemgetter(1), reverse=True)
        return [value for value, _ in ranked]
+1 -1
View File
@@ -3,7 +3,7 @@ from dataclasses import dataclass
@dataclass
class ServiceObj:
    """Base record for objects produced by services."""
    # Fix: the field was declared twice (stale pre-edit line left alongside the
    # updated one); keep the single declaration with the corrected comment.
    event_id: str  # digest of the event where the object is created / modified
class BaseService:
+19 -12
View File
@@ -49,6 +49,7 @@ class TokenKind(Enum):
WORD = "word"
EQUALSEQUALS = "=="
VAR_DEF = "__var__"
REGEX = "r'xxx' or r\"xxx\" or r:xxx: or r|xxx| or r/xxx/"
@dataclass()
@@ -322,6 +323,12 @@ class Tokenizer:
yield Token(TokenKind.CONCEPT, (name, id), self.i, self.line, self.column)
self.i += length + 2
self.column += length + 2
elif c == "r" and self.i + 1 < self.text_len and self.text[self.i + 1] in "'\":|/":
string, newlines, column_index = self.eat_string(self.i + 1, self.line, self.column)
yield Token(TokenKind.REGEX, string, self.i, self.line, self.column) # quotes are kept
self.i += len(string) + 1
self.column = column_index # 1 if newlines > 0 else self.column + len(string)
self.line += newlines
elif self.parse_word and (c.isalpha() or c.isdigit()):
word = self.eat_word(self.i)
yield Token(TokenKind.WORD, word, self.i, self.line, self.column)
@@ -340,10 +347,10 @@ class Tokenizer:
self.i += len(number)
self.column += len(number)
elif c == "'" or c == '"':
string, newlines = self.eat_string(self.i, self.line, self.column)
string, newlines, column_index = self.eat_string(self.i, self.line, self.column)
yield Token(TokenKind.STRING, string, self.i, self.line, self.column) # quotes are kept
self.i += len(string)
self.column = 1 if newlines > 0 else self.column + len(string)
self.column = column_index # 1 if newlines > 0 else self.column + len(string)
self.line += newlines
elif c == "_":
yield Token(TokenKind.UNDERSCORE, "_", self.i, self.line, self.column)
@@ -445,21 +452,20 @@ class Tokenizer:
quote = self.text[start_index]
result = self.text[start_index]
lines_count = 0
column_index = start_column + 1
i = start_index + 1
escape = False
newline = None
#newline = None
while i < self.text_len:
c = self.text[i]
result += c
i += 1
column_index += 1
if newline:
if c == "\n":
lines_count += 1
newline = c if c == newline else None
else:
if c == "\r" or c == "\n":
newline = c
column_index = 1
if c == "\\":
escape = True
@@ -468,15 +474,16 @@ class Tokenizer:
else:
escape = False
# add trailing new line if needed
if newline:
lines_count += 1
# # add trailing new line if needed
# if newline:
# lines_count += 1
# column_index = 1
if result[-1] != quote:
raise LexerError("Missing Trailing quote", result, i, start_line + lines_count,
1 if lines_count > 0 else start_column + len(result))
return result, lines_count
return result, lines_count, column_index
def eat_word(self, start):
result = self.text[start]