Add BNF when adding a new concept; start logging filtering
This commit is contained in:
+63
-5
@@ -3,6 +3,8 @@ import inspect
|
||||
import pkgutil
|
||||
import sys
|
||||
|
||||
from core.tokenizer import TokenKind
|
||||
|
||||
|
||||
def sysarg_to_string(argv):
|
||||
"""
|
||||
def get_full_qualified_name(obj):
    """
    Return the fully qualified name ("module.Name") of *obj*.

    Works for both instances and classes; names from the builtins module
    are reported bare (e.g. ``'int'`` rather than ``'builtins.int'``).

    :param obj: an instance or a class
    :return: the qualified name as a string
    """
    # str.__class__.__module__ is the name of the builtins module.
    builtin_module = str.__class__.__module__

    # BUG FIX: decide "is obj a class?" FIRST.  The previous code checked
    # obj.__class__.__module__ before the class test, but for any class with
    # the default metaclass obj.__class__ is `type`, whose module is
    # 'builtins', so the function wrongly returned 'type' instead of the
    # class's own name (and the class branch was unreachable).
    if isinstance(obj, type):
        module = obj.__module__
        if module is None or module == builtin_module:
            return obj.__name__  # Avoid reporting __builtin__
        return module + '.' + obj.__name__

    module = obj.__class__.__module__
    if module is None or module == builtin_module:
        return obj.__class__.__name__  # Avoid reporting __builtin__
    return module + '.' + obj.__class__.__name__
||||
def get_classes(module_name):
|
||||
@@ -137,7 +146,7 @@ def remove_from_list(lst, to_remove_predicate):
|
||||
|
||||
def product(a, b):
|
||||
"""
|
||||
Kind of cartesian product between list a and b
|
||||
Kind of cartesian product between lists a and b
|
||||
knowing that a is also a list
|
||||
|
||||
So it's a cartesian product between a list of list and a list
|
||||
@@ -155,3 +164,52 @@ def product(a, b):
|
||||
res.append(items)
|
||||
|
||||
return res
|
||||
|
||||
|
||||
def strip_quotes(text):
    """
    Remove one pair of matching surrounding quotes from *text*.

    Non-string values are returned unchanged.  A quote is stripped only
    when the string both starts and ends with the same quote character.

    :param text: any value; only str values are processed
    :return: *text* without its surrounding quote pair, or unchanged
    """
    if not isinstance(text, str):
        return text

    # BUG FIX: the previous version returned text[1:-1] whenever the FIRST
    # character was a quote, silently truncating the last character of
    # unterminated input such as "'abc", and turning a lone quote into "".
    # Only strip when a matching closing quote is actually present.
    if len(text) >= 2 and text[0] in ("'", '"') and text[-1] == text[0]:
        return text[1:-1]

    return text
|
||||
|
||||
|
||||
def strip_tokens(tokens, strip_eof=False):
    """
    Return *tokens* with leading and trailing WHITESPACE/NEWLINE tokens
    removed.  When *strip_eof* is true, a trailing EOF token is stripped
    as well.  None input yields None; a fully-stripped list yields [].
    """
    if tokens is None:
        return None
    if not tokens:
        return []

    skip = (TokenKind.WHITESPACE, TokenKind.NEWLINE)
    trailing = (skip + (TokenKind.EOF,)) if strip_eof else skip

    # Advance past ignorable tokens at the front.
    lo = 0
    while lo < len(tokens) and tokens[lo].type in skip:
        lo += 1

    if lo == len(tokens):
        return []

    # Walk back over ignorable tokens at the tail.
    hi = len(tokens) - 1
    while hi > 0 and tokens[hi].type in trailing:
        hi -= 1

    return tokens[lo:hi + 1]
|
||||
|
||||
|
||||
def pp(items):
    """
    Pretty-print *items*.

    Non-iterable values and empty collections are rendered via str();
    otherwise each element is printed on its own line, every line
    preceded by the " \\n" separator.
    """
    # Non-iterables never reach len() thanks to short-circuiting.
    if not hasattr(items, "__iter__") or len(items) == 0:
        return str(items)

    rendered = [str(entry) for entry in items]
    return " \n" + " \n".join(rendered)
|
||||
|
||||
Reference in New Issue
Block a user