ExactConceptParser can now recognize concepts by their names

This commit is contained in:
2020-05-21 16:27:18 +02:00
parent d357329f51
commit 37d3d16e21
17 changed files with 347 additions and 112 deletions
+41 -23
View File
@@ -3,7 +3,9 @@ import logging
from core.builtin_concepts import ReturnValueConcept, BuiltinConcepts
from core.concept import VARIABLE_PREFIX
from core.tokenizer import Keywords, TokenKind, LexerError
from core.utils import str_concept
from parsers.BaseParser import BaseParser
import core.builtin_helpers
class ExactConceptParser(BaseParser):
@@ -26,8 +28,8 @@ class ExactConceptParser(BaseParser):
"""
context.log(f"Parsing '{parser_input}'", self.name)
res = []
sheerka = context.sheerka
try:
words = self.get_words(parser_input)
except LexerError as e:
@@ -40,7 +42,7 @@ class ExactConceptParser(BaseParser):
body = sheerka.new(BuiltinConcepts.NOT_FOR_ME, body=parser_input, reason=too_long)
return sheerka.ret(self.name, False, body)
recognized = [] # keep track of the concepts found
already_recognized = [] # keep track of the concepts found
for combination in self.combinations(words):
concept_key = " ".join(combination)
@@ -52,7 +54,7 @@ class ExactConceptParser(BaseParser):
concepts = result if isinstance(result, list) else [result]
for concept in concepts:
if concept.id in recognized:
if concept in already_recognized:
context.log(f"Recognized concept {concept} again. Skipping.", self.name)
# example
# if the input is foo a and a concept is defined as foo a
@@ -65,38 +67,33 @@ class ExactConceptParser(BaseParser):
for i, token in enumerate(combination):
if token.startswith(VARIABLE_PREFIX):
index = int(token[len(VARIABLE_PREFIX):])
concept.def_var_by_index(index, words[i])
value = words[i]
concept.def_var_by_index(index, str_concept(value) if isinstance(value, tuple) else value)
concept.metadata.need_validation = True
if self.verbose_log.isEnabledFor(logging.DEBUG):
prop_name = concept.metadata.variables[index][0]
context.log(
f"Added property {index}: {prop_name}='{words[i]}'.",
f"Added variable {index}: {prop_name}='{words[i]}'.",
self.name)
res.append(ReturnValueConcept(
self.name,
True,
context.sheerka.new(
BuiltinConcepts.PARSER_RESULT,
parser=self,
source=parser_input if isinstance(parser_input, str) else self.get_text_from_tokens(
parser_input),
body=concept,
try_parsed=concept)))
recognized.append(concept.id)
already_recognized.append(concept)
if len(recognized) > 0:
by_name = sheerka.resolve(self.get_input_as_text(parser_input))
core.builtin_helpers.set_is_evaluated(by_name)
recognized = self.merge_concepts(already_recognized, by_name)
if len(recognized) == 0:
ret = sheerka.ret(self.name, False, sheerka.new(BuiltinConcepts.UNKNOWN_CONCEPT, body=parser_input))
self.log_result(context, parser_input, ret)
return ret
else:
res = [self.as_return_value(context, parser_input, c) for c in recognized]
if len(res) == 1:
self.log_result(context, parser_input, res[0])
else:
self.log_multiple_results(context, parser_input, res)
return res
return res
ret = sheerka.ret(self.name, False, sheerka.new(BuiltinConcepts.UNKNOWN_CONCEPT, body=parser_input))
self.log_result(context, parser_input, ret)
return ret
def get_words(self, text):
tokens = self.get_input_as_tokens(text)
res = []
@@ -138,7 +135,17 @@ class ExactConceptParser(BaseParser):
indices[j] = indices[j - 1] + 1
res.add(self.get_tuple(pool, indices))
return res
# remove all results that contain a token concept
# They are not valid entries, since a token concept MUST be replaced by a variable
filtered = set()
for combination in res:
for entry in combination:
if isinstance(entry, tuple):
break
else:
filtered.add(combination)
return filtered
@staticmethod
def get_tuple(pool, indices):
@@ -158,3 +165,14 @@ class ExactConceptParser(BaseParser):
value = pool[i]
res.append(vars[value] if value in vars else value)
return tuple(res)
def as_return_value(self, context, parser_input, concept):
    """Wrap *concept* in a ReturnValueConcept carrying a PARSER_RESULT.

    The PARSER_RESULT records this parser, the textual source of the
    input (tokens are rendered back to text first), and the concept
    both as body and as the try_parsed value.
    """
    # Tokens must be rendered back to their textual form for the source field.
    if isinstance(parser_input, str):
        source_text = parser_input
    else:
        source_text = self.get_text_from_tokens(parser_input)
    parser_result = context.sheerka.new(
        BuiltinConcepts.PARSER_RESULT,
        parser=self,
        source=source_text,
        body=concept,
        try_parsed=concept)
    return ReturnValueConcept(self.name, True, parser_result)