Fixed #20: I can parse simple concepts
This commit is contained in:
+3
-10
@@ -2,22 +2,15 @@ import pytest
|
||||
|
||||
from common.global_symbols import NotInit
|
||||
from core.concept import DefinitionType
|
||||
from helpers import get_parser_input
|
||||
from parsers.ConceptDefinitionParser import ConceptDefinition, ConceptDefinitionParser
|
||||
from parsers.ParserInput import ParserInput
|
||||
from parsers.parser_utils import ParsingError, UnexpectedEof, UnexpectedToken
|
||||
from parsers.tokenizer import Keywords, Token, TokenKind
|
||||
|
||||
|
||||
def get_parser_input(text):
    """Build a ParserInput over *text*, asserting it initialized correctly."""
    parser_input = ParserInput(text)
    assert parser_input.init()
    return parser_input
|
||||
|
||||
|
||||
class TestRecognizeDefConcept:
|
||||
class TestConceptDefinitionParser:
|
||||
@pytest.fixture()
|
||||
def parser(self, sheerka):
|
||||
def parser(self):
|
||||
return ConceptDefinitionParser()
|
||||
|
||||
@pytest.mark.parametrize("text", [
|
||||
@@ -0,0 +1,142 @@
|
||||
import pytest
|
||||
|
||||
from base import BaseTest
|
||||
from conftest import NewOntology
|
||||
from evaluators.base_evaluator import MultipleChoices
|
||||
from helpers import _mt, _ut, get_concepts, get_from, get_metadata, get_parser_input
|
||||
from parsers.SimpleParserParser import SimpleConceptsParser
|
||||
|
||||
|
||||
class TestSimpleConceptsParser(BaseTest):
    """Tests for SimpleConceptsParser: recognizing previously defined concepts
    (by definition text, by name, or via a c:...: reference) inside free text.

    Matched spans are expressed with _mt(concept_id, start, end) and unmatched
    runs with _ut(text, start, end); a parse result is a MultipleChoices over
    one or more alternative token sequences.
    """

    @pytest.fixture()
    def parser(self):
        # Fresh parser per test so error_sink state never leaks between tests.
        return SimpleConceptsParser()

    @pytest.mark.parametrize("text, expected", [
        ("I am a new concept", [_mt("1003", 0, 8)]),
        ("xxx yyy I am a new concept", [_ut("xxx yyy ", 0, 3), _mt("1003", 4, 12)]),
        ("I am a new concept xxx yyy", [_mt("1003", 0, 8), _ut(" xxx yyy", 9, 12)]),
        ("xxx I am a new concept yyy", [_ut("xxx ", 0, 1), _mt("1003", 2, 10), _ut(" yyy", 11, 12)]),
        ("c:#1003:", [_mt("1003", 0, 0)]),
        ("xxx c:#1003: yyy", [_ut("xxx ", 0, 1), _mt("1003", 2, 2), _ut(" yyy", 3, 4)]),
        ("xxx c:I am: yyy", [_ut("xxx ", 0, 1), _mt("1002", 2, 2), _ut(" yyy", 3, 4)]),
        (" I am a new concept", [_ut(" ", 0, 0), _mt("1003", 1, 9)])
    ])
    def test_i_can_recognize_a_concept(self, context, parser, text, expected):
        """A known concept is matched wherever it appears; surrounding text stays unmatched."""
        with NewOntology(context, "test_i_can_recognize_a_concept"):
            get_concepts(context, "I", "I am", "I am a new concept", use_sheerka=True)

            pi = get_parser_input(text)
            res = parser.parse(context, pi)

            assert res == MultipleChoices([expected])
            assert not parser.error_sink

    @pytest.mark.parametrize("text, expected", [
        ("foo", [_mt("1001", 0, 0)]),
        ("I am a new concept", [_mt("1001", 0, 8)])
    ])
    def test_i_can_recognize_a_concept_by_its_name_and_its_definition(self, context, parser, text, expected):
        """Both the short name and the full definition resolve to the same concept id."""
        with NewOntology(context, "test_i_can_recognize_a_concept_by_its_name_and_its_definition"):
            get_concepts(context, get_metadata(name="foo", definition="I am a new concept"), use_sheerka=True)

            pi = get_parser_input(text)
            res = parser.parse(context, pi)

            assert res == MultipleChoices([expected])
            assert not parser.error_sink

    @pytest.mark.parametrize("text, expected", [
        ("long concept name", [_mt("1001", 0, 4)]),
        ("I am a new concept", [_mt("1001", 0, 8)])
    ])
    def test_i_can_recognize_a_concept_by_its_name_when_long_name(self, context, parser, text, expected):
        """A multi-word name is matched as a whole, just like the definition text."""
        with NewOntology(context, "test_i_can_recognize_a_concept_by_its_name_when_long_name"):
            get_concepts(context, get_metadata(name="long concept name", definition="I am a new concept"),
                         use_sheerka=True)

            pi = get_parser_input(text)
            res = parser.parse(context, pi)

            assert res == MultipleChoices([expected])
            assert not parser.error_sink

    def test_i_can_parse_a_sequence_of_concept(self, context, parser):
        """Several concepts in one input yield alternating matched/unmatched tokens."""
        with NewOntology(context, "test_i_can_parse_a_sequence_of_concept"):
            get_concepts(context, "foo bar", "baz", "qux", use_sheerka=True)

            pi = get_parser_input("foo bar baz foo, qux")
            res = parser.parse(context, pi)

            # "foo bar", "baz" and "qux" match; the lone "foo," in between does not.
            expected = [_mt("1001", 0, 2),
                        _ut(" ", 3, 3),
                        _mt("1002", 4, 4),
                        _ut(" foo, ", 5, 8),
                        _mt("1003", 9, 9)]

            assert res == MultipleChoices([expected])
            assert not parser.error_sink

    def test_i_can_detect_multiple_choices(self, context, parser):
        """Overlapping concepts ("foo bar" vs "bar baz") produce two parse choices."""
        with NewOntology(context, "test_i_can_detect_multiple_choices"):
            get_concepts(context, "foo bar", "bar baz", use_sheerka=True)

            pi = get_parser_input("foo bar baz")
            res = parser.parse(context, pi)

            expected1 = [_mt("1001", 0, 2), _ut(" baz", 3, 4)]
            expected2 = [_ut("foo ", 0, 1), _mt("1002", 2, 4)]

            assert res == MultipleChoices([expected1, expected2])
            assert not parser.error_sink

    def test_i_can_detect_multiple_choices_2(self, context, parser):
        """The input can parse as one compound concept or two single-word concepts."""
        with NewOntology(context, "test_i_can_detect_multiple_choices_2"):
            get_concepts(context, "one two", "one", "two", use_sheerka=True)

            pi = get_parser_input("one two")
            res = parser.parse(context, pi)

            expected1 = [_mt("1001", 0, 2)]
            expected2 = [_mt("1002", 0, 0), _ut(" ", 1, 1), _mt("1003", 2, 2)]

            assert res == MultipleChoices([expected1, expected2])
            assert not parser.error_sink

    def test_i_can_detect_multiple_choices_3(self, context, parser):
        """Two ambiguous regions combine into the cartesian product of choices."""
        # FIX: the ontology name was copy-pasted from ..._2; use this test's own
        # name so each test works against its own isolated ontology.
        with NewOntology(context, "test_i_can_detect_multiple_choices_3"):
            get_concepts(context, "one two", "one", "two", use_sheerka=True)

            pi = get_parser_input("one two xxx one two")
            res = parser.parse(context, pi)

            e1 = get_from(_mt("c:one two#1001:"), _ut(" xxx "), _mt("c:#1001:"))
            e2 = get_from(_mt("c:one#1002:"), _ut(" "), _mt("c:two#1003:"), _ut(" xxx "), _mt("c:one two#1001:"))
            e3 = get_from(_mt("c:one two#1001:"), _ut(" xxx "), _mt("c:one#1002:"), _ut(" "), _mt("c:two#1003:"))
            e4 = get_from(_mt("c:one#1002:"), _ut(" "), _mt("c:two#1003:"), _ut(" xxx "), _mt("c:#1002:"), _ut(" "),
                          _mt("c:#1003:"))

            assert res == MultipleChoices([e1, e2, e3, e4])
            assert not parser.error_sink

    def test_nothing_is_returned_if_no_concept_is_recognized(self, context, parser):
        """With no ontology content loaded the parser yields an empty choice set."""
        pi = get_parser_input("one two three")
        res = parser.parse(context, pi)

        assert res == MultipleChoices([])

    def test_i_can_manage_attribute_reference(self, context, parser):
        """A trailing ".attribute" stays unmatched text after the recognized concept."""
        # FIX: the ontology name was copy-pasted from ..._2; use this test's own
        # name so each test works against its own isolated ontology.
        with NewOntology(context, "test_i_can_manage_attribute_reference"):
            get_concepts(context, "foo", "i am a concept", use_sheerka=True)

            pi = get_parser_input("foo.attribute")
            res = parser.parse(context, pi)
            expected = [_mt("1001", 0, 0), _ut(".attribute", 1, 2)]
            assert res == MultipleChoices([expected])

            pi = get_parser_input("i am a concept.attribute")
            res = parser.parse(context, pi)
            expected = [_mt("1002", 0, 6), _ut(".attribute", 7, 8)]
            assert res == MultipleChoices([expected])
|
||||
|
||||
@@ -0,0 +1,82 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
from parsers.state_machine import End, Start, State, StateMachine, StateResult
|
||||
|
||||
|
||||
@dataclass
class DummyExecutionContext:
    """Minimal execution context for state-machine tests: a single counter."""

    # Distinguishes forked contexts from the original one.
    count: int

    def to_debug(self):
        """Expose the context state as a plain dict for audit/debug output."""
        debug_view = {"count": self.count}
        return debug_view
|
||||
|
||||
|
||||
class GenericTestState(State):
    """Test state that unconditionally moves to *next_state*, optionally forking.

    *fork*, when given, is a list of (state_name, execution_context) pairs
    returned verbatim inside the StateResult.
    """

    def __init__(self, name, next_state, fork=None):
        super().__init__(name=name, next_states=[next_state])
        self.next_state = next_state
        self.fork = fork

    def run(self, state_context) -> StateResult:
        # The context is deliberately ignored: behaviour is fixed at construction.
        return StateResult(self.next_state, self.fork)

    def __repr__(self):
        fork_count = len(self.fork) if self.fork else 0
        return f"(GenericTestState {self.name} -> {self.next_state}, forks={fork_count})"
|
||||
|
||||
|
||||
def test_i_can_execute_a_workflow():
    """A strictly linear workflow runs along a single path, visiting every
    state from start to end in order."""
    states = [Start("start", ["a"]),
              GenericTestState("a", "b"),
              GenericTestState("b", "c"),
              GenericTestState("c", "end"),
              End("end", None)]
    workflow = {state.name: state for state in states}

    machine = StateMachine({"#wkf": workflow})
    machine.run("#wkf", "start", DummyExecutionContext(0))

    assert len(machine.paths) == 1
    assert machine.paths[0].get_audit_trail() == ["#wkf:start", "#wkf:a", "#wkf:b", "#wkf:c", "#wkf:end"]
|
||||
|
||||
|
||||
def test_i_can_change_workflow():
    """A successor naming another workflow ("#wkf2") makes the machine jump
    into that workflow and continue from its start state."""
    first_states = [Start("start", ["a"]),
                    GenericTestState("a", "#wkf2")]
    second_states = [Start("start", ["c"]),
                     GenericTestState("c", "end"),
                     End("end", None)]

    workflows = {
        "#wkf1": {state.name: state for state in first_states},
        "#wkf2": {state.name: state for state in second_states}
    }

    machine = StateMachine(workflows)
    machine.run("#wkf1", "start", DummyExecutionContext(0))

    assert len(machine.paths) == 1
    assert machine.paths[0].get_audit_trail() == ["#wkf1:start", "#wkf1:a", "#wkf2:start", "#wkf2:c", "#wkf2:end"]
|
||||
|
||||
|
||||
def test_i_can_fork_path():
    """A state returning three forks yields four paths: the original plus one
    child per fork, each child recording path 0 as its parent."""
    states = [Start("start", ["a"]),
              GenericTestState("a", "end", [("b", DummyExecutionContext(idx)) for idx in range(3)]),
              GenericTestState("b", "end"),
              End("end", None)]
    workflow = {state.name: state for state in states}

    machine = StateMachine({"#wkf": workflow})
    machine.run("#wkf", "start", DummyExecutionContext(0))

    assert len(machine.paths) == 4
    # The original path went straight to "end" and records the three fork ids.
    assert machine.paths[0].get_audit_trail() == ["#wkf:start", "#wkf:a", "#wkf:end"]
    assert machine.paths[0].history[1].forks == [1, 2, 3]
    # Each forked path detours through "b" and points back at path 0.
    for child in (1, 2, 3):
        assert machine.paths[child].get_audit_trail() == ["#wkf:start", "#wkf:a", "#wkf:b", "#wkf:end"]
        assert machine.paths[child].history[0].parents == [0]
|
||||
Reference in New Issue
Block a user