Introduced ParserInput

This commit is contained in:
2020-05-25 18:09:12 +02:00
parent c79403443f
commit 479461c0a4
35 changed files with 768 additions and 480 deletions
+41 -37
View File
@@ -5,7 +5,7 @@ from enum import Enum
import core.utils
from core.builtin_concepts import BuiltinConcepts
from core.concept import VARIABLE_PREFIX, Concept, DEFINITION_TYPE_BNF, ConceptParts
from core.sheerka.ExecutionContext import ExecutionContext
from core.sheerka.services.SheerkaExecute import ParserInput
from core.tokenizer import TokenKind, LexerError, Token, Keywords
from parsers.BaseParser import Node, BaseParser, ErrorNode
@@ -86,7 +86,6 @@ class UnrecognizedTokensNode(LexerNode):
else:
self.end -= 1
def has_open_paren(self):
    """Report whether this node still tracks at least one open parenthesis.

    Returns True while ``parenthesis_count`` is positive, False otherwise.
    """
    return 0 < self.parenthesis_count
@@ -598,13 +597,13 @@ class BaseNodeParser(BaseParser):
else:
self.concepts_by_first_keyword = None
self.token = None
self.pos = -1
self.tokens = None
self.context: ExecutionContext = None
self.text = None
self.sheerka = None
# self.token = None
# self.pos = -1
# self.tokens = None
#
# self.context: ExecutionContext = None
# self.text = None
# self.sheerka = None
def init_from_concepts(self, context, concepts, **kwargs):
"""
@@ -617,43 +616,48 @@ class BaseNodeParser(BaseParser):
concepts_by_first_keyword = self.get_concepts_by_first_keyword(context, concepts).body
self.concepts_by_first_keyword = self.resolve_concepts_by_first_keyword(context, concepts_by_first_keyword).body
# NOTE(review): this span is rendered from a diff view with the +/- markers
# stripped, so "before" and "after" lines of reset_parser are interleaved
# below. The before-side lines are flagged individually — confirm against
# the actual post-commit file before editing.
def reset_parser(self, context, text):
# ^ NOTE(review): before-side signature (took raw text); superseded by the
#   ParserInput-based signature on the next line.
def reset_parser(self, context, parser_input: ParserInput):
# Bind the execution context and its sheerka for later error reporting.
self.context = context
self.sheerka = context.sheerka
self.text = text
# ^ NOTE(review): before-side line; the new version stores parser_input instead.
self.parser_input = parser_input
try:
self.tokens = list(self.get_input_as_tokens(text))
# ^ NOTE(review): before-side tokenisation, replaced by parser_input.reset below.
self.parser_input.reset(False)
except LexerError as e:
# Tokenisation failure: record an ERROR concept and signal failure
# instead of letting the LexerError propagate.
self.add_error(self.sheerka.new(BuiltinConcepts.ERROR, body=e), False)
return False
self.token = None
self.pos = -1
# ^ NOTE(review): before-side position reset — presumably now handled
#   inside ParserInput.reset; verify.
return True
# self.text = text
#
# try:
# self.tokens = list(self.get_input_as_tokens(text))
#
#
# self.token = None
# self.pos = -1
# return True
def add_error(self, error, next_token=True):
    """Record *error* in the error sink and hand it back to the caller.

    When ``next_token`` is true, the parser is also advanced one token
    (via ``self.next_token()``) after recording the error.
    """
    self.error_sink.append(error)
    if not next_token:
        return error
    self.next_token()
    return error
# def add_error(self, error, next_token=True):
# self.error_sink.append(error)
# if next_token:
# self.parser_input.next_token()
# return error
def get_token(self) -> Token:
# Accessor for the parser's current token (``self.token``); does not advance.
return self.token
def next_token(self, skip_whitespace=True):
    """Advance ``self.pos``/``self.token`` to the next token in ``self.tokens``.

    With ``skip_whitespace`` true (the default), WHITESPACE and NEWLINE
    tokens are stepped over. Returns False when already positioned on EOF
    (no advance happens) or when the newly reached token is EOF; True
    otherwise.
    """
    # Already at end of input: stay put and report exhaustion.
    if self.token and self.token.type == TokenKind.EOF:
        return False

    self.pos += 1
    self.token = self.tokens[self.pos]

    # Step over layout tokens only when the caller asked for it.
    while skip_whitespace and self.token.type in (TokenKind.WHITESPACE, TokenKind.NEWLINE):
        self.pos += 1
        self.token = self.tokens[self.pos]

    return self.token.type != TokenKind.EOF
# def get_token(self) -> Token:
# return self.token
#
# def next_token(self, skip_whitespace=True):
# if self.token and self.token.type == TokenKind.EOF:
# return False
#
# self.pos += 1
# self.token = self.tokens[self.pos]
#
# if skip_whitespace:
# while self.token.type == TokenKind.WHITESPACE or self.token.type == TokenKind.NEWLINE:
# self.pos += 1
# self.token = self.tokens[self.pos]
#
# return self.token.type != TokenKind.EOF
def get_concepts(self, token, to_keep, custom=None, to_map=None, strip_quotes=False):
"""