This article collects typical usage examples of the lark.Lark.open method in Python. If you are looking for how to call Lark.open, what its arguments mean, or what it looks like in real projects, the curated code samples below should help. You can also explore further usage examples of the containing class, lark.Lark.
The following shows 9 code examples of the Lark.open method, sorted by popularity by default.
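Before the project-specific examples, here is a minimal, self-contained sketch of the basic call: Lark.open(path, **options) reads a grammar file from disk and returns a Lark parser, equivalent to passing the file's contents to the Lark constructor. The grammar and the file name hello.lark are made up purely for illustration.

# Minimal sketch of Lark.open (the grammar and file name are hypothetical).
from pathlib import Path
from lark import Lark

# Write a tiny grammar to disk so that Lark.open has a file to load.
Path("hello.lark").write_text('start: WORD+\nWORD: /\\w+/\n%ignore " "\n')

parser = Lark.open("hello.lark", parser="lalr", start="start")
print(parser.parse("hello world").pretty())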
Example 1: _get_parser
# Required imports: from lark import Lark [as alias]
# Or: from lark.Lark import open [as alias]
def _get_parser(
    self,
    name: str,
    add_metadata: bool = False,
    grammar_filename: str = "gdscript.lark",
) -> Tree:
    version: str = pkg_resources.get_distribution("gdtoolkit").version
    tree: Tree = None
    cache_filepath: str = os.path.join(
        self._cache_dirpath, version, name
    ) + ".pickle"
    grammar_filepath: str = os.path.join(self._directory, grammar_filename)
    if not os.path.exists(cache_filepath) or not self._use_grammar_cache:
        # Build the parser from the .lark grammar file and cache it on disk.
        tree = Lark.open(
            grammar_filepath,
            parser="lalr",
            start="start",
            postlex=Indenter(),
            propagate_positions=add_metadata,
            maybe_placeholders=False,
        )
        self.save(tree, cache_filepath)
    else:
        # Reuse the parser that was pickled by a previous run.
        tree = self.load(cache_filepath)
    return tree
Example 2: save
# Required imports: from lark import Lark [as alias]
# Or: from lark.Lark import open [as alias]
def save(a_parser: Tree, path: str) -> None:
    """Serializes the Lark parser and saves it to the disk."""
    data, memo = a_parser.memo_serialize([TerminalDef, Rule])
    write_data: dict = {
        "data": data,
        "memo": memo,
    }
    dirpath: str = os.path.dirname(path)
    if not os.path.exists(dirpath):
        os.makedirs(dirpath)
    with open(path, "wb") as file_parser:
        pickle.dump(write_data, file_parser, pickle.HIGHEST_PROTOCOL)
Example 3: load
# Required imports: from lark import Lark [as alias]
# Or: from lark.Lark import open [as alias]
def load(path: str) -> Tree:
    """Loads the Lark parser from the disk and deserializes it."""
    with open(path, "rb") as file_parser:
        data: dict = pickle.load(file_parser)
    namespace = {"Rule": Rule, "TerminalDef": TerminalDef}
    return Lark.deserialize(
        data["data"],
        namespace,
        data["memo"],
        transformer=None,
        postlex=Indenter(),
    )
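Examples 2 and 3 are the two halves of the caching scheme used in Example 1: save() pickles the output of the parser's memo_serialize(), and load() rebuilds the parser with Lark.deserialize() instead of re-reading the grammar. A standalone sketch of that round trip, with a made-up inline grammar and cache file name, could look like this:

# Standalone sketch of the serialize/deserialize round trip (the grammar and
# the cache file name "parser.pickle" are made up for illustration).
import pickle
from lark import Lark
from lark.grammar import Rule
from lark.lexer import TerminalDef

parser = Lark('start: WORD+\nWORD: /\\w+/\n%ignore " "', parser="lalr")

# Serialize the parser to plain data structures and pickle them.
data, memo = parser.memo_serialize([TerminalDef, Rule])
with open("parser.pickle", "wb") as cache_file:
    pickle.dump({"data": data, "memo": memo}, cache_file, pickle.HIGHEST_PROTOCOL)

# Later (or in another process): rebuild the parser from the pickle.
with open("parser.pickle", "rb") as cache_file:
    stored = pickle.load(cache_file)
restored = Lark.deserialize(
    stored["data"], {"Rule": Rule, "TerminalDef": TerminalDef}, stored["memo"]
)
print(restored.parse("hello world").pretty())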
Example 4: _read
# Required imports: from lark import Lark [as alias]
# Or: from lark.Lark import open [as alias]
def _read(fn, *args):
    kwargs = {'encoding': 'iso-8859-1'}
    with open(fn, *args, **kwargs) as f:
        return f.read()
Example 5: get_lark_grammar
# Required imports: from lark import Lark [as alias]
# Or: from lark.Lark import open [as alias]
def get_lark_grammar():
    return Lark.open(
        "tests/grammar/vyper.lark",
        parser="lalr",
        start="module",
        postlex=PythonIndenter(),
    )
Example 6: _setup_parser
# Required imports: from lark import Lark [as alias]
# Or: from lark.Lark import open [as alias]
def _setup_parser(self):
    self.substitutions = 0
    self.reductions = 0
    self.const_handler = ConstHandler()
    # Load the grammar from disk, applying the project's transformer during parsing.
    self.parser = Lark.open(
        GRAMMAR_FILE,
        transformer=LambdaTransformer(self.const_handler),
        **self.lark_kwargs,
    )
Example 7: _get_parser
# Required imports: from lark import Lark [as alias]
# Or: from lark.Lark import open [as alias]
def _get_parser(self, language_spec):
    """Get a parser and lazily create it if necessary"""
    try:
        return self._parsers[language_spec.code]
    except KeyError:
        parser = Lark.open(
            str(self.grammerfile),
            parser="lalr",
            transformer=self._transformer,
            edit_terminals=language_spec,
        )
        self._parsers[language_spec.code] = parser
        return parser
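The edit_terminals argument in Example 7 lets the caller rewrite terminal definitions before Lark builds the lexer; here it is presumably how radish localizes the Gherkin keywords per language. A minimal hedged sketch of such a callback (not radish's actual LanguageSpec, and with a made-up toy grammar) is shown below.

# Minimal sketch of an edit_terminals callback (hypothetical, for illustration only).
# Lark applies the callback to every terminal definition before building the lexer,
# so the callback can rewrite terminal patterns, e.g. to localize keywords.
from lark import Lark

def localize_keywords(terminal_def):
    if terminal_def.name == "FEATURE_KW":
        terminal_def.pattern.value = "Funktionalität"  # swap in the German keyword

parser = Lark(
    'start: FEATURE_KW NAME\nFEATURE_KW: "Feature"\nNAME: /\\w+/\n%ignore " "',
    parser="lalr",
    edit_terminals=localize_keywords,
)
print(parser.parse("Funktionalität Rechner").pretty())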
Example 8: parse_file
# Required imports: from lark import Lark [as alias]
# Or: from lark.Lark import open [as alias]
def parse_file(self, featurefile: Path, feature_id: int = 0):
    """Parse the given Feature File using the parser"""
    with open(str(featurefile), "r", encoding="utf-8") as featurefile_f:
        contents = featurefile_f.read()
    return self.parse_contents(featurefile, contents, feature_id)
Example 9: _detect_language
# Required imports: from lark import Lark [as alias]
# Or: from lark.Lark import open [as alias]
def _detect_language(self, featurefile_contents: str):
    """Detect the specified language in the first line of the Feature File

    If no language code is detected ``en`` is used.
    If an unknown language code is detected an error is raised.
    """

    def __get_language_spec(code):
        language_spec_path = (
            Path(__file__).parent / "languages" / "{}.json".format(code)
        )
        if not language_spec_path.exists():
            raise RadishLanguageNotFound(code)

        with open(
            str(language_spec_path), "r", encoding="utf-8"
        ) as language_spec_file:
            keywords = json.load(language_spec_file)

        return LanguageSpec(code, keywords)

    match = re.match(
        r"^#\s*language:\s*(?P<code>[a-zA-Z-]{2,})", featurefile_contents.lstrip()
    )
    language_code = match.groupdict()["code"] if match else "en"
    return __get_language_spec(language_code)
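The detection in Example 9 only inspects the start of the feature file, so everything hinges on the regular expression and the "en" fallback. A small hedged illustration with made-up feature file contents:

# Illustration of the language-detection regex from Example 9
# (the feature file contents are made up).
import re

LANGUAGE_RE = r"^#\s*language:\s*(?P<code>[a-zA-Z-]{2,})"

german_feature = "# language: de\nFunktionalität: Taschenrechner\n"
plain_feature = "Feature: Calculator\n"

for contents in (german_feature, plain_feature):
    match = re.match(LANGUAGE_RE, contents.lstrip())
    code = match.groupdict()["code"] if match else "en"
    print(code)  # prints "de", then the fallback "en"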