This article collects typical usage examples of the Python function sphinx.versioning.merge_doctrees. If you have been wondering exactly what merge_doctrees does and how to use it, the hand-picked examples below may help.
Eight code examples of merge_doctrees are shown, ordered by popularity.
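Note that the test examples below come from Sphinx's own test suite and rely on module-level fixtures (doctrees, original, original_uids, is_paragraph) that are built elsewhere in that test module, so they are not runnable on their own. As a rough, self-contained sketch of what merge_doctrees does, the snippet below parses two revisions of a document with plain docutils and merges their paragraph uids; the sample text and the is_paragraph helper here are illustrative assumptions, not part of the Sphinx sources.

from docutils import nodes
from docutils.core import publish_doctree
from sphinx.versioning import add_uids, merge_doctrees

def is_paragraph(node):
    # only track paragraph nodes, as the test examples below do
    return isinstance(node, nodes.paragraph)

# two revisions of the same document (illustrative text)
old = publish_doctree("First paragraph.\n\nSecond paragraph.\n")
new = publish_doctree("First paragraph.\n\n"
                      "A freshly written paragraph that did not exist before.\n\n"
                      "Second paragraph.\n")

# both helpers are generators, so wrap them in list() to run them
list(add_uids(old, is_paragraph))                        # assign uids to the old revision
new_nodes = list(merge_doctrees(old, new, is_paragraph))

# paragraphs that match the old revision keep their old uid;
# merge_doctrees yields only the nodes that received a fresh uid
print([n.astext() for n in new_nodes])

This mirrors the pattern used in the read_doc example further down: add_uids seeds a first build with uids, and merge_doctrees carries those uids forward when a document is read again.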
Example 1: test_insert
def test_insert():
    insert = doctrees['versioning/insert']
    new_nodes = list(merge_doctrees(original, insert, is_paragraph))
    uids = [n.uid for n in insert.traverse(is_paragraph)]
    assert len(new_nodes) == 1
    assert original_uids[0] == uids[0]
    assert original_uids[1:] == uids[2:]
Example 2: test_insert_similar
def test_insert_similar():
    insert_similar = doctrees['versioning/insert_similar']
    new_nodes = list(merge_doctrees(original, insert_similar, is_paragraph))
    uids = [n.uid for n in insert_similar.traverse(is_paragraph)]
    assert len(new_nodes) == 1
    assert new_nodes[0].rawsource == u'Anyway I need more'
    assert original_uids[0] == uids[0]
    assert original_uids[1:] == uids[2:]
Example 3: test_insert_beginning
def test_insert_beginning():
    insert_beginning = doctrees['versioning/insert_beginning']
    new_nodes = list(merge_doctrees(original, insert_beginning, is_paragraph))
    uids = [n.uid for n in insert_beginning.traverse(is_paragraph)]
    assert len(new_nodes) == 1
    assert len(uids) == 4
    assert original_uids == uids[1:]
    assert original_uids[0] != uids[0]
Example 4: read_doc
def read_doc(self, docname, app=None):
    # type: (unicode, Sphinx) -> None
    """Parse a file and add/update inventory entries for the doctree."""
    self.temp_data['docname'] = docname
    # defaults to the global default, but can be re-set in a document
    self.temp_data['default_domain'] = \
        self.domains.get(self.config.primary_domain)

    self.settings['input_encoding'] = self.config.source_encoding
    self.settings['trim_footnote_reference_space'] = \
        self.config.trim_footnote_reference_space
    self.settings['gettext_compact'] = self.config.gettext_compact

    docutilsconf = path.join(self.srcdir, 'docutils.conf')
    # read docutils.conf from source dir, not from current dir
    OptionParser.standard_config_files[1] = docutilsconf
    if path.isfile(docutilsconf):
        self.note_dependency(docutilsconf)

    with sphinx_domains(self):
        if self.config.default_role:
            role_fn, messages = roles.role(self.config.default_role, english,
                                           0, dummy_reporter)
            if role_fn:
                roles._roles[''] = role_fn
            else:
                logger.warning('default role %s not found', self.config.default_role,
                               location=docname)

        codecs.register_error('sphinx', self.warn_and_replace)  # type: ignore

        # publish manually
        reader = SphinxStandaloneReader(self.app, parsers=self.config.source_parsers)
        pub = Publisher(reader=reader,
                        writer=SphinxDummyWriter(),
                        destination_class=NullOutput)
        pub.set_components(None, 'restructuredtext', None)
        pub.process_programmatic_settings(None, self.settings, None)
        src_path = self.doc2path(docname)
        source = SphinxFileInput(app, self, source=None, source_path=src_path,
                                 encoding=self.config.source_encoding)
        pub.source = source
        pub.settings._source = src_path
        pub.set_destination(None, None)
        pub.publish()
        doctree = pub.document

    # post-processing
    for domain in itervalues(self.domains):
        domain.process_doc(self, docname, doctree)

    # allow extension-specific post-processing
    if app:
        app.emit('doctree-read', doctree)

    # store time of reading, for outdated files detection
    # (Some filesystems have coarse timestamp resolution;
    # therefore time.time() can be older than filesystem's timestamp.
    # For example, FAT32 has 2sec timestamp resolution.)
    self.all_docs[docname] = max(
        time.time(), path.getmtime(self.doc2path(docname)))

    if self.versioning_condition:
        old_doctree = None
        if self.versioning_compare:
            # get old doctree
            try:
                with open(self.doc2path(docname,
                                        self.doctreedir, '.doctree'), 'rb') as f:
                    old_doctree = pickle.load(f)
            except EnvironmentError:
                pass

        # add uids for versioning
        if not self.versioning_compare or old_doctree is None:
            list(add_uids(doctree, self.versioning_condition))
        else:
            list(merge_doctrees(
                old_doctree, doctree, self.versioning_condition))

    # make it picklable
    doctree.reporter = None
    doctree.transformer = None
    doctree.settings.warning_stream = None
    doctree.settings.env = None
    doctree.settings.record_dependencies = None

    # cleanup
    self.temp_data.clear()
    self.ref_context.clear()
    roles._roles.pop('', None)  # if a document has set a local default role

    # save the parsed doctree
    doctree_filename = self.doc2path(docname, self.doctreedir,
                                     '.doctree')
    ensuredir(path.dirname(doctree_filename))
    with open(doctree_filename, 'wb') as f:
        pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
Example 5: test_deleted_end
def test_deleted_end():
    deleted_end = doctrees['versioning/deleted_end']
    new_nodes = list(merge_doctrees(original, deleted_end, is_paragraph))
    uids = [n.uid for n in deleted_end.traverse(is_paragraph)]
    assert not new_nodes
    assert original_uids[:-1] == uids
Example 6: test_added
def test_added():
    added = doctrees['versioning/added']
    new_nodes = list(merge_doctrees(original, added, is_paragraph))
    uids = [n.uid for n in added.traverse(is_paragraph)]
    assert len(new_nodes) == 1
    assert original_uids == uids[:-1]
Example 7: test_modified
def test_modified():
    modified = doctrees['versioning/modified']
    new_nodes = list(merge_doctrees(original, modified, is_paragraph))
    uids = [n.uid for n in modified.traverse(is_paragraph)]
    assert not new_nodes
    assert original_uids == uids
Example 8: test_deleted
def test_deleted():
    deleted = doctrees['deleted']
    new_nodes = list(merge_doctrees(original, deleted, is_paragraph))
    uids = [n.uid for n in deleted.traverse(is_paragraph)]
    assert not new_nodes
    assert original_uids[::2] == uids