本文整理汇总了Python中sphinx.util.status_iterator函数的典型用法代码示例。如果您正苦于以下问题:Python status_iterator函数的具体用法?Python status_iterator怎么用?Python status_iterator使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了status_iterator函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_status_iterator
def test_status_iterator(app, status, warning):
    """Exercise status_iterator output in legacy mode and both verbosity modes."""
    logging.setup(app, status, warning)

    # legacy behaviour: all items echoed on one running line
    status.truncate(0)
    items = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... '))
    text = strip_escseq(status.getvalue())
    assert 'testing ... hello sphinx world \n' in text
    assert items == ['hello', 'sphinx', 'world']

    # verbosity=0: percentage prefix, carriage-return progress updates
    status.truncate(0)
    items = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... ',
                                 length=3, verbosity=0))
    text = strip_escseq(status.getvalue())
    assert 'testing ... [ 33%] hello \r' in text
    assert 'testing ... [ 66%] sphinx \r' in text
    assert 'testing ... [100%] world \r\n' in text
    assert items == ['hello', 'sphinx', 'world']

    # verbosity=1: one full line per item
    status.truncate(0)
    items = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... ',
                                 length=3, verbosity=1))
    text = strip_escseq(status.getvalue())
    assert 'testing ... [ 33%] hello\n' in text
    assert 'testing ... [ 66%] sphinx\n' in text
    assert 'testing ... [100%] world\n\n' in text
    assert items == ['hello', 'sphinx', 'world']
示例2: _write_parallel
def _write_parallel(self, docnames, nproc):
    # type: (Sequence[unicode], int) -> None
    """Resolve and write documents using *nproc* parallel worker tasks.

    Doctrees are resolved serially in the main process; only the final
    write step is farmed out to workers.
    """
    def write_process(docs):
        # type: (List[Tuple[unicode, nodes.Node]]) -> None
        # Worker body: write each (docname, already-resolved doctree) pair.
        self.app.phase = BuildPhase.WRITING
        for docname, doctree in docs:
            self.write_doc(docname, doctree)

    # warm up caches/compile templates using the first document
    firstname, docnames = docnames[0], docnames[1:]
    self.app.phase = BuildPhase.RESOLVING
    doctree = self.env.get_and_resolve_doctree(firstname, self)
    self.app.phase = BuildPhase.WRITING
    self.write_doc_serialized(firstname, doctree)
    self.write_doc(firstname, doctree)

    tasks = ParallelTasks(nproc)
    chunks = make_chunks(docnames, nproc)

    self.app.phase = BuildPhase.RESOLVING
    for chunk in status_iterator(chunks, __('writing output... '), "darkgreen",
                                 len(chunks), self.app.verbosity):
        # resolve every doctree of the chunk here, then hand the chunk off
        arg = []
        for i, docname in enumerate(chunk):
            doctree = self.env.get_and_resolve_doctree(docname, self)
            self.write_doc_serialized(docname, doctree)
            arg.append((docname, doctree))
        tasks.add_task(write_process, arg)

    # make sure all threads have finished
    logger.info(bold(__('waiting for workers...')))
    tasks.join()
示例3: _read_parallel
def _read_parallel(self, docnames, nproc):
    # type: (List[unicode], int) -> None
    """Read source documents with *nproc* parallel tasks and merge the
    per-worker environments back into ``self.env``."""
    # clear all outdated docs at once
    for docname in docnames:
        self.app.emit('env-purge-doc', self.env, docname)
        self.env.clear_doc(docname)

    def read_process(docs):
        # type: (List[unicode]) -> bytes
        # Worker body: read each doc, then ship the whole env back pickled.
        self.env.app = self.app
        for docname in docs:
            self.read_doc(docname)
        # allow pickling self to send it back
        return pickle.dumps(self.env, pickle.HIGHEST_PROTOCOL)

    def merge(docs, otherenv):
        # type: (List[unicode], bytes) -> None
        # Main-process callback: fold a worker's pickled env into ours.
        env = pickle.loads(otherenv)
        self.env.merge_info_from(docs, env, self.app)

    tasks = ParallelTasks(nproc)
    chunks = make_chunks(docnames, nproc)

    for chunk in status_iterator(chunks, 'reading sources... ', "purple",
                                 len(chunks), self.app.verbosity):
        tasks.add_task(read_process, chunk, merge)

    # make sure all threads have finished
    logger.info(bold('waiting for workers...'))
    tasks.join()
示例4: finish
def finish(self):
    """Publish built documents and their assets, then run final cleanup.

    Publishing only happens when ``self.publish`` is set; an optional
    ``publish_subset`` restricts which docnames are actually sent.
    """
    # restore the doctree accessor that was patched during the build
    self.env.get_doctree = self._original_get_doctree

    if self.publish:
        self.legacy_assets = {}
        self.legacy_pages = None
        self.parent_id = self.publisher.getBasePageId()

        for docname in status_iterator(
                self.publish_docnames, 'publishing documents... ',
                length=len(self.publish_docnames),
                verbosity=self.app.verbosity):
            # honour an explicit subset restriction, if configured
            if self.publish_subset and docname not in self.publish_subset:
                continue
            docfile = path.join(self.outdir, self.file_transform(docname))
            try:
                with io.open(docfile, 'r', encoding='utf-8') as file:
                    output = file.read()
                    self.publish_doc(docname, output)
            except (IOError, OSError) as err:
                # best-effort: log and continue with the remaining documents
                ConfluenceLogger.warn("error reading file %s: "
                    "%s" % (docfile, err))

        def to_asset_name(asset):
            # asset tuple layout (see unpacking below): key is first
            return asset[0]

        assets = self.assets.build()
        for asset in status_iterator(assets, 'publishing assets... ',
                length=len(assets), verbosity=self.app.verbosity,
                stringify_func=to_asset_name):
            key, absfile, type, hash, docname = asset
            if self.publish_subset and docname not in self.publish_subset:
                continue
            try:
                with open(absfile, 'rb') as file:
                    output = file.read()
                    self.publish_asset(key, docname, output, type, hash)
            except (IOError, OSError) as err:
                ConfluenceLogger.warn("error reading asset %s: "
                    "%s" % (key, err))

        self.publish_purge()
        self.publish_finalize()
示例5: _read_serial
def _read_serial(self, docnames):
    # type: (List[unicode]) -> None
    """Read the given source documents one at a time, reporting progress."""
    progress = status_iterator(docnames, 'reading sources... ', "purple",
                               len(docnames), self.app.verbosity)
    for source in progress:
        # drop any stale inventory entries before re-reading this file
        self.app.emit('env-purge-doc', self.env, source)
        self.env.clear_doc(source)
        self.read_doc(source)
示例6: _write_serial
def _write_serial(self, docnames):
    # type: (Sequence[unicode]) -> None
    """Resolve and write each document sequentially, deferring warnings."""
    with logging.pending_warnings():
        progress = status_iterator(docnames, 'writing output... ', "darkgreen",
                                   len(docnames), self.app.verbosity)
        for name in progress:
            tree = self.env.get_and_resolve_doctree(name, self)
            self.write_doc_serialized(name, tree)
            self.write_doc(name, tree)
示例7: status_iterator
def status_iterator(self, iterable, summary, colorfunc=darkgreen, length=0,
                    stringify_func=_display_chunk):
    # type: (Iterable, unicode, Callable, int, Callable[[Any], unicode]) -> Iterable
    """Deprecated shim that delegates to ``sphinx.util.status_iterator``.

    NOTE(review): *colorfunc* is accepted for backward compatibility but is
    not forwarded -- the delegate is always called with color="darkgreen".
    """
    warnings.warn('app.status_iterator() is now deprecated. '
                  'Use sphinx.util.status_iterator() instead.',
                  RemovedInSphinx17Warning)
    delegated = status_iterator(iterable, summary, length=length,
                                verbosity=self.verbosity,
                                color="darkgreen",
                                stringify_func=stringify_func)
    for item in delegated:
        yield item
示例8: _write_serial
def _write_serial(self, docnames):
    # type: (Sequence[str]) -> None
    """Resolve and write documents one by one, tracking the build phase."""
    with logging.pending_warnings():
        for name in status_iterator(docnames, __('writing output... '),
                                    "darkgreen", len(docnames),
                                    self.app.verbosity):
            self.app.phase = BuildPhase.RESOLVING
            tree = self.env.get_and_resolve_doctree(name, self)
            self.app.phase = BuildPhase.WRITING
            self.write_doc_serialized(name, tree)
            self.write_doc(name, tree)
示例9: run
def run(self):
    """Copy gallery example scripts into the doc tree and render the index page.

    Reads the gallery spec JSON named by the directive argument, copies each
    example ``.py`` next to the docs, re-reads the generated gallery docs, and
    returns the parsed nodes for the rendered gallery page.

    Raises SphinxError when an example cannot be copied or read.
    """
    env = self.state.document.settings.env
    app = env.app

    # workaround (used below) for https://github.com/sphinx-doc/sphinx/issues/3924
    current_docname = env.docname

    docdir = dirname(env.doc2path(env.docname))
    specpath = join(docdir, self.arguments[0])
    dest_dir = join(dirname(specpath), "gallery")
    ensuredir(dest_dir)

    env.note_dependency(specpath)
    # use a context manager so the spec file handle is closed promptly
    # (the original json.load(open(...)) leaked the handle)
    with open(specpath) as spec_file:
        spec = json.load(spec_file)
    details = spec['details']

    details_iter = status_iterator(details,
                                   'copying gallery files... ',
                                   'brown',
                                   len(details),
                                   stringify_func=lambda x: x['name'] + ".py")

    env.gallery_updated = []
    for detail in details_iter:
        src_path = abspath(join("..", detail['path']))
        dest_path = join(dest_dir, detail['name'] + ".py")
        # sphinx pickled env works only with forward slash
        docname = join(env.app.config.bokeh_gallery_dir, detail['name']).replace("\\", "/")

        try:
            copyfile(src_path, dest_path)
        except OSError as e:
            raise SphinxError('cannot copy gallery file %r, reason: %s' % (src_path, e))

        try:
            env.clear_doc(docname)
            env.read_doc(docname, app=app)
            env.gallery_updated.append(docname)
        except Exception as e:
            raise SphinxError('failed to read gallery doc %r, reason: %s' % (docname, e))

    names = [detail['name'] for detail in details]
    rst_text = GALLERY_PAGE.render(names=names)

    # workaround for https://github.com/sphinx-doc/sphinx/issues/3924
    env.temp_data['docname'] = current_docname

    return self._parse(rst_text, "<bokeh-gallery>")
示例10: compile_catalogs
def compile_catalogs(self, catalogs, message):
    # type: (Set[CatalogInfo], unicode) -> None
    """Compile message catalogs to .mo files unless auto-build is disabled."""
    if not self.config.gettext_auto_build:
        return

    def cat2relpath(cat):
        # type: (CatalogInfo) -> unicode
        # display the .mo path relative to the source dir, with URL separators
        return relpath(cat.mo_path, self.env.srcdir).replace(path.sep, SEP)

    logger.info(bold(__('building [mo]: ')) + message)
    progress = status_iterator(catalogs, __('writing output... '), "darkgreen",
                               len(catalogs), self.app.verbosity,
                               stringify_func=cat2relpath)
    for entry in progress:
        entry.write_mo(self.config.language)
示例11: copy_image_files
def copy_image_files(self):
    # type: () -> None
    """Copy every collected image into the output directory, logging failures."""
    if not self.images:
        return
    stringify_func = ImageAdapter(self.app.env).get_original_image_uri
    progress = status_iterator(self.images, __('copying images... '), "brown",
                               len(self.images), self.app.verbosity,
                               stringify_func=stringify_func)
    for src in progress:
        dest = self.images[src]
        try:
            copy_asset_file(path.join(self.srcdir, src),
                            path.join(self.outdir, dest))
        except Exception as err:
            # best-effort copy: report and keep going with remaining images
            logger.warning(__('cannot copy image file %r: %s'),
                           path.join(self.srcdir, src), err)
示例12: write
def write(self, *ignored):
    """Render each pydoc topic label to plain text and store it in self.topics."""
    writer = TextWriter(self)
    labels = self.env.domaindata['std']['labels']
    for label in status_iterator(pydoc_topic_labels,
                                 'building topics... ',
                                 length=len(pydoc_topic_labels)):
        if label not in labels:
            self.warn('label %r not in documentation' % label)
            continue
        docname, labelid, sectname = labels[label]
        doctree = self.env.get_and_resolve_doctree(docname, self)
        # wrap just the labelled section in a fresh document for rendering
        document = new_document('<section node>')
        document.append(doctree.ids[labelid])
        destination = StringOutput(encoding='utf-8')
        writer.write(document, destination)
        self.topics[label] = writer.output
示例13: _extract_from_template
def _extract_from_template(self):
    # type: () -> None
    """Collect translatable messages from every template file into the
    'sphinx' catalog."""
    files = self._collect_templates()
    logger.info(bold('building [%s]: ' % self.name), nonl=1)
    logger.info('targets for %d template files', len(files))

    extract_translations = self.templates.environment.extract_translations

    for template in status_iterator(files, 'reading templates... ', "purple",  # type: ignore # NOQA
                                    len(files), self.app.verbosity):
        with open(template, 'r', encoding='utf-8') as f:  # type: ignore
            source = f.read()
        for lineno, _meth, msg in extract_translations(source):
            self.catalogs['sphinx'].add(msg, MsgOrigin(template, lineno))
示例14: config_inited_handler
def config_inited_handler(app, config):
    """Generate one gallery ``.rst`` stub per example listed in the gallery
    JSON spec file, and delete stubs for examples no longer listed.

    Raises SphinxError when the spec file is missing or contains duplicates.
    """
    gallery_dir = join(app.srcdir, config.bokeh_gallery_dir)
    gallery_file = gallery_dir + ".json"

    # BUGFIX: the original check `not exists(f) and isfile(f)` could never be
    # true; fail whenever the spec is missing or not a regular file.
    if not isfile(gallery_file):
        raise SphinxError("could not find gallery file %r for configured gallery dir %r" % (gallery_file, gallery_dir))

    gallery_file_mtime = getmtime(gallery_file)

    ensuredir(gallery_dir)

    # we will remove each file we process from this set and see if anything is
    # left at the end (and remove it in that case)
    extras = set(os.listdir(gallery_dir))

    # app.env.note_dependency(specpath)
    # close the spec file deterministically (the original leaked the handle)
    with open(gallery_file) as f:
        spec = json.load(f)
    details = spec['details']

    names = set(x['name'] for x in details)
    if len(names) < len(details):
        raise SphinxError("gallery file %r has duplicate names" % gallery_file)

    details_iter = status_iterator(details,
                                   'creating gallery file entries... ',
                                   'brown',
                                   len(details),
                                   app.verbosity,
                                   stringify_func=lambda x: x['name'] + ".rst")

    for detail in details_iter:
        detail_file_name = detail['name'] + ".rst"
        detail_file_path = join(gallery_dir, detail_file_name)

        # BUGFIX: os.listdir() yields bare file names, but the original
        # compared the joined path, so nothing ever matched and freshly
        # written stubs were deleted again in the cleanup loop below.
        if detail_file_name in extras:
            extras.remove(detail_file_name)

        # if the gallery detail file is newer than the gallery file, assume it is up to date
        if exists(detail_file_path) and getmtime(detail_file_path) > gallery_file_mtime:
            continue

        with open(detail_file_path, "w") as f:
            source_path = abspath(join(app.srcdir, "..", "..", detail['path']))
            f.write(GALLERY_DETAIL.render(filename=detail['name']+'.py', source_path=source_path))

    # whatever is left in extras belongs to removed examples -- clean it up
    for extra_file in extras:
        os.remove(join(gallery_dir, extra_file))
示例15: finish
def finish(self):
    # type: () -> None
    """Write one ``.pot`` message catalog per text domain collected during
    the build."""
    I18nBuilder.finish(self)
    # substitutions applied to the POHEADER template of each catalog
    data = dict(
        version = self.config.version,
        copyright = self.config.copyright,
        project = self.config.project,
        ctime = datetime.fromtimestamp(
            timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'),
    )
    for textdomain, catalog in status_iterator(iteritems(self.catalogs),  # type: ignore
                                               "writing message catalogs... ",
                                               "darkgreen", len(self.catalogs),
                                               self.app.verbosity,
                                               lambda textdomain__: textdomain__[0]):
        # noop if config.gettext_compact is set
        ensuredir(path.join(self.outdir, path.dirname(textdomain)))

        pofn = path.join(self.outdir, textdomain + '.pot')
        output = StringIO()
        output.write(POHEADER % data)  # type: ignore

        for message in catalog.messages:
            positions = catalog.metadata[message]

            if self.config.gettext_location:
                # generate "#: file1:line1\n#: file2:line2 ..."
                output.write("#: %s\n" % "\n#: ".join(  # type: ignore
                    "%s:%s" % (canon_path(
                        safe_relpath(source, self.outdir)), line)
                    for source, line, _ in positions))
            if self.config.gettext_uuid:
                # generate "# uuid1\n# uuid2\n ..."
                output.write("# %s\n" % "\n# ".join(  # type: ignore
                    uid for _, _, uid in positions))

            # message contains *one* line of text ready for translation
            # (escape backslashes, quotes, and embedded newlines for PO syntax)
            message = message.replace('\\', r'\\'). \
                replace('"', r'\"'). \
                replace('\n', '\\n"\n"')
            output.write('msgid "%s"\nmsgstr ""\n\n' % message)  # type: ignore

        content = output.getvalue()

        # only rewrite the .pot file when its content actually changed
        if should_write(pofn, content):
            with open(pofn, 'w', encoding='utf-8') as pofile:  # type: ignore
                pofile.write(content)