This article collects typical code examples for the futures.append call seen in Python programs that use concurrent.futures. If you are wondering how futures.append is used in practice, the curated examples below may help. Note that append is not itself part of the concurrent.futures API: in these snippets, futures is a plain Python list that accumulates the Future objects returned by executor.submit(). You can also explore further usage examples of the concurrent.futures module.

The following presents 15 code examples involving futures.append, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
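Before the collected examples, here is a minimal self-contained sketch of the pattern they all share: build a list named futures, append the Future objects returned by executor.submit, then collect the results. All names in this sketch are illustrative.

import concurrent.futures

def square(x):
    # Toy task; real workers would do I/O or CPU-heavy work.
    return x * x

futures = []
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
    for n in range(8):
        futures.append(executor.submit(square, n))

# Gather results as tasks finish (completion order, not submission order).
for future in concurrent.futures.as_completed(futures):
    print(future.result())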
Example 1: paralelize
# Required import: from concurrent import futures [as alias]
import concurrent.futures
from typing import Any, Callable, Sequence

def paralelize(
    objects: Sequence[Any],
    worker: Callable[[Sequence[Any]], Any],
    max_threads: int = 10,
) -> Sequence[concurrent.futures.Future]:
    """Parallelize tasks by running worker over chunks of objects.

    The objects are split into up to max_threads chunks, and each chunk is
    processed in its own thread. Connectors used inside the worker MUST be
    duplicated to ensure thread safety.

    :returns: collection of Future objects, one per thread. It is the
        caller's responsibility to check whether the threads finished
        successfully.
    """
    number_of_chunks = min(len(objects), max_threads)
    objects_chunks = chunks(objects, number_of_chunks)  # chunks() is an external helper
    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_threads) as executor:
        for objects_chunk in objects_chunks:
            futures.append(executor.submit(worker, objects_chunk))
    return futures
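The chunks helper is not included in this excerpt. Below is a hedged sketch of a compatible implementation plus a usage example; the helper's exact behavior in the original project is an assumption, as is the toy worker.

def chunks(seq, n):
    # Hypothetical helper: split seq into n contiguous, roughly equal parts.
    k, m = divmod(len(seq), n)
    return [seq[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n)]

def count_items(chunk):
    # Toy worker: each thread just reports the size of its chunk.
    return len(chunk)

futures = paralelize(list(range(100)), worker=count_items, max_threads=4)
print([f.result() for f in futures])  # [25, 25, 25, 25]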
Example 2: append
# Required import: from concurrent import futures [as alias]
def append(self, resultFile, resultElem, all_columns=False):
    """
    Append the result for one run. Needs to be called before collect_data().
    """
    self._xml_results += [
        (result, resultFile) for result in _get_run_tags_from_xml(resultElem)
    ]

    for attrib, values in RunSetResult._extract_attributes_from_result(
        resultFile, resultElem
    ).items():
        self.attributes[attrib].extend(values)

    if not self.columns:
        self.columns = RunSetResult._extract_existing_columns_from_result(
            resultFile, resultElem, all_columns
        )
Example 3: insert_logfile_names
# Required import: from concurrent import futures [as alias]
import os
import urllib.parse

def insert_logfile_names(resultFile, resultElem):
    # get folder of logfiles (truncate end of XML file name and append .logfiles instead)
    log_folder = resultFile[0 : resultFile.rfind(".results.")] + ".logfiles/"

    # append beginning of filename
    runSetName = resultElem.get("name")
    if runSetName is not None:
        blockname = resultElem.get("block")
        if blockname is None:
            log_folder += runSetName + "."
        elif blockname == runSetName:
            pass  # real runSetName is empty
        else:
            assert runSetName.endswith("." + blockname)
            runSetName = runSetName[: -(1 + len(blockname))]  # remove trailing block name
            log_folder += runSetName + "."

    # for each file: append original filename and insert log_file_name into sourcefileElement
    for sourcefile in _get_run_tags_from_xml(resultElem):
        if "logfile" in sourcefile.attrib:
            log_file = urllib.parse.urljoin(resultFile, sourcefile.get("logfile"))
        else:
            log_file = log_folder + os.path.basename(sourcefile.get("name")) + ".log"
        sourcefile.set("logfile", log_file)
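A worked illustration of the folder truncation at the top of this function (the file name is invented):

resultFile = "benchmark.2020-01-01.results.mytool.xml"
log_folder = resultFile[0 : resultFile.rfind(".results.")] + ".logfiles/"
print(log_folder)  # benchmark.2020-01-01.logfiles/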
Example 4: serve
# Required import: from concurrent import futures [as alias]
def serve(self):
    try:
        token, key, value = self.path.split('/')[1:4]
    except ValueError:  # path does not have the expected /token/key/value shape
        self.send_response(200)
        return
    if self.token != token:
        self.send_response(200)
        return
    if key in self.d:
        self.d[key].append(value)
    else:
        self.d[key] = [value, ]
    self.send_response(200)
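The expected URL shape is /token/key/value; a quick check of the parsing line (the sample path is invented):

path = "/secret-token/colors/red"
token, key, value = path.split('/')[1:4]
print(token, key, value)  # secret-token colors red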
Example 5: get_chapters
# Required import: from concurrent import futures [as alias]
import requests

def get_chapters(self, chapter_object):
    """Queries the series details API and creates a chapter object for each
    chapter listed.
    """
    response = requests.get(self.api_hook_details).json()
    chapters = []
    for chapter in response['chapters']:
        if int(chapter['chapter']['subchapter']) > 0:
            chapter_number = '.'.join([chapter['chapter']['chapter'],
                                       chapter['chapter']['subchapter']])
        else:
            chapter_number = chapter['chapter']['chapter']
        kwargs = {
            'name': self.name,
            'alias': self.alias,
            'chapter': chapter_number,
            'api_id': chapter['chapter']['id'],
            'url': chapter['chapter']['href'],
            'title': chapter['chapter']['name'],
            'groups': [team['name'] for team in chapter['teams']]
        }
        chapter = chapter_object(**kwargs)
        chapters.append(chapter)
    return chapters
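The subchapter branch joins the chapter and subchapter numbers with a dot; a minimal illustration with an invented payload:

chapter = {'chapter': {'chapter': '10', 'subchapter': '5'}}
if int(chapter['chapter']['subchapter']) > 0:
    chapter_number = '.'.join([chapter['chapter']['chapter'],
                               chapter['chapter']['subchapter']])
print(chapter_number)  # 10.5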
Example 6: get_chapters
# Required import: from concurrent import futures [as alias]
import re
from urllib.parse import urljoin

# name_re and fallback_re are module-level regexes in the original source (not shown here).
def get_chapters(self):
    chapters = []
    for t in self.json['taggings']:
        if 'permalink' in t and 'title' in t:
            name_parts = re.search(name_re, t['title'])
            if not name_parts:
                name_parts = re.search(fallback_re, t['title'])
                chapter = name_parts.group('num')
            elif name_parts.group('type') == 'Special':
                chapter = 'Special ' + name_parts.group('num')
            else:
                chapter = name_parts.group('num')
            title = name_parts.group('title')
            url = urljoin('https://dynasty-scans.com/chapters/',
                          t['permalink'])
            c = DynastyScansChapter(name=self.name, alias=self.alias,
                                    chapter=chapter, url=url, title=title)
            chapters.append(c)
    return chapters
Example 7: latest
# Required import: from concurrent import futures [as alias]
def latest(alias, relative):
    """List most recent chapter addition for series."""
    query = db.session.query(db.Series)
    if alias:
        query = query.filter_by(following=True, alias=alias)
    else:
        query = query.filter_by(following=True)
    query = query.order_by(db.Series.alias).all()
    updates = []
    for series in query:
        if series.last_added is None:
            time = 'never'
        elif relative:
            time = utility.time_to_relative(series.last_added)
        else:
            time = series.last_added.strftime('%Y-%m-%d %H:%M')
        updates.append((series.alias, time))
    output.even_columns(updates, separator_width=3)
Example 8: main
# Required import: from concurrent import futures [as alias]
import concurrent.futures
import json
import logging
import os

# FLAGS, run_learner, and run_actor come from the surrounding module (not shown here).
def main(_):
    tf_config = os.environ.get('TF_CONFIG', None)
    logging.info(tf_config)
    config = json.loads(tf_config)
    job_type = config.get('task', {}).get('type')
    os.environ.update({'PYTHONPATH': '/'})
    executor = concurrent.futures.ThreadPoolExecutor(
        max_workers=FLAGS.actors_per_worker)
    futures = []
    if job_type == 'master':
        futures.append(run_learner(executor, config))
    else:
        assert job_type == 'worker', 'Unexpected task type: {}'.format(job_type)
        for actor_id in range(FLAGS.actors_per_worker):
            futures.append(run_actor(executor, config, actor_id))
    for f in futures:
        f.result()
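TF_CONFIG is the JSON environment variable that distributed TensorFlow jobs use to describe the cluster; a minimal value this snippet could parse (addresses and cluster layout invented for illustration):

import json, os

os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {
        'master': ['10.0.0.1:8470'],
        'worker': ['10.0.0.2:8470', '10.0.0.3:8470'],
    },
    'task': {'type': 'worker', 'index': 0},
})
# config.get('task', {}).get('type') then returns 'worker'.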
Example 9: _get_paginated_response
# Required import: from concurrent import futures [as alias]
import concurrent.futures
import math

# PAGINATION_LIMIT is a module-level page-size constant in the original source.
def _get_paginated_response(self, url, params=None):
    url = self.add_slash(url)
    response_data = self._get_first_page(url, params)
    count = response_data.get('count', 0)
    page_count = int(math.ceil(float(count) / PAGINATION_LIMIT))
    self._logger.debug('Calculated that there are {} pages to get'.format(page_count))
    for result in response_data.get('results', []):
        yield result
    if page_count:
        with concurrent.futures.ThreadPoolExecutor(max_workers=25) as executor:
            futures = []
            if not params:
                params = {}
            for index in range(page_count, 1, -1):
                params.update({'page': index})
                futures.append(executor.submit(self.session.get, url, params=params.copy()))
            for future in concurrent.futures.as_completed(futures):
                try:
                    response = future.result()
                    response_data = response.json()
                    response.close()
                    for result in response_data.get('results'):
                        yield result
                except Exception:  # pylint: disable=broad-except
                    self._logger.exception('Future failed...')
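The page arithmetic in the method above, assuming a PAGINATION_LIMIT of 100: page 1 is fetched by _get_first_page, so the executor only requests the remaining pages, counting down.

import math

PAGINATION_LIMIT = 100  # assumed page size for illustration
count = 230
page_count = int(math.ceil(float(count) / PAGINATION_LIMIT))
print(page_count)                      # 3
print(list(range(page_count, 1, -1)))  # [3, 2] -- page 1 was already fetched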
Example 10: get_urls
# Required import: from concurrent import futures [as alias]
import re

def get_urls(inputfiles):
    """
    Takes a list of files containing hostnames and normalizes the hostnames
    so that valid HTTP/HTTPS requests can be made against them.

    Args:
        inputfiles -- list of input file objects

    Returns:
        urls -- list of normalized URLs which can be queried
    """
    urls = []
    scheme_rgx = re.compile(r'^https?://')
    for ifile in inputfiles:
        urls.append(ifile.read().splitlines())
    urls = set([n for l in urls for n in l])  # flatten and deduplicate
    urls = list(filter(None, urls))  # drop empty lines
    for i in range(len(urls)):
        if not scheme_rgx.match(urls[i]):
            urls[i] = 'http://' + urls[i]
    return urls
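A quick usage sketch with an in-memory file object standing in for a real input file (hostnames invented):

import io

fake_file = io.StringIO("example.com\nhttps://secure.example.org\n\n")
urls = get_urls([fake_file])
print(sorted(urls))  # ['http://example.com', 'https://secure.example.org']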
Example 11: new_check
# Required import: from concurrent import futures [as alias]
def new_check(self):
    c = Check()
    self.checks.append(c)
    return c
Example 12: handle_union_tag
# Required import: from concurrent import futures [as alias]
import collections
import logging

def handle_union_tag(
    tag, table_definition_file, options, default_columns, columns_relevant_for_diff
):
    columns = (
        extract_columns_from_table_definition_file(tag, table_definition_file)
        or default_columns
    )
    result = RunSetResult([], collections.defaultdict(list), columns)
    all_result_files = set()

    for resultTag in tag.findall("result"):
        if extract_columns_from_table_definition_file(resultTag, table_definition_file):
            logging.warning(
                "<result> tags within <union> tags may not contain <column> tags, "
                "these column declarations will be ignored. Please move them to the <union> tag."
            )
        run_set_id = resultTag.get("id")
        for resultsFile in get_file_list_from_result_tag(
            resultTag, table_definition_file
        ):
            if resultsFile in all_result_files:
                handle_error("File '%s' included twice in <union> tag", resultsFile)
            all_result_files.add(resultsFile)
            result_xml = parse_results_file(resultsFile, run_set_id)
            if result_xml is not None:
                result.append(resultsFile, result_xml, options.all_columns)

    if not result._xml_results:
        return None

    name = tag.get("name")
    if name:
        logging.warning(
            "Attribute 'name' for <union> tags is deprecated, use 'title' instead."
        )
    name = tag.get("title", name)
    if name:
        result.attributes["name"] = [name]
    result.collect_data(options.correct_only)
    return result
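For context, a sketch of the kind of <union> element this function consumes from a table-definition file; the file names, title, and exact attribute layout here are illustrative assumptions, not taken from the original source:

import xml.etree.ElementTree as ET

union_tag = ET.fromstring("""
<union title="all configurations">
  <result filename="benchmark.config1.results.xml"/>
  <result filename="benchmark.config2.results.xml"/>
  <column title="cputime" numberOfDigits="2">cputime</column>
</union>
""")
print(union_tag.get("title"))                                    # all configurations
print([r.get("filename") for r in union_tag.findall("result")])  # the two results files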
Example 13: extract_columns_from_table_definition_file
# Required import: from concurrent import futures [as alias]
import os

def extract_columns_from_table_definition_file(xmltag, table_definition_file):
    """
    Extract all columns mentioned in the result tag of a table definition file.
    """

    def handle_path(path):
        """Convert path from a path relative to table-definition file."""
        if not path or path.startswith("http://") or path.startswith("https://"):
            return path
        return os.path.join(os.path.dirname(table_definition_file), path)

    columns = []
    for c in xmltag.findall("column"):
        scale_factor = c.get("scaleFactor")
        display_unit = c.get("displayUnit")
        source_unit = c.get("sourceUnit")

        new_column = Column(
            c.get("title"),
            c.text,
            c.get("numberOfDigits"),
            handle_path(c.get("href")),
            None,
            display_unit,
            source_unit,
            scale_factor,
            c.get("relevantForDiff"),
            c.get("displayTitle"),
        )
        columns.append(new_column)
    return columns
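handle_path leaves absolute URLs untouched and rebases relative paths onto the table-definition file's directory; a worked illustration (the paths are invented):

import os

table_definition_file = "/home/user/tables/my-table.xml"

def handle_path(path):
    if not path or path.startswith("http://") or path.startswith("https://"):
        return path
    return os.path.join(os.path.dirname(table_definition_file), path)

print(handle_path("https://example.org/style.css"))  # unchanged
print(handle_path("columns/extra.xml"))              # /home/user/tables/columns/extra.xml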
Example 14: collect_data
# Required import: from concurrent import futures [as alias]
def collect_data(self, correct_only):
    """
    Load the actual result values from the XML file and the log files.
    This may take some time if many log files have to be opened and parsed.
    """
    self.results = []

    def get_value_from_logfile(lines, identifier):
        """
        This method searches for values in lines of the content.
        It uses a tool-specific method to do so.
        """
        return load_tool(self).get_value_from_output(lines, identifier)

    # Opening the ZIP archive with the logs for every run is too slow, we cache it.
    log_zip_cache = {}
    try:
        for xml_result, result_file in self._xml_results:
            self.results.append(
                RunResult.create_from_xml(
                    xml_result,
                    get_value_from_logfile,
                    self.columns,
                    correct_only,
                    log_zip_cache,
                    self.columns_relevant_for_diff,
                    result_file,
                )
            )
    finally:
        for file in log_zip_cache.values():
            file.close()

    for column in self.columns:
        column_values = (
            run_result.values[run_result.columns.index(column)]
            for run_result in self.results
        )
        column.set_column_type_from(column_values)

    del self._xml_results
Example 15: merge_task_lists
# Required import: from concurrent import futures [as alias]
def merge_task_lists(runset_results, tasks):
    """
    Set the filelists of all RunSetResult elements so that they contain the same files
    in the same order. For missing files a dummy element is inserted.
    """
    for runset in runset_results:
        # create mapping from id to RunResult object
        # (use a reversed list so that the first instance of equal tasks ends up in the dict)
        dic = {
            run_result.task_id: run_result for run_result in reversed(runset.results)
        }
        runset.results = []  # clear and repopulate results
        for task in tasks:
            run_result = dic.get(task)
            if run_result is None:
                logging.info(" No result for task %s in '%s'.", task, runset)
                # create an empty dummy element
                run_result = RunResult(
                    task,
                    None,
                    "empty",  # special category for tables
                    None,
                    None,
                    runset.columns,
                    [None] * len(runset.columns),
                )
            runset.results.append(run_result)