This article collects typical usage examples of the more_itertools.chunked function in Python: what chunked does, how to call it, and how it is used in real projects. If you have been wondering about any of these, the examples curated here may help.
Below are 15 code examples of the chunked function, drawn from open-source projects and sorted by popularity by default.
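Before the project examples, here is a minimal demonstration of the function itself (not taken from any of the projects below): chunked(iterable, n) lazily yields lists of up to n items, with the final list shorter when the input length is not a multiple of n.

from more_itertools import chunked

# Break an iterable into lists of (at most) three items.
print(list(chunked(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]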

Example 1: get_context_data

def get_context_data(self, **kwargs):
    context = super(HomePageView, self).get_context_data(**kwargs)
    # Split the promoted items into slides of three for the homepage carousels.
    context['courses_slides'] = chunked(context['homepage'].promoted_courses.all(), 3)
    context['menthors_slides'] = chunked(context['homepage'].promoted_menthors.all(), 3)
    context['promoted_portfolios'] = Portfolio.objects.filter(
        home_published=True, status='published').order_by('-timestamp')[:8]
    return context

Example 2: group_by_magnitude

def group_by_magnitude(collection):
    # Choose a chunk size based on the collection's order of magnitude.
    alen = len(collection)
    if alen > 1000:
        return chunked(collection, 100)
    if alen > 100:
        return chunked(collection, 10)
    return [collection]
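A quick sanity check of the size thresholds, with a hypothetical 250-element input (assumes the definition above is in scope):

groups = list(group_by_magnitude(list(range(250))))
print(len(groups), len(groups[0]))  # 25 10 -- 25 chunks of 10 items each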

Example 3: chunked_join

def chunked_join(iterable, int1, int2, str1, str2, func):
    """Chunk and join."""
    chunks = list(chunked(iterable, int1))
    logging.debug(chunks)
    groups = [list(chunked(chunk, int2)) for chunk in chunks]
    logging.debug(groups)
    # Apply func to each sub-chunk, join sub-chunks with str2,
    # then join the resulting groups with str1.
    return str1.join([
        str2.join([func(''.join(chunk)) for chunk in group])
        for group in groups
    ])
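A usage sketch with made-up arguments, showing how the two chunk sizes and the two separators interact:

print(chunked_join('abcdefgh', 4, 2, '\n', ' ', str.upper))
# AB CD
# EF GH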

Example 4: _iter_cores

def _iter_cores(cores, ncontainer):
    full_cores, part_cores = cores.get('full', []), cores.get('part', [])
    if not (full_cores or part_cores):
        return (([], []) for _ in range(ncontainer))
    # Split both core lists evenly across the containers; izip_longest
    # pads the shorter sequence of chunks with empty lists.
    # (Python 2 code: izip_longest lives in itertools, and / floors on ints.)
    return izip_longest(
        chunked(full_cores, len(full_cores) / ncontainer),
        chunked(part_cores, len(part_cores) / ncontainer),
        fillvalue=[]
    )
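The same pairing pattern in Python 3 terms (itertools.zip_longest instead of izip_longest, // for the chunk size), as a minimal sketch with made-up core names:

from itertools import zip_longest
from more_itertools import chunked

full, part = ['c0', 'c1', 'c2', 'c3'], ['p0', 'p1', 'p2']
ncontainer = 2
pairs = zip_longest(
    chunked(full, len(full) // ncontainer),  # [['c0', 'c1'], ['c2', 'c3']]
    chunked(part, len(part) // ncontainer),  # [['p0'], ['p1'], ['p2']]
    fillvalue=[],
)
print(list(pairs))
# [(['c0', 'c1'], ['p0']), (['c2', 'c3'], ['p1']), ([], ['p2'])]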

Example 5: decl

def decl(self):
    logging.debug(_('args: %s'), self.args)
    args = self.args.strip().replace('__user ', '').split(',')
    logging.debug(_('args: %s'), args)
    # The argument string alternates type and name, so pairing the
    # comma-separated tokens rebuilds one "type name" string per parameter.
    args = [''.join(pair) for pair in chunked(args, 2)]
    return 'long {}({});'.format(
        self.name.strip(), ', '.join(args))
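Just the pairing step in isolation, over a hypothetical argument string in the "type, name, type, name" form the code expects:

from more_itertools import chunked

args = 'unsigned int, fd, char *, buf, size_t, count'.split(',')
print([''.join(pair) for pair in chunked(args, 2)])
# ['unsigned int fd', ' char * buf', ' size_t count']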

Example 6: __init__

def __init__(self, recs):
    self.argslist = []
    # TODO make these separate nodes
    rec_values = (rec.value for rec in recs)
    # The records arrive as a flat (name, value, type) sequence;
    # regroup them into triples.
    for name, value, type_ in chunked(rec_values, 3):
        self.argslist.append((name, value, type_))

Example 7: parse_obj

def parse_obj(self, obj, dtype):
    dic = OD((('type', 'Feature'), ('geometry', OD()), ('properties', OD())))
    dic['properties']['class'] = dtype
    for child in obj:
        ctag = self.clip_tag(child.tag)
        if ctag in ['pos', 'area', 'loc']:
            if ctag == 'area':
                dic['geometry']['type'] = 'Polygon'
                dic['geometry']['coordinates'] = self.get_polygon_coord(child)
            else:
                if ctag == 'pos':
                    dic['geometry']['type'] = 'Point'
                elif ctag == 'loc':
                    dic['geometry']['type'] = 'LineString'
                text = ''
                for fragment in child.itertext():
                    text += fragment
                # Tokens come as "lat lon" pairs; flip each pair into
                # GeoJSON [lon, lat] order.
                pairs = list(chunked(text.strip().split(), 2))
                coords = [[float(xy[1]), float(xy[0])] for xy in pairs]
                if len(coords) == 1:
                    dic['geometry']['coordinates'] = coords[0]
                else:
                    dic['geometry']['coordinates'] = coords
        elif not child.text.strip() == '':
            dic['properties'][ctag] = child.text
        else:
            text = ''
            for fragment in child.itertext():
                text += fragment
            dic['properties'][ctag] = text.strip()
    dic = self.chk_types(dic)
    return dic
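The chunked-specific step in isolation, pairing "lat lon" tokens and flipping each pair into GeoJSON [lon, lat] order, with a made-up coordinate string:

from more_itertools import chunked

text = "35.0000 135.0000 36.0000 136.0000"  # hypothetical "lat lon" pairs
pairs = list(chunked(text.split(), 2))      # [['35.0000', '135.0000'], ...]
coords = [[float(xy[1]), float(xy[0])] for xy in pairs]
print(coords)  # [[135.0, 35.0], [136.0, 36.0]]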

Example 8: cooccurrence

def cooccurrence(
    corpus,
    execnet_hub,
    targets,
    context,
    paths_progress_iter,
    output=('o', 'space.h5', 'The output space file.'),
):
    """Build the co-occurrence matrix."""
    if targets.index.nlevels > 1:
        targets.sortlevel(inplace=True)
    if context.index.nlevels > 1:
        context.sortlevel(inplace=True)

    def init(channel):
        channel.send(
            (
                'data',
                pickle.dumps(
                    {
                        'kwargs': {
                            'targets': targets,
                            'context': context,
                        },
                        'instance': corpus,
                        'folder_name': 'cooccurrence',
                    },
                )
            )
        )

    results = execnet_hub.run(
        remote_func=sum_folder,
        iterable=paths_progress_iter,
        init_func=init,
    )

    results = ([r] for r in results if r is not None)
    result = next(results)[0]

    for i, chunk in enumerate(chunked(results, 100)):
        logger.info('Received result chunk #%s.', i)
        chunked_result = [c[0] for c in chunk]

        with Timer() as timed:
            result = pd.concat(
                chunked_result + [result],
                copy=False,
            ).groupby(level=result.index.names).sum()

        logger.info(
            'Computed the result by merging a chunk of received results and the result in %.2f seconds.',
            timed.elapsed,
        )

    result = result.to_frame('count')
    result.reset_index(inplace=True)

    write_space(output, context, targets, result)

Example 9: add_to_spotify

def add_to_spotify(db, spotify, album, original_artist, original_album):
    album = spotify.album(album["uri"])
    tracks = album["tracks"]
    track_ids = [t["uri"] for t in tracks["items"]]
    # Follow the pagination links to collect every track on the album.
    while tracks["next"]:
        tracks = spotify.next(tracks)
        track_ids.extend(t["uri"] for t in tracks["items"])

    click.echo("Adding {0} tracks to Spotify...".format(len(track_ids)))
    # The saved-tracks endpoint caps each request at 50 IDs.
    for chunk in chunked(track_ids, 50):
        response = spotify.current_user_saved_tracks_add(chunk)
        if response is not None:
            click.secho("Fuck, something broke:")
            pprint(response)
            click.confirm("Continue?", abort=True)
            return

    cursor = db.cursor()
    cursor.execute(
        """UPDATE collection SET complete = 1
        WHERE artist = ? AND album = ?""",
        [original_artist, original_album],
    )
    db.commit()
    click.secho("Done ", fg="green", nl=False)
    time.sleep(0.25)

Example 10: get_random_logs

def get_random_logs(self, limit):
    count = min(limit, self.db.count())
    ids = self.db.find({}, {'_id': 1})
    rand_ids = [r['_id'] for r in random.sample(list(ids), count)]
    # Query the sampled IDs 100 at a time to keep each $in clause small.
    for rand_ids_chunk in chunked(rand_ids, 100):
        query = {'_id': {'$in': rand_ids_chunk}}
        for doc in self.db.find(query, {'message': 1}):
            yield doc['message']

Example 11: parallelize_func

from joblib import Parallel, delayed
import more_itertools

def parallelize_func(iterable, func, chunksz=1, n_jobs=16, *args, **kwargs):
    """Apply func to chunks of an iterable in parallel.

    Note that func receives a whole chunk (a list of up to chunksz
    elements), not a single element; the per-chunk results are
    flattened into a single list at the end.
    """
    chunks = more_itertools.chunked(iterable, chunksz)
    chunks_results = Parallel(n_jobs=n_jobs, verbose=50)(
        delayed(func)(chunk, *args, **kwargs) for chunk in chunks)
    results = more_itertools.flatten(chunks_results)
    return list(results)
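A usage sketch with a hypothetical worker function (assumes the definition and imports above are in scope); note that the worker receives a whole chunk, not a single element:

def square_chunk(chunk):
    # Receives a whole chunk: a list of up to `chunksz` elements.
    return [x * x for x in chunk]

print(parallelize_func(range(10), square_chunk, chunksz=4, n_jobs=2))
# [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]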

Example 12: start

def start(experiment_description, agent, environment, results_descriptor):
    """Kick off the execution of an experiment."""
    initialize_results(results_descriptor)
    interval_results = islice(
        interval_results_generator(agent, environment, experiment_description),
        experiment_description.num_steps)
    # Merge and write the results once per `results_descriptor.interval` steps.
    results_interval_chunks = chunked(interval_results, results_descriptor.interval)
    for chunk in results_interval_chunks:
        results = [interval_data.results for interval_data in chunk]
        write_results(merge_results(results), results_descriptor)

Example 13: create_partials

def create_partials(self, product, branch, platform, locales, revision,
                    chunk_name=1):
    """Calculates "from" and "to" MAR URLs and calls create_task_graph().

    Currently "from" MAR is 2 releases behind to avoid duplication of
    existing CI partials.

    :param product: capitalized product name, AKA appName, e.g. Firefox
    :param branch: branch name (mozilla-central)
    :param platform: buildbot platform (linux, macosx64)
    :param locales: list of locales
    :param revision: revision of the "to" build
    :param chunk_name: chunk name
    """
    # TODO: move limit to config
    # Get last 5 releases (including current),
    # generate partial for 4 latest
    last_releases = self.balrog_client.get_releases(product, branch)[:5]
    release_to = last_releases.pop(0)
    per_chunk = 5
    for update_number, release_from in enumerate(last_releases, start=1):
        log.debug("From: %s", release_from)
        log.debug("To: %s", release_to)
        for n, chunk in enumerate(chunked(locales, per_chunk), start=1):
            extra = []
            for locale in chunk:
                try:
                    build_from = self.balrog_client.get_build(
                        release_from, platform, locale)
                    log.debug("Build from: %s", build_from)
                    build_to = self.balrog_client.get_build(
                        release_to, platform, locale)
                    log.debug("Build to: %s", build_to)
                    from_mar = build_from["completes"][0]["fileUrl"]
                    to_mar = build_to["completes"][0]["fileUrl"]
                    extra.append({
                        "locale": locale,
                        "from_mar": from_mar,
                        "to_mar": to_mar,
                    })
                except (requests.HTTPError, ValueError):
                    log.exception(
                        "Error getting build, skipping this scenario")

            if extra:
                if len(locales) > per_chunk:
                    # More than 1 chunk
                    subchunk = n
                else:
                    subchunk = None
                all_locales = [e["locale"] for e in extra]
                log.info("New Funsize task for %s", all_locales)
                self.submit_task_graph(
                    branch=branch, revision=revision, platform=platform,
                    update_number=update_number, chunk_name=chunk_name,
                    extra=extra, subchunk=subchunk)
            else:
                log.warn("Nothing to submit")

Example 14: score

def score(self, rev_ids, caches=None, cache=None):
    if isinstance(rev_ids, int):
        rev_ids = [rev_ids]

    # Group the revision IDs into scoring batches of `batch_size`.
    batches = batch_rev_caches(chunked(rev_ids, self.batch_size), caches,
                               cache)

    for batch_scores in self.scores_ex.map(self._score_batch, batches):
        for score in batch_scores:
            yield score

Example 15: c_layout

def c_layout(i, definition, template):
    c_name = layer_names[i]
    pretty_name = c_name.strip('_').capitalize()
    layout = d['layout']

    # Pretty-printed form: 12 keys per row, each row boxed with │ separators.
    surround = lambda s: ''.join(interleave_longest(['│'] * (len(s) + 1), s))
    layer = list(map(uni, definition))
    layer[41] = layer[41].center(11)
    layer = chunked(layer, 12)
    rows = intersperse(mid, map(surround, layer))
    pretty = '\n'.join(itertools.chain([top], rows, [bottom]))

    # C initializer form: the same 12-key rows, comma-separated.
    surround = lambda s: ', '.join(s)
    layer = list(map(lambda k: layer_name.get(k, k), definition))
    layer = chunked(layer, 12)
    rows = map(surround, layer)
    c_layer = ',\n    '.join(itertools.chain([], rows, []))

    return template.format(pretty_name, pretty, c_name, layout, c_layer)