This article collects typical usage examples of Python's toolz.first function. If you have been wondering what first does, how to call it, or where it shows up in real code, the curated examples here should help.
Below are 15 code examples of the first function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
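For orientation before the examples: toolz.first(seq) returns the first element of any iterable, roughly next(iter(seq)), and toolz.last(seq), which several examples below pair with it, returns the final element. A minimal sketch:

from toolz import first, last

first([10, 20, 30])            # 10
first({"a": 1, "b": 2})        # "a" -- iterating a dict yields its keys
last(x * x for x in range(4))  # 9 -- generators work too
first([])                      # raises StopIteration on an empty iterable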
Example 1: has_next_day
from toolz import first, last

def has_next_day(dates_dict, year, month, day):
    """Return the next day found in nested dates_dict,
    or False if there is none."""
    # Check current month for next days
    days = sorted(dates_dict[year][month].keys())
    if day != last(days):
        di = days.index(day)
        next_day = days[di + 1]
        return {"year": year, "month": month, "day": next_day}
    # Check current year for next months
    months = sorted(dates_dict[year].keys())
    if month != last(months):
        mi = months.index(month)
        next_month = months[mi + 1]
        next_day = first(sorted(dates_dict[year][next_month].keys()))
        return {"year": year, "month": next_month, "day": next_day}
    # Check for next years
    years = sorted(dates_dict.keys())
    if year != last(years):
        yi = years.index(year)
        next_year = years[yi + 1]
        next_month = first(sorted(dates_dict[next_year].keys()))
        next_day = first(sorted(dates_dict[next_year][next_month].keys()))
        return {"year": next_year, "month": next_month, "day": next_day}
    return False
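A quick hypothetical walk-through of the function above (the dates_dict layout {year: {month: {day: ...}}} is inferred from the code; the leaf values do not matter here):

dates_dict = {2023: {1: {30: [], 31: []}, 2: {1: []}}, 2024: {1: {1: []}}}
has_next_day(dates_dict, 2023, 1, 30)  # {'year': 2023, 'month': 1, 'day': 31}
has_next_day(dates_dict, 2023, 1, 31)  # next month: {'year': 2023, 'month': 2, 'day': 1}
has_next_day(dates_dict, 2023, 2, 1)   # next year: {'year': 2024, 'month': 1, 'day': 1}
has_next_day(dates_dict, 2024, 1, 1)   # False -- no later date recorded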
Example 2: single_partition_join
def single_partition_join(left, right, **kwargs):
    # If the merge is performed on the index, divisions can be kept;
    # otherwise the new index will not necessarily correspond to the
    # current divisions.
    meta = pd.merge(left._meta_nonempty, right._meta_nonempty, **kwargs)
    name = 'merge-' + tokenize(left, right, **kwargs)
    if left.npartitions == 1:
        left_key = first(left.__dask_keys__())
        dsk = {(name, i): (apply, pd.merge, [left_key, right_key], kwargs)
               for i, right_key in enumerate(right.__dask_keys__())}
        if kwargs.get('right_index') or right._contains_index_name(
                kwargs.get('right_on')):
            divisions = right.divisions
        else:
            divisions = [None for _ in right.divisions]
    elif right.npartitions == 1:
        right_key = first(right.__dask_keys__())
        dsk = {(name, i): (apply, pd.merge, [left_key, right_key], kwargs)
               for i, left_key in enumerate(left.__dask_keys__())}
        if kwargs.get('left_index') or left._contains_index_name(
                kwargs.get('left_on')):
            divisions = left.divisions
        else:
            divisions = [None for _ in left.divisions]
    return new_dd_object(toolz.merge(dsk, left.dask, right.dask), name,
                         meta, divisions)
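Here first(...__dask_keys__()) grabs the task key of the lone partition so it can be paired with every partition of the other side. A rough sketch of what those keys look like (the token in the key name is illustrative):

import pandas as pd
import dask.dataframe as dd
from toolz import first

ddf = dd.from_pandas(pd.DataFrame({"a": [1, 2, 3]}), npartitions=1)
ddf.__dask_keys__()         # [('from_pandas-<token>', 0)] -- one key per partition
first(ddf.__dask_keys__())  # ('from_pandas-<token>', 0), the single partition's key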
Example 3: single_partition_join
def single_partition_join(left, right, **kwargs):
    # If the merge is performed on the index, divisions can be kept;
    # otherwise the new index will not necessarily correspond to the
    # current divisions.
    meta = left._meta_nonempty.merge(right._meta_nonempty, **kwargs)
    kwargs['empty_index_dtype'] = meta.index.dtype
    name = 'merge-' + tokenize(left, right, **kwargs)
    if left.npartitions == 1 and kwargs['how'] in ('inner', 'right'):
        left_key = first(left.__dask_keys__())
        dsk = {(name, i): (apply, merge_chunk, [left_key, right_key], kwargs)
               for i, right_key in enumerate(right.__dask_keys__())}
        if kwargs.get('right_index') or right._contains_index_name(
                kwargs.get('right_on')):
            divisions = right.divisions
        else:
            divisions = [None for _ in right.divisions]
    elif right.npartitions == 1 and kwargs['how'] in ('inner', 'left'):
        right_key = first(right.__dask_keys__())
        dsk = {(name, i): (apply, merge_chunk, [left_key, right_key], kwargs)
               for i, left_key in enumerate(left.__dask_keys__())}
        if kwargs.get('left_index') or left._contains_index_name(
                kwargs.get('left_on')):
            divisions = left.divisions
        else:
            divisions = [None for _ in left.divisions]
    else:
        raise NotImplementedError(
            "single_partition_join has no fallback for invalid calls")
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[left, right])
    return new_dd_object(graph, name, meta, divisions)
Example 4: has_previous_day
from toolz import first, last

def has_previous_day(dates_dict, year, month, day):
    """Return the previous day found in nested dates_dict,
    or False if there is none."""
    days = sorted(dates_dict[year][month].keys())
    # Check current month
    if day != first(days):
        di = days.index(day)
        prev_day = days[di - 1]
        return {"year": year, "month": month, "day": prev_day}
    # Check current year
    months = sorted(dates_dict[year].keys())
    if month != first(months):
        mi = months.index(month)
        prev_month = months[mi - 1]
        last_day = last(sorted(dates_dict[year][prev_month].keys()))
        return {"year": year, "month": prev_month, "day": last_day}
    # Check other years
    years = sorted(dates_dict.keys())
    if year != first(years):
        yi = years.index(year)
        prev_year = years[yi - 1]
        prev_month = last(sorted(dates_dict[prev_year].keys()))
        last_day = last(sorted(dates_dict[prev_year][prev_month].keys()))
        return {"year": prev_year, "month": prev_month, "day": last_day}
    return False
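Mirroring the sketch after Example 1, the same hypothetical dates_dict exercises the reverse walk:

dates_dict = {2023: {1: {30: [], 31: []}, 2: {1: []}}, 2024: {1: {1: []}}}
has_previous_day(dates_dict, 2023, 2, 1)   # {'year': 2023, 'month': 1, 'day': 31}
has_previous_day(dates_dict, 2024, 1, 1)   # {'year': 2023, 'month': 2, 'day': 1}
has_previous_day(dates_dict, 2023, 1, 30)  # False -- nothing earlier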
Example 5: _get_larger_chroms
def _get_larger_chroms(ref_file):
    """Retrieve larger chromosomes, avoiding the smaller ones for plotting."""
    from scipy.cluster.vq import kmeans, vq
    all_sizes = []
    for c in ref.file_contigs(ref_file):
        all_sizes.append(float(c.size))
    all_sizes.sort()
    # Separate out smaller chromosomes and haplotypes with k-means
    centroids, _ = kmeans(np.array(all_sizes), 2)
    idx, _ = vq(np.array(all_sizes), centroids)
    little_sizes = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx, all_sizes)))
    little_sizes = [x[1] for x in little_sizes]
    # Cluster the smaller group once more to split off the haplotypes
    centroids2, _ = kmeans(np.array(little_sizes), 2)
    idx2, _ = vq(np.array(little_sizes), centroids2)
    little_sizes2 = tz.first(tz.partitionby(lambda xs: xs[0], zip(idx2, little_sizes)))
    little_sizes2 = [x[1] for x in little_sizes2]
    # Keep any chromosomes not in the haplotype/random bin
    thresh = max(little_sizes2)
    larger_chroms = []
    for c in ref.file_contigs(ref_file):
        if c.size > thresh:
            larger_chroms.append(c.name)
    return larger_chroms
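The tz.first(tz.partitionby(...)) idiom above keeps only the first consecutive run of (cluster_label, size) pairs, i.e. the small-size cluster, since the sizes are sorted ascending before clustering. A stripped-down illustration (the labels here assume k-means assigned the small sizes to cluster 0):

import toolz as tz

pairs = [(0, 1.0), (0, 2.5), (0, 3.0), (1, 95.0), (1, 120.0)]
list(tz.partitionby(lambda xs: xs[0], pairs))
# [((0, 1.0), (0, 2.5), (0, 3.0)), ((1, 95.0), (1, 120.0))]
tz.first(tz.partitionby(lambda xs: xs[0], pairs))
# ((0, 1.0), (0, 2.5), (0, 3.0)) -- just the run of small sizes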
Example 6: str_cat_sql
def str_cat_sql(expr, lhs, rhs, **kwargs):
    left, right = first(lhs.inner_columns), first(rhs.inner_columns)
    if expr.sep:
        result = (left + expr.sep + right).label(expr.lhs._name)
    else:
        result = (left + right).label(expr.lhs._name)
    return reconstruct_select([result], lhs)
Example 7: test_live_migrate_anti_affinity
def test_live_migrate_anti_affinity(self):
    """
    Make sure that if we have an anti-affinity group set, and we try
    to live migrate to a host with the anti-affinity group, it will
    fail
    - Creates an
    :return:
    """
    data = self.setup_affinities(self.sanity)
    # Make sure the affinity and anti-affinity instances are booted up
    aff_inst = data["aff_instance"]
    anti_inst = data["anti_instance"]
    smog.nova.poll_status(aff_inst, "ACTIVE")
    smog.nova.poll_status(anti_inst, "ACTIVE")
    # Now perform a live migration for anti_inst. This should fail.
    # Get the host the instance is currently on, and compare before/after.
    discovered = self.sanity.discover()
    fltrfn = lambda x: x.instance.name == "aa-test"
    # In functional-speak: find the instance object among our discovered
    # Instance objects whose name is "aa-test". There should only be one,
    # so take the first. Use toolz.first rather than indexing ([0]); in
    # the general case this is better (for example, it still works if we
    # have a generator or iterator instead of a list or tuple). Remember,
    # functional programming rulez!
    before_inst = toolz.first(filter(fltrfn, [inst for inst in discovered]))
    before_host = before_inst.host
    anti_inst.live_migrate()
    discovered = self.sanity.discover()
    after_inst = toolz.first(filter(fltrfn, [inst for inst in discovered]))
    after_host = after_inst.host
    self.assertTrue(before_host.hostname == after_host.hostname)
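The comment's point, in isolation: first works on any iterable, including the lazy iterator that filter returns, where indexing with [0] would fail outright:

from toolz import first

names = ["aa-test", "aff-test", "other"]
matches = filter(lambda n: n == "aa-test", names)  # a lazy iterator, not a list
first(matches)  # 'aa-test'
# matches[0]    # TypeError: 'filter' object is not subscriptable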
Example 8: binop_sql
def binop_sql(t, lhs, rhs, **kwargs):
    if isinstance(lhs, Select):
        assert len(lhs.c) == 1, (
            "Select cannot have more than a single column when doing"
            " arithmetic, got %r" % lhs)
        lhs = first(lhs.inner_columns)
    if isinstance(rhs, Select):
        assert len(rhs.c) == 1, (
            "Select cannot have more than a single column when doing"
            " arithmetic, got %r" % rhs)
        rhs = first(rhs.inner_columns)
    return t.op(lhs, rhs)
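For context, a sketch against the SQLAlchemy 1.x API style these blaze examples use: inner_columns iterates the column expressions inside a Select, and first unwraps the only one.

import sqlalchemy as sa
from toolz import first

t = sa.table("t", sa.column("x"))
sel = sa.select([t.c.x])  # a single-column Select (1.x list-style select)
first(sel.inner_columns)  # the underlying column expression for "x"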
Example 9: coalesce_sql_select
def coalesce_sql_select(expr, lhs, rhs, **kwargs):
    if isinstance(lhs, Select):
        orig = lhs
        lhs = first(lhs.inner_columns)
    else:
        orig = rhs
        rhs = first(rhs.inner_columns)
    result = sa.sql.functions.coalesce(lhs, rhs).label(expr._name)
    return reconstruct_select([result], orig)
Example 10: compute_up
def compute_up(expr, data, **kwargs):
    name = expr._name
    try:
        inner_columns = list(data.inner_columns)
        names = list(c.name for c in data.inner_columns)
        column = inner_columns[names.index(name)]
    except (KeyError, ValueError):
        single_column_select = compute(expr, first(data.inner_columns),
                                       post_compute=False,
                                       return_type="native")
        column = first(single_column_select.inner_columns)
        result = unify_froms(sa.select([column]),
                             data.froms + single_column_select.froms)
        return result.where(unify_wheres([data, single_column_select]))
    else:
        return data.with_only_columns([column])
Example 11: binop_sql
def binop_sql(t, lhs, rhs, **kwargs):
    if isinstance(lhs, Select):
        assert len(lhs.c) == 1, (
            'Select cannot have more than a single column when doing'
            ' arithmetic, got %r' % lhs
        )
        lhs = first(lhs.inner_columns)
    if isinstance(rhs, Select):
        assert len(rhs.c) == 1, (
            'Select cannot have more than a single column when doing'
            ' arithmetic, got %r' % rhs
        )
        rhs = first(rhs.inner_columns)
    return f(t, lhs, rhs)
Example 12: port
def port(self):
    if not self._port:
        try:
            self._port = first(self._sockets.values()).getsockname()[1]
        except StopIteration:
            raise OSError("Server has no port. Please call .listen first")
    return self._port
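The except clause works because first raises StopIteration when handed an empty iterable, e.g. before any socket has been bound:

from toolz import first

sockets = {}
try:
    first(sockets.values())
except StopIteration:
    print("no sockets bound yet")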
Example 13: _schema
def _schema(self):
    schema = self._child.schema[0]
    if isinstance(schema, Record) and len(schema.types) == 1:
        result = toolz.first(schema.types)
    else:
        result = schema
    return DataShape(result)
Example 14: test_basic
def test_basic():
    def test_g():
        time.sleep(0.01)

    def test_h():
        time.sleep(0.02)

    def test_f():
        for i in range(100):
            test_g()
            test_h()

    thread = threading.Thread(target=test_f)
    thread.daemon = True
    thread.start()

    state = create()
    for i in range(100):
        time.sleep(0.02)
        frame = sys._current_frames()[thread.ident]
        process(frame, None, state)

    assert state['count'] == 100
    d = state
    while len(d['children']) == 1:
        d = first(d['children'].values())
    assert d['count'] == 100
    assert 'test_f' in str(d['description'])
    g = [c for c in d['children'].values() if 'test_g' in str(c['description'])][0]
    h = [c for c in d['children'].values() if 'test_h' in str(c['description'])][0]
    assert g['count'] < h['count']
    assert 95 < g['count'] + h['count'] <= 100
Example 15: test_pre_compute_with_projection_projects_on_data_frames
def test_pre_compute_with_projection_projects_on_data_frames():
    csv = CSV(example('iris.csv'))
    s = symbol('s', discover(csv))
    result = pre_compute(s[['sepal_length', 'sepal_width']].distinct(),
                         csv, comfortable_memory=10)
    assert set(first(result).columns) == \
        set(['sepal_length', 'sepal_width'])