This article collects typical usage examples of the keymap function from the Python toolz library: what keymap does, how to call it, and what it looks like in real code. The curated examples below may help answer those questions.
Fifteen code examples of the keymap function are shown below, drawn from real projects and sorted by popularity by default.
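Before diving into the examples, here is a minimal sketch of the semantics (the price dict is invented for illustration): keymap(func, d) returns a new dict whose keys have been passed through func, leaving the values untouched, while its siblings valmap and itemmap transform the values and the whole (key, value) pairs respectively.

from toolz import keymap, valmap

prices = {'aapl': 100, 'msft': 200}
print(keymap(str.upper, prices))        # {'AAPL': 100, 'MSFT': 200}
print(valmap(lambda v: v * 2, prices))  # {'aapl': 200, 'msft': 400}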
Example 1: compute_down
def compute_down(expr,
                 ec,
                 profiler_output=None,
                 compute_kwargs=None,
                 odo_kwargs=None,
                 **kwargs):
    """Compute down for blaze clients.

    Parameters
    ----------
    expr : Expr
        The expression to send to the server.
    ec : Client
        The blaze client to compute against.
    namespace : dict[Symbol -> any], optional
        The namespace to compute the expression in. This will be amended to
        include the data for the server. By default this will just be the
        client mapping to the server's data.
    compute_kwargs : dict, optional
        Extra kwargs to pass to compute on the server.
    odo_kwargs : dict, optional
        Extra kwargs to pass to odo on the server.
    profile : bool, optional
        Whether the blaze server should run cProfile over the computation of
        the expression and the serialization of the response.
    profiler_output : file-like object, optional
        A file-like object to hold the profiling output from the server.
        If this is not passed, the server will write the profiling data to
        its own filesystem.
    """
    from .server import to_tree

    kwargs = keymap(u8, kwargs)

    tree = to_tree(expr)
    serial = ec.serial
    if profiler_output is not None:
        kwargs[u'profile'] = True
        kwargs[u'profiler_output'] = ':response'
    kwargs[u'compute_kwargs'] = keymap(u8, compute_kwargs or {})
    kwargs[u'odo_kwargs'] = keymap(u8, odo_kwargs or {})
    r = post(
        ec,
        '/compute',
        data=serial.dumps(assoc(kwargs, u'expr', tree)),
        auth=ec.auth,
        headers=mimetype(serial),
    )
    if not ok(r):
        raise ValueError("Bad response: %s" % reason(r))
    response = serial.loads(content(r))
    if profiler_output is not None:
        profiler_output.write(response[u'profiler_output'])
    return serial.data_loads(response[u'data'])
Example 2: loads
def loads(b):
    """ Transform bytestream back into Python value """
    header_length, = struct.unpack('I', b[:4])
    if header_length:
        header = msgpack.loads(b[4: header_length + 4], encoding='utf8')
    else:
        header = {}

    payload = b[header_length + 4:]

    if header.get('compression'):
        try:
            decompress = compressions[header['compression']]['decompress']
            payload = decompress(payload)
        except KeyError:
            raise ValueError("Data is compressed as %s but we don't have this"
                             " installed" % header['compression'].decode())

    msg = msgpack.loads(payload, encoding='utf8')

    if header.get('decode'):
        if isinstance(msg, dict) and msg:
            msg = keymap(bytes.decode, msg)
        elif isinstance(msg, bytes):
            msg = msg.decode()
        else:
            raise TypeError("Asked to decode a %s" % type(msg).__name__)

    return msg
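The keymap(bytes.decode, msg) call above is what turns the bytes keys produced by msgpack back into text. A standalone sketch of just that step (the dict literal is invented):

from toolz import keymap

raw = {b'name': b'alice', b'id': 7}
decoded = keymap(bytes.decode, raw)
assert decoded == {'name': b'alice', 'id': 7}  # only the keys are decoded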
Example 3: test_novel_deltas_macro
def test_novel_deltas_macro(self):
    asset_info = asset_infos[0][0]
    base_dates = pd.DatetimeIndex([
        pd.Timestamp('2014-01-01'),
        pd.Timestamp('2014-01-04')
    ])
    baseline = pd.DataFrame({
        'value': (0, 1),
        'asof_date': base_dates,
        'timestamp': base_dates,
    })
    expr = bz.Data(baseline, name='expr', dshape=self.macro_dshape)
    deltas = bz.Data(baseline, name='deltas', dshape=self.macro_dshape)
    deltas = bz.transform(
        deltas,
        value=deltas.value + 10,
        timestamp=deltas.timestamp + timedelta(days=1),
    )
    nassets = len(asset_info)
    expected_views = keymap(pd.Timestamp, {
        '2014-01-03': repeat_last_axis(
            np.array([10.0, 10.0, 10.0]),
            nassets,
        ),
        '2014-01-06': repeat_last_axis(
            np.array([10.0, 10.0, 11.0]),
            nassets,
        ),
    })
    cal = pd.DatetimeIndex([
        pd.Timestamp('2014-01-01'),
        pd.Timestamp('2014-01-02'),
        pd.Timestamp('2014-01-03'),
        # omitting the 4th and 5th to simulate a weekend
        pd.Timestamp('2014-01-06'),
    ])
    with tmp_asset_finder(equities=asset_info) as finder:
        expected_output = pd.DataFrame(
            list(concatv([10] * nassets, [11] * nassets)),
            index=pd.MultiIndex.from_product((
                sorted(expected_views.keys()),
                finder.retrieve_all(asset_info.index),
            )),
            columns=('value',),
        )
        self._run_pipeline(
            expr,
            deltas,
            expected_views,
            expected_output,
            finder,
            calendar=cal,
            start=cal[2],
            end=cal[-1],
            window_length=3,
            compute_fn=op.itemgetter(-1),
        )
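The keymap(pd.Timestamp, {...}) idiom used here (and in several later examples) lets a test write its date-keyed expectations as plain strings and promote all the keys in one step. A minimal standalone version, with invented values:

import pandas as pd
from toolz import keymap

views = keymap(pd.Timestamp, {
    '2014-01-03': [10.0, 10.0, 10.0],
    '2014-01-06': [10.0, 10.0, 11.0],
})
assert pd.Timestamp('2014-01-03') in views  # keys are now Timestamps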
Example 4: _sniff_dialect
def _sniff_dialect(self, path):
    kwargs = self._kwargs
    dialect = sniff_dialect(path, self._sniff_nbytes,
                            encoding=self.encoding)
    kwargs = merge(dialect, keymap(alias, kwargs))
    return valfilter(lambda x: x is not None,
                     dict((d, kwargs[d])
                          for d in dialect_terms if d in kwargs))
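Here keymap(alias, kwargs) rewrites user-supplied keyword names to canonical dialect terms before merging them over the sniffed dialect, so explicit user values win. A hedged sketch of the same idea; the alias table below is a made-up stand-in, not odo's real one:

from toolz import keymap, merge

ALIASES = {'sep': 'delimiter', 'header': 'has_header'}

def alias(key):
    # Map alternate spellings to canonical names; pass others through.
    return ALIASES.get(key, key)

sniffed = {'delimiter': ',', 'has_header': True}
user_kwargs = {'sep': '|'}
assert merge(sniffed, keymap(alias, user_kwargs)) == \
    {'delimiter': '|', 'has_header': True}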
Example 5: _scatter
def _scatter(self, data, workers=None, broadcast=False):
    """ Scatter data to local data dictionary

    Rather than send data out to the cluster we keep data local.  However
    we do report to the scheduler that the local worker has the scattered
    data.  This allows other workers to come by and steal this data if
    desired.

    Keywords like ``broadcast=`` do not work, however operations like
    ``.replicate`` work fine after calling scatter, which can fill in for
    this functionality.
    """
    with log_errors():
        if not (workers is None and broadcast is False):
            raise NotImplementedError(
                "Scatter from worker doesn't support workers "
                "or broadcast keywords")

        if isinstance(data, dict) and not all(isinstance(k, (bytes, str))
                                              for k in data):
            d = yield self._scatter(keymap(tokey, data), workers, broadcast)
            raise gen.Return({k: d[tokey(k)] for k in data})

        if isinstance(data, (list, tuple, set, frozenset)):
            keys = []
            for x in data:
                try:
                    keys.append(tokenize(x))
                except Exception:
                    keys.append(str(uuid.uuid1()))
            data2 = dict(zip(keys, data))
        elif isinstance(data, dict):
            keys = set(data)
            data2 = data
        else:
            raise TypeError("Don't know how to scatter %s" % type(data))

        nbytes = valmap(sizeof, data2)

        # self.worker.data.update(data2)  # thread safety matters
        self.worker.loop.add_callback(self.worker.data.update, data2)

        yield self.scheduler.update_data(
            who_has={key: [self.worker.address] for key in data2},
            nbytes=valmap(sizeof, data2),
            client=self.id)

        if isinstance(data, dict):
            out = {k: Future(k, self) for k in data}
        elif isinstance(data, (tuple, list, set, frozenset)):
            out = type(data)([Future(k, self) for k in keys])
        else:
            raise TypeError(
                "Input to scatter must be a list or dict")

        for key in keys:
            self.futures[key]['status'] = 'finished'
            self.futures[key]['event'].set()

        raise gen.Return(out)
Example 6: test_novel_deltas
def test_novel_deltas(self, asset_info):
    base_dates = pd.DatetimeIndex([pd.Timestamp("2014-01-01"), pd.Timestamp("2014-01-04")])
    repeated_dates = base_dates.repeat(3)
    baseline = pd.DataFrame(
        {
            "sid": self.sids * 2,
            "value": (0, 1, 2, 1, 2, 3),
            "asof_date": repeated_dates,
            "timestamp": repeated_dates,
        }
    )
    expr = bz.Data(baseline, name="expr", dshape=self.dshape)
    deltas = bz.Data(baseline, name="deltas", dshape=self.dshape)
    deltas = bz.transform(deltas, value=deltas.value + 10, timestamp=deltas.timestamp + timedelta(days=1))
    expected_views = keymap(
        pd.Timestamp,
        {
            "2014-01-03": np.array([[10.0, 11.0, 12.0], [10.0, 11.0, 12.0], [10.0, 11.0, 12.0]]),
            "2014-01-06": np.array([[10.0, 11.0, 12.0], [10.0, 11.0, 12.0], [11.0, 12.0, 13.0]]),
        },
    )
    if len(asset_info) == 4:
        expected_views = valmap(lambda view: np.c_[view, [np.nan, np.nan, np.nan]], expected_views)
        expected_output_buffer = [10, 11, 12, np.nan, 11, 12, 13, np.nan]
    else:
        expected_output_buffer = [10, 11, 12, 11, 12, 13]
    cal = pd.DatetimeIndex(
        [
            pd.Timestamp("2014-01-01"),
            pd.Timestamp("2014-01-02"),
            pd.Timestamp("2014-01-03"),
            # omitting the 4th and 5th to simulate a weekend
            pd.Timestamp("2014-01-06"),
        ]
    )
    with tmp_asset_finder(equities=asset_info) as finder:
        expected_output = pd.DataFrame(
            expected_output_buffer,
            index=pd.MultiIndex.from_product(
                (sorted(expected_views.keys()), finder.retrieve_all(asset_info.index))
            ),
            columns=("value",),
        )

        self._run_pipeline(
            expr,
            deltas,
            expected_views,
            expected_output,
            finder,
            calendar=cal,
            start=cal[2],
            end=cal[-1],
            window_length=3,
            compute_fn=op.itemgetter(-1),
        )
Example 7: test_deltas
def test_deltas(self, asset_info):
    expr = bz.Data(self.df, name='expr', dshape=self.dshape)
    deltas = bz.Data(self.df, dshape=self.dshape)
    deltas = bz.Data(
        odo(
            bz.transform(
                deltas,
                value=deltas.value + 10,
                timestamp=deltas.timestamp + timedelta(days=1),
            ),
            pd.DataFrame,
        ),
        name='delta',
        dshape=self.dshape,
    )

    expected_views = keymap(pd.Timestamp, {
        '2014-01-02': np.array([[10.0, 11.0, 12.0],
                                [1.0, 2.0, 3.0]]),
        '2014-01-03': np.array([[11.0, 12.0, 13.0],
                                [2.0, 3.0, 4.0]]),
        '2014-01-04': np.array([[12.0, 13.0, 14.0],
                                [12.0, 13.0, 14.0]]),
    })

    nassets = len(asset_info)
    if nassets == 4:
        expected_views = valmap(
            lambda view: np.c_[view, [np.nan, np.nan]],
            expected_views,
        )

    with tmp_asset_finder(equities=asset_info) as finder:
        expected_output = pd.DataFrame(
            list(concatv([12] * nassets, [13] * nassets, [14] * nassets)),
            index=pd.MultiIndex.from_product((
                sorted(expected_views.keys()),
                finder.retrieve_all(asset_info.index),
            )),
            columns=('value',),
        )
        dates = self.dates
        dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
        self._run_pipeline(
            expr,
            deltas,
            expected_views,
            expected_output,
            finder,
            calendar=dates,
            start=dates[1],
            end=dates[-1],
            window_length=2,
            compute_fn=np.nanmax,
        )
Example 8: read
def read(cls, rootdir):
    path = cls.metadata_path(rootdir)
    with open(path) as fp:
        raw_data = json.load(fp)

    try:
        version = raw_data['version']
    except KeyError:
        # The version key was first written in version 1; assume 0 if the
        # key is missing.
        version = 0

    default_ohlc_ratio = raw_data['ohlc_ratio']

    if version >= 1:
        minutes_per_day = raw_data['minutes_per_day']
    else:
        # Version 0 always assumed US equities.
        minutes_per_day = US_EQUITIES_MINUTES_PER_DAY

    if version >= 2:
        calendar = get_calendar(raw_data['calendar_name'])
        start_session = pd.Timestamp(
            raw_data['start_session'], tz='UTC')
        end_session = pd.Timestamp(raw_data['end_session'], tz='UTC')
    else:
        # No calendar info included in older versions, so
        # default to NYSE.
        calendar = get_calendar('NYSE')
        start_session = pd.Timestamp(
            raw_data['first_trading_day'], tz='UTC')
        end_session = calendar.minute_to_session_label(
            pd.Timestamp(
                raw_data['market_closes'][-1], unit='m', tz='UTC')
        )

    if version >= 3:
        ohlc_ratios_per_sid = raw_data['ohlc_ratios_per_sid']
        if ohlc_ratios_per_sid is not None:
            ohlc_ratios_per_sid = keymap(int, ohlc_ratios_per_sid)
    else:
        ohlc_ratios_per_sid = None

    return cls(
        default_ohlc_ratio,
        ohlc_ratios_per_sid,
        calendar,
        start_session,
        end_session,
        minutes_per_day,
        version=version,
    )
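The keymap(int, ohlc_ratios_per_sid) call exists because JSON object keys are always strings: integer sids written with json.dump come back as '1', '2', ... and keymap restores them. A minimal illustration of the round trip:

import json
from toolz import keymap

ratios = {1: 1000, 2: 100000}
restored = json.loads(json.dumps(ratios))
assert restored == {'1': 1000, '2': 100000}  # keys became strings
assert keymap(int, restored) == ratios       # keymap(int, ...) undoes that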
Example 9: __init__
def __init__(self, path, has_header='no-input', encoding='utf-8', **kwargs):
    self.path = path
    if has_header == 'no-input':
        if not os.path.exists(path):
            self.has_header = True
        else:
            self.has_header = None
    else:
        self.has_header = has_header
    self.encoding = encoding
    kwargs = keymap(alias, kwargs)
    self.dialect = dict((d, kwargs[d]) for d in dialect_terms
                        if d in kwargs)
Example 10: test_deltas_only_one_delta_in_universe
def test_deltas_only_one_delta_in_universe(self, asset_info):
    expr = bz.Data(self.df, name='expr', dshape=self.dshape)
    deltas = pd.DataFrame({
        'sid': [65, 66],
        'asof_date': [self.dates[1], self.dates[0]],
        'timestamp': [self.dates[2], self.dates[1]],
        'value': [10, 11],
    })
    deltas = bz.Data(deltas, name='deltas', dshape=self.dshape)
    expected_views = keymap(pd.Timestamp, {
        '2014-01-02': np.array([[0.0, 11.0, 2.0],
                                [1.0, 2.0, 3.0]]),
        '2014-01-03': np.array([[10.0, 2.0, 3.0],
                                [2.0, 3.0, 4.0]]),
        '2014-01-04': np.array([[2.0, 3.0, 4.0],
                                [2.0, 3.0, 4.0]]),
    })

    nassets = len(asset_info)
    if nassets == 4:
        expected_views = valmap(
            lambda view: np.c_[view, [np.nan, np.nan]],
            expected_views,
        )

    with tmp_asset_finder(equities=asset_info) as finder:
        expected_output = pd.DataFrame(
            columns=[
                'value',
            ],
            data=np.array([11, 10, 4]).repeat(len(asset_info.index)),
            index=pd.MultiIndex.from_product((
                sorted(expected_views.keys()),
                finder.retrieve_all(asset_info.index),
            )),
        )
        dates = self.dates
        dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
        self._run_pipeline(
            expr,
            deltas,
            expected_views,
            expected_output,
            finder,
            calendar=dates,
            start=dates[1],
            end=dates[-1],
            window_length=2,
            compute_fn=np.nanmax,
        )
Example 11: __init__
def __init__(self, path, has_header=None, encoding='utf-8',
             sniff_nbytes=10000, **kwargs):
    self.path = path
    if has_header is None:
        self.has_header = (not os.path.exists(path) or
                           infer_header(path, sniff_nbytes))
    else:
        self.has_header = has_header
    self.encoding = encoding if encoding is not None else 'utf-8'
    kwargs = merge(sniff_dialect(path, sniff_nbytes, encoding=encoding),
                   keymap(alias, kwargs))
    self.dialect = valfilter(bool,
                             dict((d, kwargs[d])
                                  for d in dialect_terms if d in kwargs))
Example 12: patch_cacheops
def patch_cacheops(g):
    REDIS_URL = g.get('REDIS_URL')
    if not REDIS_URL:
        return

    log_setting('CACHEOPS', 'is enabled')

    g['CACHEOPS_REDIS'] = keymap(str.lower, dj_redis_url.parse(REDIS_URL))
    g['INSTALLED_APPS'].append('cacheops')
    g['CACHEOPS_DEGRADE_ON_FAILURE'] = True
    g['CACHEOPS_DEFAULTS'] = {'timeout': IN_SECONDS.FIFTEEN_MINUTES}
    g['CACHEOPS'] = {
        # Automatically cache any User.objects.get() calls for 15 minutes.
        # This includes request.user or post.author access,
        # where Post.author is a foreign key to auth.User.
        'auth.user': {'ops': 'get'},
        'core.user': {'ops': 'get'},

        # Automatically cache all gets and queryset fetches
        # to other django.contrib.auth models for an hour.
        'auth.*': {'ops': ('fetch', 'get'), 'timeout': IN_SECONDS.ONE_HOUR},

        # Cache gets, fetches, counts and exists to Permission.
        # 'all' is just an alias for ('get', 'fetch', 'count', 'exists').
        'auth.permission': {'ops': 'all', 'timeout': IN_SECONDS.ONE_HOUR},

        # Basically never-changing objects; allow local_get (in memory).
        'event.event': {'ops': 'all', 'local_get': True},
        'ticket.tickettype': {'ops': 'all', 'local_get': True},
        'ticket.tickettier': {'ops': 'all', 'local_get': True},
        'ticket.ticketaddontype': {'ops': 'all', 'local_get': False},

        # Enable manual caching on all other models with a default timeout
        # of an hour.  Use Post.objects.cache().get(...)
        # or Tags.objects.filter(...).order_by(...).cache()
        # to cache a particular ORM request.
        # Invalidation is still automatic.
        # Since 'ops' is empty by default, this could equally be written
        # as '*.*': {'timeout': IN_SECONDS.ONE_HOUR}.
        '*.*': {'ops': (), 'timeout': IN_SECONDS.ONE_HOUR},
    }
Example 13: test_deltas_macro
def test_deltas_macro(self):
    asset_info = asset_infos[0][0]
    expr = bz.Data(self.macro_df, name='expr', dshape=self.macro_dshape)
    deltas = bz.Data(
        self.macro_df.iloc[:-1],
        name='deltas',
        dshape=self.macro_dshape,
    )
    deltas = bz.transform(
        deltas,
        value=deltas.value + 10,
        timestamp=deltas.timestamp + timedelta(days=1),
    )

    nassets = len(asset_info)
    expected_views = keymap(pd.Timestamp, {
        '2014-01-02': repeat_last_axis(np.array([10.0, 1.0]), nassets),
        '2014-01-03': repeat_last_axis(np.array([11.0, 2.0]), nassets),
    })

    with tmp_asset_finder(equities=asset_info) as finder:
        expected_output = pd.DataFrame(
            list(concatv([10] * nassets, [11] * nassets)),
            index=pd.MultiIndex.from_product((
                sorted(expected_views.keys()),
                finder.retrieve_all(asset_info.index),
            )),
            columns=('value',),
        )
        dates = self.dates
        self._run_pipeline(
            expr,
            deltas,
            expected_views,
            expected_output,
            finder,
            calendar=dates,
            start=dates[1],
            end=dates[-1],
            window_length=2,
            compute_fn=np.nanmax,
        )
Example 14: schema
def schema(self):
    """
    Examples
    --------
    >>> from blaze import symbol
    >>> t = symbol('t', 'var * {name: string, amount: int}')
    >>> s = symbol('t', 'var * {name: string, id: int}')

    >>> join(t, s).schema
    dshape("{name: string, amount: int32, id: int32}")

    >>> join(t, s, how='left').schema
    dshape("{name: string, amount: int32, id: ?int32}")

    Overlapping but non-joined fields append _left, _right suffixes:

    >>> a = symbol('a', 'var * {x: int, y: int}')
    >>> b = symbol('b', 'var * {x: int, y: int}')
    >>> join(a, b, 'x').fields
    ['x', 'y_left', 'y_right']
    """
    option = lambda dt: dt if isinstance(dt, Option) else Option(dt)

    on_left = self.on_left
    if not isinstance(on_left, list):
        on_left = on_left,

    on_right = self.on_right
    if not isinstance(on_right, list):
        on_right = on_right,

    right_types = keymap(
        dict(zip(on_right, on_left)).get,
        self.rhs.dshape.measure.dict,
    )
    joined = (
        (name, promote(dt, right_types[name], promote_option=False))
        for n, (name, dt) in enumerate(filter(
            compose(op.contains(on_left), first),
            self.lhs.dshape.measure.fields,
        ))
    )

    left = [
        (name, dt) for name, dt in zip(
            self.lhs.fields,
            types_of_fields(self.lhs.fields, self.lhs)
        ) if name not in on_left
    ]

    right = [
        (name, dt) for name, dt in zip(
            self.rhs.fields,
            types_of_fields(self.rhs.fields, self.rhs)
        ) if name not in on_right
    ]

    # Handle overlapping but non-joined fields, e.g. the y_left/y_right
    # case from the docstring above.
    left_other = set(name for name, dt in left if name not in on_left)
    right_other = set(name for name, dt in right if name not in on_right)
    overlap = left_other & right_other

    left_suffix, right_suffix = self.suffixes
    left = ((name + left_suffix if name in overlap else name, dt)
            for name, dt in left)
    right = ((name + right_suffix if name in overlap else name, dt)
             for name, dt in right)

    if self.how in ('right', 'outer'):
        left = ((name, option(dt)) for name, dt in left)
    if self.how in ('left', 'outer'):
        right = ((name, option(dt)) for name, dt in right)

    return dshape(Record(chain(joined, left, right)))
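The notable keymap call in this example is keymap(dict(zip(on_right, on_left)).get, ...), which renames the right-hand join keys to their left-hand counterparts by using a mapping's .get method as the key function. In isolation, with made-up field names:

from toolz import keymap

on_right = ('rid',)
on_left = ('id',)
right_types = {'rid': 'int32', 'value': 'float64'}
renamed = keymap(dict(zip(on_right, on_left)).get, right_types)
assert renamed == {'id': 'int32', None: 'float64'}

Keys absent from the mapping collapse to None under .get; the surrounding code only ever looks up the joined names in right_types, so that is harmless here.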
Example 15: get
def get(self):
    resp = keymap(str, valmap(sizeof, self.server.data))
    self.write(resp)
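This last example composes the two halves of the dict: valmap(sizeof, ...) replaces each value with its size in bytes, and keymap(str, ...) stringifies the keys so the dict can be serialized in an HTTP response. A self-contained sketch, with sys.getsizeof standing in for distributed's sizeof helper:

import sys
from toolz import keymap, valmap

data = {('x', 0): [1, 2, 3], ('x', 1): [4, 5, 6]}
resp = keymap(str, valmap(sys.getsizeof, data))
print(resp)  # keys are now strings like "('x', 0)"; values are byte counts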