This article collects typical usage examples of the Python toolz.keyfilter function. If you have been wondering what exactly keyfilter does, how it is called, and what real-world uses look like, the hand-picked examples below may help.
Fifteen code examples of the keyfilter function are shown, ordered by popularity.
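As background before the examples: keyfilter(predicate, d) returns a new dict containing only the items whose keys satisfy the predicate (valfilter is its value-side counterpart). A minimal standalone demonstration:

from toolz import keyfilter

d = {'alpha': 1, 'beta': 2, 'gamma': 3}
# Keep only the keys that appear in a whitelist.
keyfilter({'alpha', 'gamma'}.__contains__, d)  # {'alpha': 1, 'gamma': 3}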
Example 1: resource_bcolz
def resource_bcolz(rootdir, **kwargs):
    if os.path.exists(rootdir):
        kwargs = keyfilter(carray_keywords.__contains__, kwargs)
        return ctable(rootdir=rootdir, **kwargs)
    else:
        if 'dshape' in kwargs:
            dtype = to_numpy_dtype(kwargs['dshape'])
            kwargs = keyfilter(carray_keywords.__contains__, kwargs)
            return ctable(np.empty(0, dtype), rootdir=rootdir, **kwargs)
        else:
            raise ValueError("File does not exist and no `dshape=` given")
Example 2: append_table_to_csv
def append_table_to_csv(csv, selectable, dshape=None, **kwargs):
    kwargs = keyfilter(keywords(CopyToCSV).__contains__,
                       merge(csv.dialect, kwargs))
    stmt = CopyToCSV(selectable, os.path.abspath(csv.path), **kwargs)
    with selectable.bind.connect() as conn:
        conn.execute(stmt)
    return csv
Example 3: to_frame
def to_frame(self, columns=None):
    """
    Make a DataFrame with the given columns.

    Parameters
    ----------
    columns : sequence, optional
        Sequence of the column names desired in the DataFrame.
        If None all columns are returned, including registered columns.

    Returns
    -------
    frame : pandas.DataFrame

    """
    extra_cols = _columns_for_table(self.name)
    if columns:
        local_cols = [c for c in self._frame.columns
                      if c in columns and c not in extra_cols]
        extra_cols = toolz.keyfilter(lambda c: c in columns, extra_cols)
        df = self._frame[local_cols].copy()
    else:
        df = self._frame.copy()
    for name, col in extra_cols.items():
        df[name] = col()
    return df
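Note that extra_cols maps names to zero-argument callables that are invoked only after keyfilter has narrowed them to the requested columns, so unrequested columns are never computed. A toy illustration of that lazy pattern (the column functions here are invented):

import toolz

extra_cols = {'total': lambda: [3, 7], 'flag': lambda: [True, False]}
wanted = ['total']
selected = toolz.keyfilter(lambda c: c in wanted, extra_cols)
{name: col() for name, col in selected.items()}  # {'total': [3, 7]}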
Example 4: _collect_injectables
def _collect_injectables(names):
    """
    Find all the injectables specified in `names`.

    Parameters
    ----------
    names : list of str

    Returns
    -------
    injectables : dict
        Keys are the names, values are wrappers if the injectable
        is a table. If it's a plain injectable the value itself is given
        or the injectable function is evaluated.

    """
    names = set(names)
    dicts = toolz.keyfilter(
        lambda x: x in names, toolz.merge(_INJECTABLES, _TABLES))

    if set(dicts.keys()) != names:
        raise KeyError(
            'not all injectables found. '
            'missing: {}'.format(names - set(dicts.keys())))

    for name, thing in dicts.items():
        if isinstance(thing, _InjectableFuncWrapper):
            dicts[name] = thing()
        elif isinstance(thing, _TableSourceWrapper):
            dicts[name] = thing.convert()

    return dicts
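The keyfilter-then-set-difference idiom above is a compact way to look up a subset of a mapping and fail loudly on misses. Condensed, with made-up names:

import toolz

available = {'households': 1, 'jobs': 2}
names = {'households', 'zones'}
found = toolz.keyfilter(names.__contains__, available)
missing = names - set(found)  # {'zones'} -> the KeyError branch above fires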
Example 5: resource_sql
def resource_sql(uri, *args, **kwargs):
    kwargs2 = keyfilter(keywords(sa.create_engine).__contains__, kwargs)
    engine = create_engine(uri, **kwargs2)
    ds = kwargs.get('dshape')
    schema = kwargs.get('schema')

    # we were also given a table name
    if args and isinstance(args[0], (str, unicode)):
        table_name, args = args[0], args[1:]
        metadata = metadata_of_engine(engine, schema=schema)

        with ignoring(sa.exc.NoSuchTableError):
            return attach_schema(sa.Table(table_name, metadata, autoload=True,
                                          autoload_with=engine, schema=schema),
                                 schema)
        if ds:
            t = dshape_to_table(table_name, ds, metadata=metadata)
            t.create()
            return t
        else:
            raise ValueError("Table does not exist and no dshape provided")

    # We were not given a table name
    if ds:
        create_from_datashape(engine, ds, schema=schema)
    return engine
Example 6: into
def into(a, b, **kwargs):
    dialect = b.dialect.copy()
    del dialect['lineterminator']
    dates = [i for i, typ in enumerate(b.schema[0].types)
             if 'date' in str(typ)]  # note: unused below; dates are re-derived as datenames
    schema = b.schema
    if '?' in str(schema):
        schema = dshape(str(schema).replace('?', ''))
    dtypes = valmap(to_numpy_dtype, schema[0].dict)

    datenames = [name for name in dtypes
                 if np.issubdtype(dtypes[name], np.datetime64)]

    dtypes = dict((k, v) for k, v in dtypes.items()
                  if not np.issubdtype(v, np.datetime64))

    if 'strict' in dialect:
        del dialect['strict']

    # Pass only keyword arguments appropriate for read_csv
    kws = keywords(pd.read_csv)
    options = toolz.merge(dialect, kwargs)
    options = toolz.keyfilter(lambda k: k in kws, options)

    if b.open == gzip.open:
        options['compression'] = 'gzip'

    return pd.read_csv(b.path,
                       skiprows=1 if b.header else 0,
                       dtype=dtypes,
                       parse_dates=datenames,
                       names=b.columns,
                       **options)
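Combining toolz.merge with keyfilter, as above, lets caller kwargs take precedence over stored dialect options while still discarding anything read_csv would not accept. The shape of that composition, with a stand-in allowed set:

import toolz

defaults = {'sep': ',', 'strict': True}    # e.g. a stored CSV dialect
overrides = {'sep': '\t', 'nrows': 10}     # caller-supplied kwargs
allowed = {'sep', 'nrows'}                 # stand-in for keywords(pd.read_csv)
toolz.keyfilter(allowed.__contains__, toolz.merge(defaults, overrides))
# {'sep': '\t', 'nrows': 10} -- later dicts win, unknown keys are dropped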
Example 7: pick
def pick(whitelist, d):
    sub = toolz.keyfilter(
        lambda key: key in whitelist, d)
    if isinstance(d, DD):
        return DD(sub)
    else:
        return sub
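Usage is the same as keyfilter with the arguments flipped; for a plain dict the DD branch is skipped:

pick({'a', 'b'}, {'a': 1, 'b': 2, 'c': 3})  # {'a': 1, 'b': 2}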
Example 8: resource_bcolz
def resource_bcolz(uri, dshape=None, **kwargs):
    if os.path.exists(uri):
        return ctable(rootdir=uri)
    else:
        if not dshape:
            raise ValueError("Must specify either existing bcolz directory or"
                             " valid datashape")
        dshape = datashape.dshape(dshape)
        dt = datashape.to_numpy_dtype(dshape)
        x = np.empty(shape=(0,), dtype=dt)
        if datashape.predicates.isrecord(dshape.measure):
            return ctable(x, rootdir=uri, **keyfilter(keywords.__contains__, kwargs))
        else:
            return carray(x, rootdir=uri, **keyfilter(keywords.__contains__, kwargs))
Example 9: resource_bcolz
def resource_bcolz(uri, dshape=None, expected_dshape=None, **kwargs):
    if os.path.exists(uri):
        try:
            return ctable(rootdir=uri)
        except IOError:  # __rootdirs__ doesn't exist because we aren't a ctable
            return carray(rootdir=uri)
    else:
        if not dshape:
            raise ValueError("Must specify either existing bcolz directory or"
                             " valid datashape")

        dshape = datashape.dshape(dshape)
        dt = datashape.to_numpy_dtype(dshape)
        shape_tail = tuple(map(int, dshape.shape[1:]))  # tail of shape
        if dshape.shape[0] == datashape.var:
            shape = (0,) + shape_tail
        else:
            shape = (int(dshape.shape[0]),) + shape_tail

        x = np.empty(shape=shape, dtype=dt)
        kwargs = keyfilter(keywords.__contains__, kwargs)
        expectedlen = kwargs.pop('expectedlen',
                                 int(expected_dshape[0])
                                 if expected_dshape is not None and
                                 isinstance(expected_dshape[0], datashape.Fixed)
                                 else None)

        if datashape.predicates.isrecord(dshape.measure):
            return ctable(x, rootdir=uri, expectedlen=expectedlen, **kwargs)
        else:
            return carray(x, rootdir=uri, expectedlen=expectedlen, **kwargs)
Example 10: details
def details(lat_lon):
    """
    Gives more details about a lat_lon from the lat_lons function.
    """
    return {'location': lat_lon,
            # the 'events' field is removed because it contains users' names
            'data': keyfilter(lambda k: k != 'events',
                              requests.post(DETAILS, data=lat_lon).json())}
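Here the predicate excludes a single key rather than whitelisting; the same works on any plain dict:

from toolz import keyfilter

record = {'location': (42.0, -71.0), 'events': ['redacted'], 'name': 'site'}
keyfilter(lambda k: k != 'events', record)
# {'location': (42.0, -71.0), 'name': 'site'}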
Example 11: into
def into(a, b, **kwargs):
    if isinstance(a, type):
        kwargs = keyfilter(carray_keywords.__contains__, kwargs)
        return carray(b, **kwargs)
    else:
        a.append(b)
        a.flush()
        return a
Example 12: _build_particles
def _build_particles(stream_name, parameters, data):
    subset = keyfilter(lambda k: k in parameters, data)
    grouped = OmsExtractor._group_by_timestamp(subset)
    particles = []
    for timestamp, attrs in grouped.iteritems():  # Python 2 dict iteration
        attrs = OmsExtractor._convert_attrs_to_ion(parameters, attrs)
        particles.append(OmsExtractor._build_particle(stream_name, timestamp, attrs))
    return particles
Example 13: _csv_to_DataFrame
def _csv_to_DataFrame(c, dshape=None, chunksize=None, **kwargs):
    has_header = kwargs.pop('has_header', c.has_header)
    if has_header is False:
        header = None
    elif has_header is True:
        header = 0
    else:
        header = 'infer'

    sep = kwargs.pop('sep', kwargs.pop('delimiter', c.dialect.get('delimiter', ',')))
    encoding = kwargs.get('encoding', c.encoding)

    if dshape:
        dtypes, parse_dates = dshape_to_pandas(dshape)
        if isrecord(dshape.measure):
            names = kwargs.get('names', dshape.measure.names)
        else:
            names = kwargs.get('names')
    else:
        dtypes = parse_dates = names = None

    usecols = kwargs.pop('usecols', None)
    if parse_dates and usecols:
        parse_dates = [col for col in parse_dates if col in usecols]

    compression = kwargs.pop('compression',
                             {'gz': 'gzip', 'bz2': 'bz2'}.get(ext(c.path)))

    # See read_csv docs for header for reasoning
    if names:
        try:
            found_names = pd.read_csv(c.path, encoding=encoding,
                                      compression=compression, nrows=1)
        except StopIteration:
            found_names = pd.read_csv(c.path, encoding=encoding,
                                      compression=compression)
    if names and header == 'infer':
        if [n.strip() for n in found_names] == [n.strip() for n in names]:
            header = 0
        elif (all(re.match(r'^\s*\D\w*\s*$', n) for n in found_names) and
              not all(dt == datashape.string for dt in dshape.measure.types)):
            header = 0
        else:
            header = None

    kwargs2 = keyfilter(keywords(pd.read_csv).__contains__, kwargs)
    return pd.read_csv(c.path,
                       header=header,
                       sep=sep,
                       encoding=encoding,
                       dtype=dtypes,
                       parse_dates=parse_dates,
                       names=names,
                       compression=compression,
                       chunksize=chunksize,
                       usecols=usecols,
                       **kwargs2)
Example 14: get_test_cases
def get_test_cases(task):
    kwarglist = toolz.keyfilter(lambda x: x != "return",
                                task.run.__annotations__)
    if kwarglist:
        value = next(kwarglist.itervalues())  # Python 2: itervalues/xrange
        return [toolz.valmap(lambda x: x[i], kwarglist)
                for i in xrange(len(value))]
    else:
        return [{}]
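This snippet is Python 2 (itervalues, xrange), but the annotation trick carries over. In Python 3, filtering 'return' out of __annotations__ looks like this (the sample annotations are invented):

from toolz import keyfilter

def run(xs: [1, 2], ys: [3, 4]) -> None:  # annotations hold per-case inputs
    pass

keyfilter(lambda k: k != 'return', run.__annotations__)
# {'xs': [1, 2], 'ys': [3, 4]}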
Example 15: _csv_to_dataframe
def _csv_to_dataframe(c, dshape=None, chunksize=None, **kwargs):
    header = {False: None, True: 0}.get(
        kwargs.pop('has_header', c.has_header), 'infer')

    sep = kwargs.pop(
        'sep', kwargs.pop('delimiter', c.dialect.get('delimiter', ',')))
    encoding = kwargs.pop('encoding', c.encoding)

    if dshape:
        dtypes, parse_dates = dshape_to_pandas(dshape)
        if isrecord(dshape.measure):
            names = kwargs.get('names', dshape.measure.names)
        else:
            names = kwargs.get('names')
    else:
        dtypes = parse_dates = names = None

    usecols = kwargs.pop('usecols', None)
    if parse_dates and usecols:
        parse_dates = [col for col in parse_dates if col in usecols]

    # See read_csv docs for header for reasoning
    if names:
        try:
            with c.open() as f:
                found_names = pd.read_csv(f,
                                          nrows=1,
                                          encoding=encoding,
                                          sep=sep)
        except StopIteration:
            with c.open() as f:
                found_names = pd.read_csv(f, encoding=encoding, sep=sep)
    if names and header == 'infer':
        if [n.strip() for n in found_names] == [n.strip() for n in names]:
            header = 0
        elif (all(re.match(r'^\s*\D\w*\s*$', n) for n in found_names) and
              not all(dt == datashape.string for dt in dshape.measure.types)):
            header = 0
        else:
            header = None

    kwargs = keyfilter(keywords(pd.read_csv).__contains__, kwargs)
    with c.open() as f:
        return pd.read_csv(f,
                           header=header,
                           sep=sep,
                           encoding=encoding,
                           dtype=dtypes,
                           parse_dates=parse_dates,
                           names=names,
                           chunksize=chunksize,
                           usecols=usecols,
                           **kwargs)