This article collects typical usage examples of the operator.iconcat method in Python. If you have been asking yourself how operator.iconcat works, how to call it, or what real-world uses look like, the hand-picked code examples below may help. You can also explore further usage examples from the operator module, where this method lives.
Seven code examples of operator.iconcat are shown below, sorted by popularity by default.
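Before the examples, a quick refresher: operator.iconcat(a, b) performs a += b for sequences, mutating its first operand in place and returning it. A minimal sketch:

import operator

a = [1, 2]
b = [3, 4]
result = operator.iconcat(a, b)  # equivalent to: a += b
print(result)        # [1, 2, 3, 4]
print(result is a)   # True -- the first operand was mutated and returned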
Example 1: get_unique_terms
# Required imports: import functools, operator
# Or: from operator import iconcat
def get_unique_terms():
    "retrieve all unique terms from termdata definitions"
    # concatenate all per-type and per-country term lists into fresh accumulators
    ts = functools.reduce(operator.iconcat, terms_by_type.values(), [])
    cs = functools.reduce(operator.iconcat, terms_by_country.values(), [])
    return set(ts + cs)
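A minimal, self-contained reproduction of the pattern in Example 1, using hypothetical stand-ins for terms_by_type and terms_by_country (assumed here to be dicts mapping categories to lists of strings). The fresh [] initializer is essential: because iconcat mutates its first operand, omitting it would append every term into the first list stored in the dict.

import functools
import operator

terms_by_type = {'noun': ['alpha', 'beta'], 'verb': ['run']}  # hypothetical data
terms_by_country = {'US': ['alpha'], 'DE': ['gamma']}         # hypothetical data

ts = functools.reduce(operator.iconcat, terms_by_type.values(), [])
cs = functools.reduce(operator.iconcat, terms_by_country.values(), [])
print(set(ts + cs))  # {'alpha', 'beta', 'run', 'gamma'} (set order varies)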
Example 2: flatten
# Required imports: import functools, operator
# Or: from operator import iconcat
def flatten(l):
    if isinstance(l, list):
        # flatten one level of list nesting
        return functools.reduce(operator.iconcat, l, [])
    elif isinstance(l, dict):
        # flattenDict is defined elsewhere in the source project
        return flattenDict(l)
Example 3: flatten
# Required imports: import functools, operator
# Or: from operator import iconcat
def flatten(x: Sequence):
    """
    Flatten the provided (potentially nested) sequence.

    Args:
        x (Sequence): Potentially nested sequence to flatten

    Returns:
        list: Flattened sequence
    """
    return functools.reduce(operator.iconcat, x, [])
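Despite the docstring, this version flattens exactly one level of nesting; deeper levels are preserved. The reduce/iconcat idiom is also a fast pure-Python flatten, since iconcat extends the accumulator in place instead of copying it on every step (as sum(x, []) would). Quick usage check:

flatten([[1, 2], [3, 4], [5]])   # -> [1, 2, 3, 4, 5]
flatten([[1, [2, 3]], [4]])      # -> [1, [2, 3], 4] -- inner nesting preserved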
Example 4: test_inplace
# Required import: import operator
# Or: from operator import iconcat
# Note: this test targets Python 2. operator.idiv, operator.irepeat and their
# dunder aliases were removed in Python 3.
def test_inplace(self):
    class C(object):
        def __iadd__     (self, other): return "iadd"
        def __iand__     (self, other): return "iand"
        def __idiv__     (self, other): return "idiv"
        def __ifloordiv__(self, other): return "ifloordiv"
        def __ilshift__  (self, other): return "ilshift"
        def __imod__     (self, other): return "imod"
        def __imul__     (self, other): return "imul"
        def __ior__      (self, other): return "ior"
        def __ipow__     (self, other): return "ipow"
        def __irshift__  (self, other): return "irshift"
        def __isub__     (self, other): return "isub"
        def __itruediv__ (self, other): return "itruediv"
        def __ixor__     (self, other): return "ixor"
        def __getitem__(self, other): return 5  # so that C is a sequence
    c = C()
    self.assertEqual(operator.iadd     (c, 5), "iadd")
    self.assertEqual(operator.iand     (c, 5), "iand")
    self.assertEqual(operator.idiv     (c, 5), "idiv")
    self.assertEqual(operator.ifloordiv(c, 5), "ifloordiv")
    self.assertEqual(operator.ilshift  (c, 5), "ilshift")
    self.assertEqual(operator.imod     (c, 5), "imod")
    self.assertEqual(operator.imul     (c, 5), "imul")
    self.assertEqual(operator.ior      (c, 5), "ior")
    self.assertEqual(operator.ipow     (c, 5), "ipow")
    self.assertEqual(operator.irshift  (c, 5), "irshift")
    self.assertEqual(operator.isub     (c, 5), "isub")
    self.assertEqual(operator.itruediv (c, 5), "itruediv")
    self.assertEqual(operator.ixor     (c, 5), "ixor")
    self.assertEqual(operator.iconcat  (c, c), "iadd")  # iconcat falls back to __iadd__
    self.assertEqual(operator.irepeat  (c, 5), "imul")  # irepeat falls back to __imul__
    self.assertEqual(operator.__iadd__     (c, 5), "iadd")
    self.assertEqual(operator.__iand__     (c, 5), "iand")
    self.assertEqual(operator.__idiv__     (c, 5), "idiv")
    self.assertEqual(operator.__ifloordiv__(c, 5), "ifloordiv")
    self.assertEqual(operator.__ilshift__  (c, 5), "ilshift")
    self.assertEqual(operator.__imod__     (c, 5), "imod")
    self.assertEqual(operator.__imul__     (c, 5), "imul")
    self.assertEqual(operator.__ior__      (c, 5), "ior")
    self.assertEqual(operator.__ipow__     (c, 5), "ipow")
    self.assertEqual(operator.__irshift__  (c, 5), "irshift")
    self.assertEqual(operator.__isub__     (c, 5), "isub")
    self.assertEqual(operator.__itruediv__ (c, 5), "itruediv")
    self.assertEqual(operator.__ixor__     (c, 5), "ixor")
    self.assertEqual(operator.__iconcat__  (c, c), "iadd")
    self.assertEqual(operator.__irepeat__  (c, 5), "imul")
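Two details are worth noting in this test: operator.iconcat(c, c) returns "iadd" because iconcat has no dunder hook of its own and falls back to the += protocol (here __iadd__) after checking that the first operand is a sequence, which is exactly why class C defines __getitem__; irepeat likewise falls back to __imul__. With built-in types:

import operator

print(operator.iconcat('ab', 'cd'))  # 'abcd' -- str supports sequence concatenation
try:
    operator.iconcat(1, 2)           # ints are not sequences
except TypeError as e:
    print(e)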
Example 5: rectify
# Required imports: import functools, operator
# Or: from operator import iconcat
def rectify(dataset: str,
            xy_var_names: str = None,
            var_names: str = None,
            output_path: str = None,
            output_format: str = None,
            output_size: str = None,
            output_tile_size: str = None,
            output_point: str = None,
            output_res: float = None,
            delta: float = DEFAULT_DELTA,
            dry_run: bool = DEFAULT_DRY_RUN):
    """
    Rectify a dataset to WGS-84 using its per-pixel geo-locations.
    """
    input_path = dataset
    xy_var_names = parse_cli_sequence(xy_var_names,
                                      metavar='VARIABLES', num_items=2,
                                      item_plural_name='names')
    var_name_lists = [parse_cli_sequence(var_name_specifier,
                                         metavar='VARIABLES',
                                         item_plural_name='names')
                      for var_name_specifier in var_names]
    # flatten the per-option name lists into a single flat list
    var_name_flat_list = functools.reduce(operator.iconcat, var_name_lists, [])
    output_size = parse_cli_sequence(output_size,
                                     metavar='SIZE', num_items=2, item_plural_name='sizes',
                                     item_parser=int, item_validator=assert_positive_int_item)
    output_tile_size = parse_cli_sequence(output_tile_size,
                                          metavar='TILE_SIZE', num_items=2, item_plural_name='tile sizes',
                                          item_parser=int, item_validator=assert_positive_int_item)
    output_point = parse_cli_sequence(output_point,
                                      metavar='POINT', num_items=2, item_plural_name='coordinates',
                                      item_parser=float)
    # noinspection PyBroadException
    _rectify(input_path,
             xy_var_names,
             None if len(var_name_flat_list) == 0 else var_name_flat_list,
             output_path,
             output_format,
             output_size,
             output_tile_size,
             output_point,
             output_res,
             delta,
             dry_run=dry_run,
             monitor=print)
    return 0
Example 6: calculate_n
# Required imports: import functools, operator
# Or: from operator import iconcat
def calculate_n(model, batch_vectorizer):
    """
    Calculate all necessary statistics from the batches. This may take some time.
    """
    doc2token = {}
    for batch_id in range(len(batch_vectorizer._batches_list)):
        batch_name = batch_vectorizer._batches_list[batch_id]._filename
        batch = artm.messages.Batch()
        with open(batch_name, "rb") as f:
            batch.ParseFromString(f.read())
        for item_id in range(len(batch.item)):
            item = batch.item[item_id]
            theta_item_id = getattr(item, model.theta_columns_naming)
            doc2token[theta_item_id] = {'tokens': [], 'weights': []}
            for token_id, token_weight in zip(item.token_id, item.token_weight):
                doc2token[theta_item_id]['tokens'].append(batch.token[token_id])
                doc2token[theta_item_id]['weights'].append(token_weight)
    previous_num_document_passes = model._num_document_passes
    model._num_document_passes = 10
    ptdw = model.transform(batch_vectorizer=batch_vectorizer, theta_matrix_type='dense_ptdw')
    model._num_document_passes = previous_num_document_passes
    docs = ptdw.columns
    docs_unique = OrderedDict.fromkeys(docs).keys()
    # flatten the per-document token lists so they align with ptdw's columns
    tokens = [doc2token[doc_id]['tokens'] for doc_id in docs_unique]
    tokens = functools.reduce(operator.iconcat, tokens, [])
    ndw = np.concatenate([np.array(doc2token[doc_id]['weights']) for doc_id in docs_unique])
    ndw = np.tile(ndw, (ptdw.shape[0], 1))
    ptdw.columns = pd.MultiIndex.from_arrays([docs, tokens], names=('doc', 'token'))
    ntdw = ptdw * ndw
    # aggregate counts: n_td sums over tokens per document, n_wt over documents per token
    ntd = ntdw.groupby(level=0, axis=1).sum()
    nwt = ntdw.groupby(level=1, axis=1).sum().T
    nt = nwt.sum(axis=0)
    return ntdw, ntd, nwt, nt
Example 7: to_dict
# Required imports: from functools import reduce
# And: from operator import iconcat
def to_dict(self, parsed_results):
    """This function standardizes the parsed results. It first reduces the
    collected parsed results into lists of tuples, then loads them as
    pandas.DataFrames whose columns represent p_keys.

    Parameters
    ----------
    parsed_results : iterable
        An iterable that contains parsed results generated by Parser.

    Returns
    -------
    A dict whose keys are consistent with the database tables and whose
    values are pandas.DataFrames.
    """
    tkeys = PMETA.keys()
    # merge the per-result dicts: concatenate the lists stored under each key
    reduced_results = reduce(lambda x, y: {k: iconcat(x[k], y[k])
                                           for k in tkeys},
                             parsed_results)
    dfs = {
        k: pd.DataFrame(reduced_results[k], columns=PMETA[k]['p_keys'])
        for k in tkeys
    }
    # drop duplicates, mainly based on unique keys
    for k in tkeys:
        if k == 'full_user' or k == 'mentioned_user':
            dfs[k] = dfs[k].sort_values('updated_at', ascending=False)
        #
        # !IMPORTANT (ESPECIALLY FOR THE `ass_tweet` TABLE)
        # Caution:
        # (1) The default missing value for a pandas.DataFrame is
        # np.nan, which is not compatible with SQL insertion in SQLAlchemy,
        # so a replace operation needs to take place.
        # (2) When missing values occur, the dtype of a DataFrame column
        # becomes float (either float32 or float64), which can truncate
        # large numbers. Since version 0.24, pandas provides the nullable
        # data type Int64 (capital I), so we convert to that type first.
        #
        if k == 'ass_tweet':
            # replace np.nan with None
            # (note: pd.np was removed in pandas 2.0; new code should use numpy directly)
            dfs[k] = dfs[k].astype('Int64')
            dfs[k].replace({pd.np.nan: None}, inplace=True)
        dfs[k] = dfs[k].drop_duplicates(PMETA[k]['pu_keys'], keep='first')
    return dfs
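The reduce call in Example 7 pairwise-merges a stream of dicts that all share the same keys, concatenating the lists stored under each key. A self-contained sketch of just that step with hypothetical parsed results (note that, as in the source, the first dict's lists are extended in place by iconcat):

from functools import reduce
from operator import iconcat

parsed_results = [                 # hypothetical Parser output
    {'tweet': [(1, 'a')], 'user': [(10, 'u1')]},
    {'tweet': [(2, 'b')], 'user': []},
    {'tweet': [(3, 'c')], 'user': [(11, 'u2')]},
]
tkeys = parsed_results[0].keys()
merged = reduce(lambda x, y: {k: iconcat(x[k], y[k]) for k in tkeys},
                parsed_results)
print(merged['tweet'])  # [(1, 'a'), (2, 'b'), (3, 'c')]
print(merged['user'])   # [(10, 'u1'), (11, 'u2')]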