

Python operator.iconcat Method Code Examples

This article compiles typical usage examples of the operator.iconcat method in Python. If you are wondering what exactly operator.iconcat does, how to call it, or what real-world uses look like, the selected code examples below may help. You can also explore further usage examples from the operator module that the method belongs to.


The following presents 7 code examples of the operator.iconcat method, sorted by popularity by default.
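Before the project examples, here is a minimal sketch, for illustration only and not taken from any of the projects below. It shows that operator.iconcat(a, b) concatenates in place (equivalent to a += b for mutable sequences) and demonstrates the idiom that several examples rely on: combining it with functools.reduce to flatten a list of lists.

import functools
import operator

# iconcat mutates its first argument: for lists this is the same as `a += b`.
a = [1, 2]
b = [3, 4]
result = operator.iconcat(a, b)
print(result)  # [1, 2, 3, 4]
print(a)       # [1, 2, 3, 4] -- `a` itself was extended

# Common idiom used in the examples below: flatten a list of lists.
nested = [[1, 2], [3], [4, 5]]
flat = functools.reduce(operator.iconcat, nested, [])
print(flat)    # [1, 2, 3, 4, 5]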

Example 1: get_unique_terms

# Required module: import operator [as alias]
# Or: from operator import iconcat [as alias]
def get_unique_terms():
    "retrieve all unique terms from termdata definitions"
    ts = functools.reduce(operator.iconcat, terms_by_type.values(), [])
    cs = functools.reduce(operator.iconcat, terms_by_country.values(), [])
    return set(ts + cs) 
Developer: psolin, Project: cleanco, Lines: 7, Source: clean.py

Example 2: flatten

# Required module: import operator [as alias]
# Or: from operator import iconcat [as alias]
def flatten(l):
    if isinstance(l, list):
        return functools.reduce(operator.iconcat, l, [])
    elif isinstance(l, dict):
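        # flattenDict (not shown in this snippet) handles the dict case elsewhere in the source project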
        return flattenDict(l) 
Developer: cadCAD-org, Project: cadCAD, Lines: 7, Source: __init__.py

Example 3: flatten

# Required module: import operator [as alias]
# Or: from operator import iconcat [as alias]
def flatten(x: Sequence):
    """
    Flatten the provided (potentially nested) sequence

    Args:
        x (Sequence): Potentially nested sequence to flatten

    Returns:
        list: Flattened sequence
    """

    return functools.reduce(operator.iconcat, x, []) 
Developer: plkmo, Project: NLP_Toolkit, Lines: 14, Source: tokenization_utils.py

Example 4: test_inplace

# Required module: import operator [as alias]
# Or: from operator import iconcat [as alias]
def test_inplace(self):
        class C(object):
            def __iadd__     (self, other): return "iadd"
            def __iand__     (self, other): return "iand"
            def __idiv__     (self, other): return "idiv"
            def __ifloordiv__(self, other): return "ifloordiv"
            def __ilshift__  (self, other): return "ilshift"
            def __imod__     (self, other): return "imod"
            def __imul__     (self, other): return "imul"
            def __ior__      (self, other): return "ior"
            def __ipow__     (self, other): return "ipow"
            def __irshift__  (self, other): return "irshift"
            def __isub__     (self, other): return "isub"
            def __itruediv__ (self, other): return "itruediv"
            def __ixor__     (self, other): return "ixor"
            def __getitem__(self, other): return 5  # so that C is a sequence
        c = C()
        self.assertEqual(operator.iadd     (c, 5), "iadd")
        self.assertEqual(operator.iand     (c, 5), "iand")
        self.assertEqual(operator.idiv     (c, 5), "idiv")
        self.assertEqual(operator.ifloordiv(c, 5), "ifloordiv")
        self.assertEqual(operator.ilshift  (c, 5), "ilshift")
        self.assertEqual(operator.imod     (c, 5), "imod")
        self.assertEqual(operator.imul     (c, 5), "imul")
        self.assertEqual(operator.ior      (c, 5), "ior")
        self.assertEqual(operator.ipow     (c, 5), "ipow")
        self.assertEqual(operator.irshift  (c, 5), "irshift")
        self.assertEqual(operator.isub     (c, 5), "isub")
        self.assertEqual(operator.itruediv (c, 5), "itruediv")
        self.assertEqual(operator.ixor     (c, 5), "ixor")
        self.assertEqual(operator.iconcat  (c, c), "iadd")
        self.assertEqual(operator.irepeat  (c, 5), "imul")
        self.assertEqual(operator.__iadd__     (c, 5), "iadd")
        self.assertEqual(operator.__iand__     (c, 5), "iand")
        self.assertEqual(operator.__idiv__     (c, 5), "idiv")
        self.assertEqual(operator.__ifloordiv__(c, 5), "ifloordiv")
        self.assertEqual(operator.__ilshift__  (c, 5), "ilshift")
        self.assertEqual(operator.__imod__     (c, 5), "imod")
        self.assertEqual(operator.__imul__     (c, 5), "imul")
        self.assertEqual(operator.__ior__      (c, 5), "ior")
        self.assertEqual(operator.__ipow__     (c, 5), "ipow")
        self.assertEqual(operator.__irshift__  (c, 5), "irshift")
        self.assertEqual(operator.__isub__     (c, 5), "isub")
        self.assertEqual(operator.__itruediv__ (c, 5), "itruediv")
        self.assertEqual(operator.__ixor__     (c, 5), "ixor")
        self.assertEqual(operator.__iconcat__  (c, c), "iadd")
        self.assertEqual(operator.__irepeat__  (c, 5), "imul") 
Developer: IronLanguages, Project: ironpython2, Lines: 49, Source: test_operator.py

Example 5: rectify

# Required module: import operator [as alias]
# Or: from operator import iconcat [as alias]
def rectify(dataset: str,
            xy_var_names: str = None,
            var_names: str = None,
            output_path: str = None,
            output_format: str = None,
            output_size: str = None,
            output_tile_size: str = None,
            output_point: str = None,
            output_res: float = None,
            delta: float = DEFAULT_DELTA,
            dry_run: bool = DEFAULT_DRY_RUN):
    """
    Rectify a dataset to WGS-84 using its per-pixel geo-locations.
    """

    input_path = dataset

    xy_var_names = parse_cli_sequence(xy_var_names,
                                      metavar='VARIABLES', num_items=2,
                                      item_plural_name='names')
    var_name_lists = [parse_cli_sequence(var_name_specifier,
                                         metavar='VARIABLES',
                                         item_plural_name='names')
                      for var_name_specifier in var_names]
    var_name_flat_list = functools.reduce(operator.iconcat, var_name_lists, [])

    output_size = parse_cli_sequence(output_size,
                                     metavar='SIZE', num_items=2, item_plural_name='sizes',
                                     item_parser=int, item_validator=assert_positive_int_item)
    output_tile_size = parse_cli_sequence(output_tile_size,
                                          metavar='TILE_SIZE', num_items=2, item_plural_name='tile sizes',
                                          item_parser=int, item_validator=assert_positive_int_item)
    output_point = parse_cli_sequence(output_point,
                                      metavar='POINT', num_items=2, item_plural_name='coordinates',
                                      item_parser=float)

    # noinspection PyBroadException
    _rectify(input_path,
             xy_var_names,
             None if len(var_name_flat_list) == 0 else var_name_flat_list,
             output_path,
             output_format,
             output_size,
             output_tile_size,
             output_point,
             output_res,
             delta,
             dry_run=dry_run,
             monitor=print)

    return 0 
Developer: dcs4cop, Project: xcube, Lines: 53, Source: rectify.py

Example 6: calculate_n

# Required module: import operator [as alias]
# Or: from operator import iconcat [as alias]
def calculate_n(model, batch_vectorizer):
    """
    Calculate all necessary statistics from batch. This may take some time.
    """
    doc2token = {}
    for batch_id in range(len(batch_vectorizer._batches_list)):
        batch_name = batch_vectorizer._batches_list[batch_id]._filename
        batch = artm.messages.Batch()
        with open(batch_name, "rb") as f:
            batch.ParseFromString(f.read())

        for item_id in range(len(batch.item)):
            item = batch.item[item_id]
            theta_item_id = getattr(item, model.theta_columns_naming)

            doc2token[theta_item_id] = {'tokens': [], 'weights': []}
            for token_id, token_weight in zip(item.token_id, item.token_weight):
                doc2token[theta_item_id]['tokens'].append(batch.token[token_id])
                doc2token[theta_item_id]['weights'].append(token_weight)

    previous_num_document_passes = model._num_document_passes
    model._num_document_passes = 10
    ptdw = model.transform(batch_vectorizer=batch_vectorizer, theta_matrix_type='dense_ptdw')
    model._num_document_passes = previous_num_document_passes

    docs = ptdw.columns
    docs_unique = OrderedDict.fromkeys(docs).keys()

    tokens = [doc2token[doc_id]['tokens'] for doc_id in docs_unique]
    tokens = functools.reduce(operator.iconcat, tokens, [])

    ndw = np.concatenate([np.array(doc2token[doc_id]['weights']) for doc_id in docs_unique])
    ndw = np.tile(ndw, (ptdw.shape[0], 1))

    ptdw.columns = pd.MultiIndex.from_arrays([docs, tokens], names=('doc', 'token'))
    ntdw = ptdw * ndw

    ntd = ntdw.groupby(level=0, axis=1).sum()

    nwt = ntdw.groupby(level=1, axis=1).sum().T

    nt = nwt.sum(axis=0)

    return ntdw, ntd, nwt, nt 
Developer: machine-intelligence-laboratory, Project: TopicNet, Lines: 46, Source: semantic_radius_score.py

Example 7: to_dict

# Required module: import operator [as alias]
# Or: from operator import iconcat [as alias]
def to_dict(self, parsed_results):
        """This function standandlize the parsed results. It first reduces the
        collected parsed results into lists of tuples; then loads them as
        pandas.DataFrames whose columns represent p_keys.

        Parameters
        ----------
        parsed_results : iterable
            An iterable that contains parsed results generated by Parser.

        Returns
        ----------
        A dict, the keys of which are consistent with database tables; and
        the values of which are pandas.DataFrame.
        """
        tkeys = PMETA.keys()
        reduced_results = reduce(lambda x, y: {k: iconcat(x[k], y[k])
                                               for k in tkeys},
                                 parsed_results)
        dfs = {
            k: pd.DataFrame(reduced_results[k], columns=PMETA[k]['p_keys'])
            for k in tkeys
        }
        # drop duplicates mainly based on unique keys
        for k in tkeys:
            if k == 'full_user' or k == 'mentioned_user':
                dfs[k] = dfs[k].sort_values('updated_at', ascending=False)
            #
            # !IMPORTANT (ESPECIALLY FOR `ass_tweet` table)
            # Caution:
            # (1) The default missing value for pandas.DataFrame is
            # np.NAN, which is not compatible with SQL insertion in SQLAlchemy.
            # Thus a replace operation needs to take place.
            # (2) When missing values occur, the dtype of a DataFrame would
            # be 'float' (either float32 or float64), which could truncate
            # large numbers. Since version 0.24, pandas provides the new data type
            # Int64 (CAPITAL I). Thus we need to convert it to this data type.
            #
            if k == 'ass_tweet':
                # replace np.NAN as None
                dfs[k] = dfs[k].astype('Int64')
                dfs[k].replace({pd.np.nan: None}, inplace=True)
            dfs[k] = dfs[k].drop_duplicates(PMETA[k]['pu_keys'], keep='first')
        return dfs 
Developer: IUNetSci, Project: hoaxy-backend, Lines: 46, Source: parsers.py


Note: The operator.iconcat method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to each project's License for distribution and use; do not reproduce without permission.