當前位置: 首頁>>代碼示例>>Python>>正文


Python toolz.concat方法代碼示例

本文整理匯總了Python中toolz.concat方法的典型用法代碼示例。如果您正苦於以下問題:Python toolz.concat方法的具體用法?Python toolz.concat怎麼用?Python toolz.concat使用的例子?那麼, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在toolz的用法示例。


在下文中一共展示了toolz.concat方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。

示例1: lookup_random

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
async def lookup_random(self) -> Tuple[kademlia.Node, ...]:
        """Discover nodes for our topic and return every node seen.

        Sends a topic query to a random sample of routing-table nodes and
        gathers their replies concurrently.  If fewer than
        ``kademlia.k_bucket_size`` nodes are found for the topic, falls back
        to a regular kademlia lookup for a random node ID to top up the set.

        FIX: the body awaits (``asyncio.gather`` and ``super().lookup_random()``),
        so this must be declared ``async def``; a plain ``def`` is a SyntaxError.
        """
        query_nodes = list(
            self.routing.get_random_nodes(self._concurrent_topic_nodes_requests)
        )
        # Pair every queried node with the echo token we expect back, so the
        # reply waiters below can be matched to their queries.
        expected_echoes = tuple(
            (n, self.send_topic_query(n, self.topic)) for n in query_nodes
        )
        replies = await asyncio.gather(
            *[self.wait_topic_nodes(n, echo) for n, echo in expected_echoes]
        )
        seen_nodes = set(toolz.concat(replies))
        self.logger.debug(
            "Got %d nodes for the %s topic: %s", len(seen_nodes), self.topic, seen_nodes
        )
        for node in seen_nodes:
            self.topic_table.add_node(node, self.topic)

        if len(seen_nodes) < kademlia.k_bucket_size:
            # Not enough nodes were found for our topic, so perform a regular kademlia lookup for
            # a random node ID.
            extra_nodes = await super().lookup_random()
            seen_nodes.update(extra_nodes)

        return tuple(seen_nodes)
開發者ID:QuarkChain,項目名稱:pyquarkchain,代碼行數:26,代碼來源:discovery.py

示例2: fit

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def fit(self, text):
        # type: (Encoder, Iterable[str]) -> None
        """Learn the word and BPE vocabularies from an iterable of text lines."""
        if self.lowercase:
            cleaned = [line.lower().strip() for line in text]
        else:
            cleaned = [line.strip() for line in text]

        # Word-level vocab comes first; BPE only needs to cover what it misses.
        self.word_vocab = self.learn_word_vocab(cleaned)

        leftovers = []
        for line in cleaned:
            for word in self.word_tokenizer(line):
                if word not in self.word_vocab:
                    leftovers.append(word)
        self.bpe_vocab = self.learn_bpe_vocab(leftovers)

        # Inverse maps let us decode indices back to tokens.
        self.inverse_word_vocab = {index: token for token, index in self.word_vocab.items()}
        self.inverse_bpe_vocab = {index: token for token, index in self.bpe_vocab.items()}
開發者ID:soaxelbrooke,項目名稱:python-bpe,代碼行數:18,代碼來源:encoder.py

示例3: find_list

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def find_list(self, node):
        """Collect the matches from every child of ``node`` into one flat list."""
        collected = []
        for child in node:
            collected.extend(self.find(child))
        return collected
開發者ID:ibis-project,項目名稱:ibis,代碼行數:4,代碼來源:find.py

示例4: find_Call

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def find_Call(self, node):
        """Search a Call node's children for matches, lazily.

        When the callee is a bare name, the ``func`` field is skipped so the
        called function's own name is not treated as a match.
        """
        if isinstance(node.func, ast.Name):
            searchable = [field for field in node._fields if field != 'func']
        else:
            searchable = list(node._fields)
        return (
            match
            for field in searchable
            for match in self.find(getattr(node, field))
        )
開發者ID:ibis-project,項目名稱:ibis,代碼行數:10,代碼來源:find.py

示例5: physical_tables_join

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def physical_tables_join(join):
    """Physical roots of a Join node: the unique physical roots of its left
    and right tables, in first-seen order."""
    seen = set()
    ordered = []
    for side in (join.left, join.right):
        for root in physical_tables(side.op()):
            # Keep only the first occurrence, exactly like toolz.unique.
            if root not in seen:
                seen.add(root)
                ordered.append(root)
    return ordered
開發者ID:ibis-project,項目名稱:ibis,代碼行數:7,代碼來源:selection.py

示例6: physical_tables_node

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def physical_tables_node(node):
    """Iterative case: a Node's physical roots are the unique physical roots
    of its root tables, in first-seen order."""
    seen = set()
    ordered = []
    for table in node.root_tables():
        for root in physical_tables(table):
            # First occurrence wins, matching toolz.unique semantics.
            if root not in seen:
                seen.add(root)
                ordered.append(root)
    return ordered
開發者ID:ibis-project,項目名稱:ibis,代碼行數:6,代碼來源:selection.py

示例7: distinct_roots

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def distinct_roots(*expressions):
    """Return the distinct root tables across all given expressions,
    preserving first-seen order."""
    seen = set()
    ordered = []
    for expression in expressions:
        for root in expression._root_tables():
            if root not in seen:
                seen.add(root)
                ordered.append(root)
    return ordered
開發者ID:ibis-project,項目名稱:ibis,代碼行數:7,代碼來源:operations.py

示例8: __init__

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def __init__(
        self, table, selections=None, predicates=None, sort_keys=None
    ):
        """Build a selection over ``table``.

        ``selections`` may mix column-name strings and expressions;
        ``predicates`` are flattened into individual boolean expressions;
        ``sort_keys`` are normalized against ``table``.
        """
        # Imported locally, presumably to avoid a circular import — confirm.
        import ibis.expr.analysis as L

        # Argument cleaning: treat None as "nothing requested".
        raw_selections = util.promote_list(
            [] if selections is None else selections
        )

        # Bare strings are column references on the base table.
        projections = [
            table[item] if isinstance(item, str) else item
            for item in raw_selections
        ]

        sort_keys = [
            to_sort_key(table, key)
            for key in util.promote_list(
                [] if sort_keys is None else sort_keys
            )
        ]

        flattened_predicates = []
        for predicate in (predicates if predicates is not None else []):
            flattened_predicates.extend(L.flatten_predicate(predicate))

        super().__init__(
            table=table,
            selections=projections,
            predicates=flattened_predicates,
            sort_keys=sort_keys,
        )
開發者ID:ibis-project,項目名稱:ibis,代碼行數:42,代碼來源:operations.py

示例9: root_tables

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def root_tables(self):
        """Return the distinct root tables of this node's function arguments,
        preserving first-seen order."""
        seen = set()
        ordered = []
        for arg in self.func_args:
            for root in arg._root_tables():
                # Keep only first occurrences, as toolz.unique would.
                if root not in seen:
                    seen.add(root)
                    ordered.append(root)
        return ordered
開發者ID:ibis-project,項目名稱:ibis,代碼行數:10,代碼來源:operations.py

示例10: learn_word_vocab

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def learn_word_vocab(self, sentences):
        # type: (Encoder, Iterable[str]) -> Dict[str, int]
        """Build a word -> index vocab from the most common tokens in ``sentences``."""
        word_counts = Counter()
        for sentence in sentences:
            word_counts.update(self.word_tokenizer(sentence))
        # Pin required tokens with an effectively infinite count so they
        # always survive the frequency cutoff below.
        for token in self.required_tokens:
            word_counts[token] = int(2**63)
        # reverse=True keeps sort stability, so ties keep first-seen order,
        # same as sorting on the negated count.
        ranked = sorted(word_counts.items(), key=lambda pair: pair[1], reverse=True)
        kept = ranked[:self.word_vocab_size]
        return {word: index for index, (word, _count) in enumerate(kept)}
開發者ID:plkmo,項目名稱:NLP_Toolkit,代碼行數:10,代碼來源:bpe_vocab.py

示例11: fit

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def fit(self, text):
        # type: (Encoder, Iterable[str]) -> None
        """Learn the word and BPE vocabularies from an iterable of text lines.

        Unlike the configurable variant, this always lowercases input.
        """
        cleaned = [line.lower().strip() for line in text]

        # Word-level vocab comes first; BPE only needs to cover what it misses.
        self.word_vocab = self.learn_word_vocab(cleaned)

        leftovers = []
        for line in cleaned:
            for word in self.word_tokenizer(line):
                if word not in self.word_vocab:
                    leftovers.append(word)
        self.bpe_vocab = self.learn_bpe_vocab(leftovers)

        # Inverse maps let us decode indices back to tokens.
        self.inverse_word_vocab = {index: token for token, index in self.word_vocab.items()}
        self.inverse_bpe_vocab = {index: token for token, index in self.bpe_vocab.items()}
開發者ID:plkmo,項目名稱:NLP_Toolkit,代碼行數:16,代碼來源:bpe_vocab.py

示例12: test_default_calendars

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def test_default_calendars(self):
        """Every default alias and canonical name resolves to a calendar and
        can then be deregistered."""
        dispatcher = TradingCalendarDispatcher(
            calendars={},
            calendar_factories=_default_calendar_factories,
            aliases=_default_calendar_aliases,
        )

        # Aliases come first so the canonical factories stay registered until
        # every alias pointing at them (and the canonical name itself) has
        # been exercised; only then is each deregistered.
        names_in_order = list(_default_calendar_aliases) + list(
            _default_calendar_factories
        )
        for name in names_in_order:
            calendar = dispatcher.get_calendar(name)
            self.assertIsNotNone(calendar,
                                 "get_calendar(%r) returned None" % name)
            dispatcher.deregister_calendar(name)
開發者ID:quantopian,項目名稱:trading_calendars,代碼行數:17,代碼來源:test_trading_calendar.py

示例13: powerset

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def powerset(values):
    """
    Return the power set (i.e., the set of all subsets) of entries in `values`.
    """
    # Subsets of every size from the empty set up to the full set, lazily.
    subset_sizes = range(len(values) + 1)
    return concat(combinations(values, size) for size in subset_sizes)
開發者ID:zhanghan1990,項目名稱:zipline-chinese,代碼行數:7,代碼來源:core.py

示例14: everything_but

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def everything_but(k, d):
    """
    Return iterator of all values in d except the values in k.
    """
    assert k in d
    # Flatten the value-iterables of every entry whose key is not k.
    return (
        element
        for key, values in d.items()
        if key != k
        for element in values
    )
開發者ID:zhanghan1990,項目名稱:zipline-chinese,代碼行數:8,代碼來源:test_numpy_utils.py

示例15: test_current_session

# 需要導入模塊: import toolz [as 別名]
# 或者: from toolz import concat [as 別名]
def test_current_session(self):
        """Every minute — regular market minute or not — should map to the
        session label reported by ``bar_data.current_session``."""
        first_day = self.equity_minute_bar_days[0]
        last_day = self.equity_minute_bar_days[-1]
        regular_minutes = self.trading_calendar.minutes_for_sessions_in_range(
            first_day, last_day
        )

        # Before-trading-start minutes (8:45am Eastern).
        bts_minutes = days_at_time(
            self.equity_minute_bar_days,
            time(8, 45),
            "US/Eastern"
        )

        # some other non-market-minute
        three_oh_six_am_minutes = days_at_time(
            self.equity_minute_bar_days,
            time(3, 6),
            "US/Eastern"
        )

        for minute_group in (regular_minutes, bts_minutes,
                             three_oh_six_am_minutes):
            for minute in minute_group:
                bar_data = self.create_bardata(lambda: minute)

                self.assertEqual(
                    self.trading_calendar.minute_to_session_label(minute),
                    bar_data.current_session
                )
開發者ID:enigmampc,項目名稱:catalyst,代碼行數:29,代碼來源:test_bar_data.py


注:本文中的toolz.concat方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。