

Python toolz.concat Method Code Examples

This article collects typical usage examples of Python's toolz.concat method. If you are wondering how exactly toolz.concat is used, how it works, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples from the toolz package that the method belongs to.


The following presents 15 code examples of the toolz.concat method, sorted by popularity by default.
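
As a quick orientation before the project-specific examples, here is a minimal standalone sketch of toolz.concat itself (not taken from any of the projects below): concat accepts an iterable of iterables and lazily chains their elements into a single iterator.

from toolz import concat

# concat yields the elements of each inner iterable in order, without building
# an intermediate list.
list(concat([[1, 2], [3], (4, 5)]))        # -> [1, 2, 3, 4, 5]

# Because the result is lazy, generators and other one-shot iterables work too.
list(concat(range(i) for i in range(4)))   # -> [0, 0, 1, 0, 1, 2]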

Example 1: lookup_random

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
async def lookup_random(self) -> Tuple[kademlia.Node, ...]:
        query_nodes = list(
            self.routing.get_random_nodes(self._concurrent_topic_nodes_requests)
        )
        expected_echoes = tuple(
            (n, self.send_topic_query(n, self.topic)) for n in query_nodes
        )
        replies = await asyncio.gather(
            *[self.wait_topic_nodes(n, echo) for n, echo in expected_echoes]
        )
        seen_nodes = set(toolz.concat(replies))
        self.logger.debug(
            "Got %d nodes for the %s topic: %s", len(seen_nodes), self.topic, seen_nodes
        )
        for node in seen_nodes:
            self.topic_table.add_node(node, self.topic)

        if len(seen_nodes) < kademlia.k_bucket_size:
            # Not enough nodes were found for our topic, so perform a regular kademlia lookup for
            # a random node ID.
            extra_nodes = await super().lookup_random()
            seen_nodes.update(extra_nodes)

        return tuple(seen_nodes) 
Author: QuarkChain, Project: pyquarkchain, Lines of code: 26, Source file: discovery.py

Example 2: fit

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def fit(self, text):
        # type: (Encoder, Iterable[str]) -> None
        """ Learn vocab from text. """
        if self.lowercase:
            _text = [l.lower().strip() for l in text]
        else:
            _text = [l.strip() for l in text]
        # First, learn word vocab
        self.word_vocab = self.learn_word_vocab(_text)

        remaining_words = [word for word in toolz.concat(map(self.word_tokenizer, _text))
                           if word not in self.word_vocab]
        self.bpe_vocab = self.learn_bpe_vocab(remaining_words)

        self.inverse_word_vocab = {idx: token for token, idx in self.word_vocab.items()}
        self.inverse_bpe_vocab = {idx: token for token, idx in self.bpe_vocab.items()} 
Author: soaxelbrooke, Project: python-bpe, Lines of code: 18, Source file: encoder.py
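
A note on the pattern above: toolz.concat(map(self.word_tokenizer, _text)) flattens the per-line token lists into one token stream. A minimal standalone sketch of that pattern (the tokenizer and sentences here are made up for illustration):

from toolz import concat

sentences = ["a b c", "d e"]
tokenize = str.split
# map() tokenizes each sentence; concat() flattens the resulting token lists
# into a single lazy stream.
tokens = list(concat(map(tokenize, sentences)))   # -> ['a', 'b', 'c', 'd', 'e']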

Example 3: find_list

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def find_list(self, node):
        return list(toolz.concat(map(self.find, node))) 
Author: ibis-project, Project: ibis, Lines of code: 4, Source file: find.py

Example 4: find_Call

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def find_Call(self, node):
        if not isinstance(node.func, ast.Name):
            fields = node._fields
        else:
            fields = [field for field in node._fields if field != 'func']
        return toolz.concat(
            map(self.find, (getattr(node, field) for field in fields))
        ) 
Author: ibis-project, Project: ibis, Lines of code: 10, Source file: find.py

Example 5: physical_tables_join

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def physical_tables_join(join):
    # Physical roots of Join nodes are the unique physical roots of their
    # left and right TableNodes.
    func = compose(physical_tables, methodcaller('op'))
    return list(unique(concat(map(func, (join.left, join.right))))) 
Author: ibis-project, Project: ibis, Lines of code: 7, Source file: selection.py

Example 6: physical_tables_node

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def physical_tables_node(node):
    # Iterative case. Any other Node's physical roots are the unique physical
    # roots of that Node's root tables.
    return list(unique(concat(map(physical_tables, node.root_tables())))) 
Author: ibis-project, Project: ibis, Lines of code: 6, Source file: selection.py

Example 7: distinct_roots

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def distinct_roots(*expressions):
    roots = toolz.concat(
        expression._root_tables() for expression in expressions
    )
    return list(toolz.unique(roots)) 
Author: ibis-project, Project: ibis, Lines of code: 7, Source file: operations.py

Example 8: __init__

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def __init__(
        self, table, selections=None, predicates=None, sort_keys=None
    ):
        import ibis.expr.analysis as L

        # Argument cleaning
        selections = util.promote_list(
            selections if selections is not None else []
        )

        projections = []
        for selection in selections:
            if isinstance(selection, str):
                projection = table[selection]
            else:
                projection = selection
            projections.append(projection)

        sort_keys = [
            to_sort_key(table, k)
            for k in util.promote_list(
                sort_keys if sort_keys is not None else []
            )
        ]

        predicates = list(
            toolz.concat(
                map(
                    L.flatten_predicate,
                    predicates if predicates is not None else [],
                )
            )
        )

        super().__init__(
            table=table,
            selections=projections,
            predicates=predicates,
            sort_keys=sort_keys,
        ) 
Author: ibis-project, Project: ibis, Lines of code: 42, Source file: operations.py

Example 9: root_tables

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def root_tables(self):
        result = list(
            toolz.unique(
                toolz.concat(arg._root_tables() for arg in self.func_args)
            )
        )

        return result 
Author: ibis-project, Project: ibis, Lines of code: 10, Source file: operations.py

Example 10: learn_word_vocab

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def learn_word_vocab(self, sentences):
        # type: (Encoder, Iterable[str]) -> Dict[str, int]
        """ Build vocab from self.word_vocab_size most common tokens in provided sentences """
        word_counts = Counter(word for word in toolz.concat(map(self.word_tokenizer, sentences)))
        for token in self.required_tokens:
            word_counts[token] = int(2**63)
        sorted_word_counts = sorted(word_counts.items(), key=lambda p: -p[1])
        return {word: idx for idx, (word, count) in enumerate(sorted_word_counts[:self.word_vocab_size])} 
Author: plkmo, Project: NLP_Toolkit, Lines of code: 10, Source file: bpe_vocab.py

Example 11: fit

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def fit(self, text):
        # type: (Encoder, Iterable[str]) -> None
        """ Learn vocab from text. """
        _text = [l.lower().strip() for l in text]

        # First, learn word vocab
        self.word_vocab = self.learn_word_vocab(_text)

        remaining_words = [word for word in toolz.concat(map(self.word_tokenizer, _text))
                           if word not in self.word_vocab]
        self.bpe_vocab = self.learn_bpe_vocab(remaining_words)

        self.inverse_word_vocab = {idx: token for token, idx in self.word_vocab.items()}
        self.inverse_bpe_vocab = {idx: token for token, idx in self.bpe_vocab.items()} 
Author: plkmo, Project: NLP_Toolkit, Lines of code: 16, Source file: bpe_vocab.py

Example 12: test_default_calendars

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def test_default_calendars(self):
        dispatcher = TradingCalendarDispatcher(
            calendars={},
            calendar_factories=_default_calendar_factories,
            aliases=_default_calendar_aliases,
        )

        # These are ordered aliases first, so that we can deregister the
        # canonical factories when we're done with them, and we'll be done with
        # them after they've been used by all aliases and by canonical name.
        for name in concat([_default_calendar_aliases,
                            _default_calendar_factories]):
            self.assertIsNotNone(dispatcher.get_calendar(name),
                                 "get_calendar(%r) returned None" % name)
            dispatcher.deregister_calendar(name) 
Author: quantopian, Project: trading_calendars, Lines of code: 17, Source file: test_trading_calendar.py
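
A detail worth noting in this test: iterating a dict yields its keys, so concat([_default_calendar_aliases, _default_calendar_factories]) walks through all alias names followed by all canonical calendar names. A small standalone illustration (the dict contents below are made up):

from toolz import concat

aliases = {"NYSE-ALIAS": "XNYS"}
factories = {"XNYS": object}
# Iterating the dicts yields their keys, so concat chains alias names,
# then canonical names.
list(concat([aliases, factories]))   # -> ['NYSE-ALIAS', 'XNYS']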

Example 13: powerset

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def powerset(values):
    """
    Return the power set (i.e., the set of all subsets) of entries in `values`.
    """
    return concat(combinations(values, i) for i in range(len(values) + 1)) 
Author: zhanghan1990, Project: zipline-chinese, Lines of code: 7, Source file: core.py
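
A quick usage check for the powerset helper above. It returns a lazy iterator of tuples, so it is wrapped in list() here for display; the function is repeated so the snippet runs on its own:

from itertools import combinations
from toolz import concat

def powerset(values):
    # Chain the k-element combinations for every k from 0 to len(values).
    return concat(combinations(values, i) for i in range(len(values) + 1))

list(powerset([1, 2]))   # -> [(), (1,), (2,), (1, 2)]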

Example 14: everything_but

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def everything_but(k, d):
    """
    Return iterator of all values in d except the values in k.
    """
    assert k in d
    return concat(itervalues(keyfilter(ne(k), d))) 
Author: zhanghan1990, Project: zipline-chinese, Lines of code: 8, Source file: test_numpy_utils.py
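
The helper above relies on itervalues, keyfilter, and ne from the test module's own imports. The sketch below reproduces the same idea with a plain lambda and dict.values() as stand-ins; the sample dict is made up for illustration and assumes each key maps to a collection of values:

from toolz import concat, keyfilter

d = {"ints": [1, 2], "floats": [1.0], "strs": ["a"]}
# Drop the 'ints' entry, then chain the remaining collections of values.
list(concat(keyfilter(lambda key: key != "ints", d).values()))   # -> [1.0, 'a']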

Example 15: test_current_session

# Required import: import toolz [as alias]
# Alternatively: from toolz import concat [as alias]
def test_current_session(self):
        regular_minutes = self.trading_calendar.minutes_for_sessions_in_range(
            self.equity_minute_bar_days[0],
            self.equity_minute_bar_days[-1]
        )

        bts_minutes = days_at_time(
            self.equity_minute_bar_days,
            time(8, 45),
            "US/Eastern"
        )

        # some other non-market-minute
        three_oh_six_am_minutes = days_at_time(
            self.equity_minute_bar_days,
            time(3, 6),
            "US/Eastern"
        )

        all_minutes = [regular_minutes, bts_minutes, three_oh_six_am_minutes]
        for minute in list(concat(all_minutes)):
            bar_data = self.create_bardata(lambda: minute)

            self.assertEqual(
                self.trading_calendar.minute_to_session_label(minute),
                bar_data.current_session
            ) 
Author: enigmampc, Project: catalyst, Lines of code: 29, Source file: test_bar_data.py


Note: The toolz.concat method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; for distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.