

Python toolz.compose Function Code Examples

This article collects typical usage examples of Python's toolz.compose function. If you have been wondering exactly how to use compose, how it behaves, or what it looks like in real code, the hand-picked examples below should help.


The sections below present 15 code examples of the compose function, sorted by popularity by default.
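
Before the examples, a quick sketch of compose itself: compose(f, g) returns a new function that applies its arguments right to left, i.e. g runs first and f runs on g's result. All fifteen examples below build on this one rule.

from toolz import compose

inc = lambda x: x + 1
double = lambda x: x * 2

f = compose(inc, double)  # rightmost function runs first
assert f(3) == 7          # inc(double(3)) == 3 * 2 + 1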

Example 1: test_normalize_function

# Assumed imports for this snippet (not shown in the original excerpt):
# normalize_function lives in dask.base; partial, compose and curry come
# from functools and toolz.
from functools import partial
from toolz import compose, curry
from dask.base import normalize_function

def test_normalize_function():
    def f1(a, b, c=1):
        pass
    def f2(a, b=1, c=2):
        pass
    def f3(a):
        pass

    assert normalize_function(f2)

    f = lambda a: a
    assert normalize_function(f)

    assert (normalize_function(partial(f2, b=2)) ==
            normalize_function(partial(f2, b=2)))

    assert (normalize_function(partial(f2, b=2)) !=
            normalize_function(partial(f2, b=3)))

    assert (normalize_function(partial(f1, b=2)) !=
            normalize_function(partial(f2, b=2)))

    assert (normalize_function(compose(f2, f3)) ==
            normalize_function(compose(f2, f3)))

    assert (normalize_function(compose(f2, f3)) !=
            normalize_function(compose(f2, f1)))

    assert normalize_function(curry(f2)) == normalize_function(curry(f2))
    assert normalize_function(curry(f2)) != normalize_function(curry(f1))
    assert (normalize_function(curry(f2, b=1)) ==
            normalize_function(curry(f2, b=1)))
    assert (normalize_function(curry(f2, b=1)) !=
            normalize_function(curry(f2, b=2)))
Author: datastark, Project: dask, Lines: 34, Source: test_base.py

Example 2: _tree_reduce

def _tree_reduce(x, aggregate, axis, keepdims, dtype, split_every=None,
                 combine=None):
    """Perform the tree reduction step of a reduction.

    Lower level, users should use ``reduction`` or ``arg_reduction`` directly.
    """
    # Normalize split_every
    split_every = split_every or _globals.get('split_every', 4)
    if isinstance(split_every, dict):
        split_every = dict((k, split_every.get(k, 2)) for k in axis)
    elif isinstance(split_every, int):
        n = builtins.max(int(split_every ** (1/(len(axis) or 1))), 2)
        split_every = dict.fromkeys(axis, n)
    else:
        split_every = dict((k, v) for (k, v) in enumerate(x.numblocks) if k in axis)

    # Reduce across intermediates
    depth = 1
    for i, n in enumerate(x.numblocks):
        if i in split_every and split_every[i] != 1:
            depth = int(builtins.max(depth, ceil(log(n, split_every[i]))))
    func = compose(partial(combine or aggregate, axis=axis, keepdims=True),
                   partial(_concatenate2, axes=axis))
    for i in range(depth - 1):
        x = partial_reduce(func, x, split_every, True, None)
    func = compose(partial(aggregate, axis=axis, keepdims=keepdims),
                   partial(_concatenate2, axes=axis))
    return partial_reduce(func, x, split_every, keepdims=keepdims,
                          dtype=dtype)
Author: jcorbin, Project: dask, Lines: 29, Source: reductions.py

Example 3: _tree_reduce

def _tree_reduce(x, aggregate, axis, keepdims, dtype, split_every=None,
                 combine=None, name=None, concatenate=True):
    """ Perform the tree reduction step of a reduction.

    Lower level, users should use ``reduction`` or ``arg_reduction`` directly.
    """
    # Normalize split_every
    split_every = split_every or config.get('split_every', 4)
    if isinstance(split_every, dict):
        split_every = dict((k, split_every.get(k, 2)) for k in axis)
    elif isinstance(split_every, Integral):
        n = builtins.max(int(split_every ** (1 / (len(axis) or 1))), 2)
        split_every = dict.fromkeys(axis, n)
    else:
        raise ValueError("split_every must be an int or a dict")

    # Reduce across intermediates
    depth = 1
    for i, n in enumerate(x.numblocks):
        if i in split_every and split_every[i] != 1:
            depth = int(builtins.max(depth, ceil(log(n, split_every[i]))))
    func = partial(combine or aggregate, axis=axis, keepdims=True)
    if concatenate:
        func = compose(func, partial(_concatenate2, axes=axis))
    for i in range(depth - 1):
        x = partial_reduce(func, x, split_every, True, dtype=dtype,
                           name=(name or funcname(combine or aggregate)) + '-partial')
    func = partial(aggregate, axis=axis, keepdims=keepdims)
    if concatenate:
        func = compose(func, partial(_concatenate2, axes=axis))
    return partial_reduce(func, x, split_every, keepdims=keepdims, dtype=dtype,
                          name=(name or funcname(aggregate)) + '-aggregate')
Author: martindurant, Project: dask, Lines: 32, Source: reductions.py
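
For intuition about the split_every normalization in Examples 2 and 3, here is a small worked example; the block counts are chosen purely for illustration. With split_every=16 and two reduced axes, each axis is reduced in groups of n = max(int(16 ** (1/2)), 2) = 4, and an axis with 64 blocks needs three rounds of partial_reduce before the final aggregate, matching ceil(log(64, 4)) = 3.

from math import ceil

split_every = 16
axis = (0, 1)                                    # reducing two axes
n = max(int(split_every ** (1 / len(axis))), 2)  # group size per axis
assert n == 4

blocks, depth = 64, 0                            # 64 blocks along one axis
while blocks > 1:
    blocks = ceil(blocks / n)                    # one partial_reduce round
    depth += 1
assert depth == 3                                # 64 -> 16 -> 4 -> 1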

Example 4: get_service_step

def get_service_step(service_recipe):
    """
    Get step timedelta: The smaller duration of service_recipe's periods.
    """
    def diff(start, end):
        return end - start
    res_delta_diffs = compose(map(lambda p: diff(*p)), get('delta_periods'))
    return compose(min, map(min), map(res_delta_diffs))(service_recipe)
Author: qandobooking, Project: booking-engine, Lines: 8, Source: engine.py
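
To make the data shape concrete, here is a hedged usage sketch. The recipe structure below is an assumption inferred from how the function reads, not taken from the project: each resource carries a 'delta_periods' list of (start, end) pairs, and the smallest period duration wins.

from datetime import datetime
from toolz import compose
from toolz.curried import get, map

diff = lambda start, end: end - start
res_delta_diffs = compose(map(lambda p: diff(*p)), get('delta_periods'))
get_service_step = compose(min, map(min), map(res_delta_diffs))

# hypothetical recipe with two resources
service_recipe = [
    {'delta_periods': [(datetime(2020, 1, 1, 9, 0), datetime(2020, 1, 1, 10, 0)),
                       (datetime(2020, 1, 1, 10, 0), datetime(2020, 1, 1, 10, 30))]},
    {'delta_periods': [(datetime(2020, 1, 1, 9, 0), datetime(2020, 1, 1, 11, 0))]},
]

print(get_service_step(service_recipe))  # 0:30:00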

Example 5: _common

    def _common(self, Z, y):
        scale = Scaler(Z)
        transform = compose(prepend_x0, Scaler.normalize)
        X = transform(scale)
        data = zip(X, y)
        h_theta0 = [0.] * len(X[0])
        coeff = compose(scale.denormalize,
                        get(0),
                        lin_reg(J, gradJ, h_theta0, it_max=2000))
        h_thetad = coeff(data)
        return h_thetad
Author: philiplessner, Project: FunctionalML, Lines: 11, Source: test_linear.py

Example 6: ref_with_vcf_dicts_strategy_factory

def ref_with_vcf_dicts_strategy_factory(draw):
    '''
    Generate vcf records for randomish locations along a randomishly generated
    reference sequence. Each vcf record generator will have a randomish sized
    "chunk" of the reference to use

    Returns (reference sequence(str), iterable(vcf dicts))
    '''
    seq = draw(st.text(alphabet='ACGT', min_size=10, max_size=20))
    size = len(seq)
    # This gets you a list of numbers that are randomish and increasing
    ranges = draw(rolling_sum(1, 3, int(size/2)).map(lambda xs: ifilter(lambda x: x < size, xs)) )#.filter(_not(bool)))
    # Stream lets you re-use a generator without draining it.
    # Pairs will hold start/stop values for each part of sequence
    pairs = Stream() << partition(2, ranges)
    # POSs will contain the start position of each vcf row
    POSs = Stream() << imap(operator.itemgetter(0), pairs)
    # VCF files start at index 1; python starts at 0
    pairs_offset_1 = imap(lambda x: (x[0] - 1, x[1] - 1), pairs)
    #grab the pieces of the reference to build our elts from
    chunks = map(lambda x: seq[x[0]:x[1]], pairs_offset_1)
    #random chromosome name
    chrom = draw(st.text(string.ascii_letters))
    # Draw a new record for each of the Positions we have made
    vcfs = map(compose(draw, partial(vcf_dict_strategy_factory, chrom)), POSs, chunks)
    #TODO: ranges must be non-empty. Assuming vcfs for now.
    # vcfs can be a generator
    #assume(len(vcfs) > 0)
    return (seq, vcfs)
Author: VDBWRAIR, Project: biotest, Lines: 29, Source: biohypothesis.py

Example 7: build_task_nodes

def build_task_nodes(files=None, select=None, task_uuid=None,
                     human_readable=True):
    """
    Build the task nodes given some input data, query criteria and formatting
    options.
    """
    def task_transformers():
        if human_readable:
            yield _convert_timestamp
        yield json.loads

    def filter_funcs():
        if select is not None:
            for query in select:
                yield filter_by_jmespath(query)

        if task_uuid is not None:
            yield filter_by_uuid(task_uuid)

    if files is None:
        files = [sys.stdin]

    tree = Tree()
    tasks = imap(compose(*task_transformers()),
                 chain.from_iterable(files))
    return tree.nodes(tree.merge_tasks(tasks, filter_funcs()))
Author: itamarst, Project: eliottree, Lines: 26, Source: _cli.py
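
Because compose applies right to left, the yield order in task_transformers matters: json.loads is yielded last, so it is the first function applied to each raw line, and _convert_timestamp then runs on the parsed result. A minimal sketch of the same pattern, with hypothetical stand-in functions:

from toolz import compose

def transformers():
    yield str.upper   # yielded first -> applied second
    yield str.strip   # yielded last  -> applied first

pipeline = compose(*transformers())
assert pipeline('  hi  ') == 'HI'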

Example 8: _lookup_symbol_strict

    def _lookup_symbol_strict(self, symbol, as_of_date):
        # split the symbol into the components, if there are no
        # company/share class parts then share_class_symbol will be empty
        company_symbol, share_class_symbol = split_delimited_symbol(symbol)
        try:
            owners = self.symbol_ownership_map[company_symbol, share_class_symbol]
            assert owners, "empty owners list for %r" % symbol
        except KeyError:
            # no equity has ever held this symbol
            raise SymbolNotFound(symbol=symbol)

        if not as_of_date:
            if len(owners) > 1:
                # more than one equity has held this ticker, this is ambiguous
                # without the date
                raise MultipleSymbolsFound(
                    symbol=symbol, options=set(map(compose(self.retrieve_asset, attrgetter("sid")), owners))
                )

            # exactly one equity has ever held this symbol, we may resolve
            # without the date
            return self.retrieve_asset(owners[0].sid)

        for start, end, sid, _ in owners:
            if start <= as_of_date < end:
                # find the equity that owned it on the given asof date
                return self.retrieve_asset(sid)

        # no equity held the ticker on the given asof date
        raise SymbolNotFound(symbol=symbol)
Author: RoyHsiao, Project: zipline, Lines: 30, Source: assets.py

Example 9: cli

def cli(board_source, key, token, to, output, board):
    """Hi, I'm TrelloScribe. I take Trello boards and turn them into documents!"""
    # Compose a sequence of functions based on the options chosen
    # Note toolz.compose() works right to left
    read_phase = {
        'id': download_board(key, token),
        'name': toolz.compose(download_board(key, token), search_boards(key, token)),
        'file': read_board
    }
    convert_phase = {
        'raw': partial(json.dumps, indent=2),
        'md': ast_to_md,
        'html': toolz.compose(md_to_html, ast_to_md)
    }
    toolz.pipe(board, read_phase[board_source], trello_to_ast,
               convert_phase[to], partial(click.echo, file=output))
Author: aulloa, Project: trelloscribe, Lines: 16, Source: cli.py
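
The comment in this example is worth underlining: toolz.compose applies right to left, while toolz.pipe (used on the last line) threads a value through its stages left to right. A minimal sketch of the contrast:

import toolz

inc = lambda x: x + 1
double = lambda x: x * 2

assert toolz.compose(inc, double)(3) == 7  # double first, then inc
assert toolz.pipe(3, double, inc) == 7     # stages run in written order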

Example 10: input2output

def input2output(query: Dict[str, Any], fields: List[str], options: Dict[str, int]) -> None:
    inout = compose(formated_output(fields),
                    get_output(fields),
                    get_info,
                    make_query(query, fields))
    html_text = inout(options)
    viewoutput(html_text)
Author: philiplessner, Project: USPTOAPI, Lines: 7, Source: backend.py

Example 11: interface_check

def interface_check():
    clear_log()
    cmd = "match(s:Switch) where s.model='T64G' or s.model='S9306' or s.model='S9303' or s.model='S8905' return s.ip,s.model"
    #  cmd = "match(s:Switch) where s.model='S9306' or s.model='s9303' return s.ip,s.model limit 2"
    nodes = graph.cypher.execute(cmd)
    switchs = [(x[0], x[1]) for x in nodes]
    list(map(compose(output_interface, get_interface), switchs))
Author: sjava, Project: olt, Lines: 7, Source: switch.py

Example 12: reduction

def reduction(x, chunk, aggregate, axis=None, keepdims=None, dtype=None):
    """ General version of reductions

    >>> reduction(my_array, np.sum, np.sum, axis=0, keepdims=False)  # doctest: +SKIP
    """
    if axis is None:
        axis = tuple(range(x.ndim))
    if isinstance(axis, int):
        axis = (axis,)

    chunk2 = partial(chunk, axis=axis, keepdims=True)
    aggregate2 = partial(aggregate, axis=axis, keepdims=keepdims)

    inds = tuple(range(x.ndim))
    tmp = atop(chunk2, next(names), inds, x, inds)

    inds2 = tuple(i for i in inds if i not in axis)

    result = atop(compose(aggregate2, curry(_concatenate2, axes=axis)),
                  next(names), inds2, tmp, inds, dtype=dtype)

    if keepdims:
        dsk = result.dask.copy()
        for k in flatten(result._keys()):
            k2 = (k[0],) + insert_many(k[1:], axis, 0)
            dsk[k2] = dsk.pop(k)
        blockdims = insert_many(result.blockdims, axis, [1])
        return Array(dsk, result.name, blockdims=blockdims, dtype=dtype)
    else:
        return result
Author: kastnerkyle, Project: dask, Lines: 30, Source: reductions.py

Example 13: ngram_tuples

def ngram_tuples(n, string, minlen=3, maxlen=25):
    """
    Creates ngram tuples of size 'n' from 'string'.
    Also changes the string to lowercase, removes generic stopwords, and splits on all non-alphanumeric characters.

    Ex:
        In [2]: list(ngram_tuples(n=1, string='Just another example text.'))
        Out[2]: [('another',), ('example',), ('text',)]

        In [2]: list(ngram_tuples(n=2, string='Just another example text.'))
        Out[2]: [('another', 'example'), ('example', 'text')]

        In [11]: list(ngram_tuples(3, 'I needed a longer example text for this example.'))
        Out[11]:
            [('needed', 'longer', 'example'),
             ('longer', 'example', 'text'),
             ('example', 'text', 'example')]


    minlen - filter out words that have fewer characters than 'minlen'.
    maxlen - filter out words that have more characters than 'maxlen'.
    """
    return tlz.pipe(string,
                    lower,
                    simple_split,
                    filter_longer_than(maxlen),
                    tlz.compose(tlz.concat, map_c(splitter_of_words)),
                    filter_shorter_than(minlen),
                    filter_stopwords,
                    sliding_window_c(n))
Author: steven-cutting, Project: text2math, Lines: 30, Source: text2tokens.py

Example 14: zhongji

def zhongji(ip='', username='', password=''):
    try:
        result = []
        child = telnet(ip, username, password)
        child.sendline(
            "display cu section bbs-config | in link-aggregation")
        while True:
            index = child.expect([hw_prompt, hw_pager], timeout=120)
            if index == 0:
                result.append(child.before)
                child.sendline('quit')
                child.expect(':')
                child.sendline('y')
                child.close()
                break
            else:
                result.append(child.before)
                child.send(" ")
                continue
    except (pexpect.EOF, pexpect.TIMEOUT) as e:
        return ['fail', None, ip]
    rslt = ''.join(result).split('\r\n')[1:-1]
    rec = [x.replace('\x1b[37D', '').strip().split()[2:]
           for x in rslt if 'add-member' in x]

    def port(x):
        p = x[2].split(',')
        p1 = ['/'.join((x[1], y)) for y in p]
        return list(cons(x[0], p1))

    ff = lambda x, y: merge_with(compose(unique, concat), x, y)
    rec1 = [port(x) for x in rec]
    rec2 = [{x[0]: x} for x in rec1]
    rec3 = reduce(ff, rec2, dict())
    return ['success', rec3, ip]
Author: sjava, Project: olt, Lines: 35, Source: Huawei.py
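
The ff helper above is the densest line: merge_with collects, per key, a list of the values from each dict, then compose(unique, concat) flattens those lists and deduplicates. A small sketch with invented port data; note that unique returns a lazy iterator, so the values must be realized with list():

from toolz import compose, concat, merge_with, unique

ff = lambda x, y: merge_with(compose(unique, concat), x, y)

a = {'eth-trunk1': ['1/1', '1/2']}
b = {'eth-trunk1': ['1/2', '1/3'], 'eth-trunk2': ['2/1']}
merged = ff(a, b)
print({k: list(v) for k, v in merged.items()})
# {'eth-trunk1': ['1/1', '1/2', '1/3'], 'eth-trunk2': ['2/1']}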

Example 15: doctable

def doctable(ctx):
    df = pd.read_csv('./docs/flight-options.csv')

    # open an existing document
    doc = docx.Document('./docs/style-reference.docx')

    as_int = partial(format_decimal, format='#')
    as_usd = partial(format_currency, currency='USD')

    s = doc.sections[0]
    width = s.page_width - s.left_margin - s.right_margin

    doc.add_picture('./docs/diagrams_002.png', width=width)

    formatters = {
        'ticket_price': as_usd,
        'total_hours': as_int,
        'trip': as_int,
        'airline': partial(shorten_long_name, width=20),
        'selected': compose({0: 'No', 1: 'Yes'}.get, int)
    }
    add_table(df, doc, table_style='Plain Table 3', formatters=formatters)

    # save the doc
    doc.save('./docs/test.docx')
Author: ashapochka, Project: saapy, Lines: 25, Source: wordgen_samples.py
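
One detail worth noting here: compose accepts any callable, so the 'selected' formatter composes a plain dict's .get method with int, turning raw values like 0.0 or '1' into 'No'/'Yes'. A sketch:

from toolz import compose

yes_no = compose({0: 'No', 1: 'Yes'}.get, int)
assert yes_no('1') == 'Yes'  # int('1') -> 1 -> dict lookup
assert yes_no(0.0) == 'No'   # int(0.0) -> 0 -> dict lookup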


Note: The toolz.compose examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not republish without permission.