

Python toolz.pipe Function Code Examples

This article collects typical usage examples of the Python toolz.pipe function. If you are wondering what pipe does, how to call it, or what idiomatic uses look like, the curated examples below should help.


The following presents 15 code examples of the pipe function, drawn from open-source projects and ordered roughly by popularity.
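Before diving into the examples, recall pipe's semantics: pipe(data, f, g) computes g(f(data)), threading a value through a sequence of single-argument functions. A minimal illustration:

from toolz import pipe

pipe(3, lambda x: x + 1, str)   # == str((lambda x: x + 1)(3)) -> '4'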

Example 1: test_parses_figures_to_superpositions

def test_parses_figures_to_superpositions():
    "confirm figures yield expected superpositions"
    superpositions = (Superpositions.of_valid_figures(), Superpositions.of_flawed_figures())
    expected_superpositions = pipe(superpositions, concat, tuple)
    figures = (Figures.valid(), Figures.flawed())
    found_superpositions = pipe(figures, concat, superpositions_from_figures, tuple)
    assert expected_superpositions == found_superpositions
Author: gJigsaw, Project: KataBankOCR, Lines: 7, Source: test_superpositions_from_figures.py

Example 2: parser

import networkx as nx
import toolz as tz
from toolz import curried as c

def parser(filename, *args, **kwargs):
    # c_open and c_split are curried helpers defined elsewhere in the
    # project (curried wrappers around open() and str.split).
    g = nx.DiGraph()
    tz.pipe(filename, c_open(mode='r'),
            c.map(str.strip),
            c.map(c_split(sep=',')),
            g.add_edges_from)
    return g
Author: jni, Project: prin, Lines: 7, Source: edge_pairs_csv.py
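Note that c_open and c_split are not part of toolz; they appear to be project-local curried helpers. A hedged sketch of plausible definitions (names and behavior are assumptions inferred from the call sites):

import toolz as tz

@tz.curry
def c_open(filename, mode='r'):
    # curried open(): c_open(mode='r') waits for the filename
    return open(filename, mode=mode)

@tz.curry
def c_split(string, sep=','):
    # curried str.split(): c_split(sep=',') waits for the string
    return string.split(sep)

# parser('edges.csv') then turns each "a,b" line into a directed edge a -> b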

Example 3: process

def process(paths, load_, transform_, filter_, sink_):
    """ Generic pipeline

    :param paths: input paths
    :param load_: data loading function
    :param transform_: transformation function
    :param filter_: filter functions
    :param sink_: output function
    :return:
    """
    for path in paths:
        pipe(path, load_, transform_, filter_, sink_)
Author: eliasah, Project: airship-convert, Lines: 12, Source: __init__.py

Example 4: count_predictions

def count_predictions(filtered_predictions_list, target_label):
    # filter comes from toolz.curried; the original used a Python 2-only
    # tuple-parameter lambda, `lambda (_, x): ...`, rewritten here for
    # Python 3.
    return pipe(
        filtered_predictions_list,
        filter(lambda pair: pair[1] == target_label),
        list,
        len
    )
Author: RamonAranda, Project: ConfusionMatrix, Lines: 7, Source: _confusion_matrix_generator.py
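A quick usage sketch (the prediction pairs below are invented for illustration): each element is a (prediction, actual_label) tuple, and the call counts how many carry the target label:

from toolz import pipe
from toolz.curried import filter

pairs = [('a', 'spam'), ('b', 'ham'), ('c', 'spam')]
# count_predictions(pairs, 'spam')  ->  2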

Example 5: ghost_internal

def ghost_internal(x, axes):
    """ Share boundaries between neighboring blocks

    Parameters
    ----------

    x: da.Array
        A dask array
    axes: dict
        The size of the shared boundary per axis

    The axes dict informs how many cells to overlap between neighboring blocks
    {0: 2, 2: 5} means share two cells in 0 axis, 5 cells in 2 axis
    """
    dims = list(map(len, x.blockdims))
    expand_key2 = partial(expand_key, dims=dims)
    interior_keys = pipe(x._keys(), flatten,
                                    map(expand_key2), map(flatten),
                                    concat, list)
    interior_slices = dict((k, fractional_slice(k, axes))
                            for k in interior_keys)

    shape = (3,) * x.ndim
    name = next(ghost_names)
    ghost_blocks = dict(((name,) + k[1:],
                         (rec_concatenate, (concrete, expand_key2(k))))
                        for k in interior_keys)

    blockdims = [  [bds[0] + axes.get(i, 0)]
                 + [bd + axes.get(i, 0) * 2 for bd in bds[1:-1]]
                 + [bds[-1] + axes.get(i, 0)]
                 for i, bds in enumerate(x.blockdims)]

    return Array(merge(interior_slices, ghost_blocks, x.dask),
                 name, blockdims=blockdims)
Author: kastnerkyle, Project: dask, Lines: 35, Source: ghost.py
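This helper predates dask's current public API (blockdims and _keys are long gone); the same boundary sharing is exposed today as dask.array.overlap.overlap. A small sketch of the modern equivalent, assuming a recent dask version:

import numpy as np
import dask.array as da

x = da.from_array(np.arange(64).reshape(8, 8), chunks=(4, 4))
# share 2 cells along axis 0 between neighboring blocks,
# mirroring an axes dict of {0: 2} above
y = da.overlap.overlap(x, depth={0: 2}, boundary='reflect')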

Example 6: compute_up

def compute_up(expr, data, **kwargs):
    if not valid_grouper(expr):
        raise TypeError("Grouper must have a non-nested record or one "
                        "dimensional collection datashape, "
                        "got %s of type %r with dshape %s" %
                        (expr.grouper, type(expr.grouper).__name__, expr.dshape))

    s = alias_it(data)

    if valid_reducer(expr.apply):
        reduction = compute(expr.apply, s, post_compute=False)
    else:
        raise TypeError('apply must be a Summary expression')

    grouper = get_inner_columns(compute(expr.grouper, s, post_compute=False))
    reduction_columns = pipe(reduction.inner_columns,
                             map(get_inner_columns),
                             concat)
    columns = list(unique(chain(grouper, reduction_columns)))
    if (not isinstance(s, sa.sql.selectable.Alias) or
            (hasattr(s, 'froms') and isinstance(s.froms[0],
                                                sa.sql.selectable.Join))):
        assert len(s.froms) == 1, 'only a single FROM clause supported for now'
        from_obj, = s.froms
    else:
        from_obj = None

    return reconstruct_select(columns,
                              getattr(s, 'element', s),
                              from_obj=from_obj,
                              group_by=grouper)
Author: earney, Project: blaze, Lines: 31, Source: sql.py

Example 7: main

def main():
    # t: the project's transforms module (imported elsewhere);
    # each transform maps a path string to a path string
    transforms = [
        t.parentdir_expand,
        t.unambiguous_path,
        t.physical_path
    ]
    print(pipe(sys.argv[1], *transforms))
Author: digwanderlust, Project: pathlt, Lines: 7, Source: __main__.py

Example 8: freq

def freq(tokenset):
    """
    Find the number of occurrences of each value in 'tokenset'.
    """
    return tlz.pipe(tokenset,
                    tlz.frequencies,
                    dict.items)
Author: steven-cutting, Project: SimpleTokenizer, Lines: 7, Source: utils.py
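For instance (tokens invented for illustration; the dict_items result is shown as a list):

# list(freq(['to', 'be', 'or', 'not', 'to', 'be']))
# -> [('to', 2), ('be', 2), ('or', 1), ('not', 1)]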

Example 9: streaming_pca

def streaming_pca(samples, n_components=2, batch_size=50):
    ipca = decomposition.IncrementalPCA(n_components=n_components,
                                        batch_size=batch_size)
    # list() forces the lazy pipeline so every batch reaches partial_fit
    _ = list(tz.pipe(samples, curried.partition(batch_size),
                     curried.map(np.array),
                     curried.map(ipca.partial_fit)))
    return ipca
Author: jeromeku, Project: streaming-talk, Lines: 7, Source: session.py
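A usage sketch with synthetic data (shapes chosen arbitrarily); note that curried.partition drops any trailing partial batch:

import numpy as np

samples = (np.random.randn(10) for _ in range(500))
ipca = streaming_pca(samples, n_components=2, batch_size=50)
print(ipca.components_.shape)   # (2, 10)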

Example 10: __get_all_metrics_for_each_class

def __get_all_metrics_for_each_class(self):
    def __get_all_metrics_for_class(confusion_table):
        return pmap({
            str(confusion_table.get_class_name()): pmap({
                "Accuracy": confusion_table.accuracy,
                "Precision": confusion_table.precision,
                "Recall": confusion_table.recall,
                "Specificity": confusion_table.specificity,
                "F1score": confusion_table.f1score,
                "Fall Out": confusion_table.fall_out,
                "Miss Rate": confusion_table.miss_rate,
                "False Discovery Rate": confusion_table.FDR,
                "False Omission Rate": confusion_table.FOR,
                "Negative Predictive Value": confusion_table.NPV,
                "Positive Likelihood Ratio": confusion_table.PLR,
                "Negative Likelihood Ratio": confusion_table.NLR,
                "Diagnostic Odds Ratio": confusion_table.DOR,
            })
        })
    return pipe(
        self.__confusion_tables,
        itervalues,
        map(__get_all_metrics_for_class),
        reduce(lambda x, y: x + y),
    )
Author: RamonAranda, Project: ConfusionMatrix, Lines: 25, Source: _confusion_matrix.py

Example 11: gender_from_bam

def gender_from_bam(bam_path, prefix=''):
  """Predict the gender from a BAM alignment file.

  Args:
    bam_path (path): path to a BAM alignment file
    prefix (str, optional): string to prefix to 'X', 'Y'

  Returns:
    Gender: tuple of X coverage, Y coverage, and sex prediction

  Examples:
    >>> gender_from_bam('alignment.bam', prefix='chr')
    Gender(x_coverage=123.31, y_coverage=0.13, sex='female')
  """
  # setup: connect to a BAM file
  bam = BamFile(bam_path)

  # step 0: fake some BED interval rows (already 1,1-based!)
  fake_bed_rows = [("%sX" % prefix, 1, 59373566),
                   ("%sY" % prefix, 69362, 11375310)]

  # step 1: run the pipeline
  sequence = pipe(
    fake_bed_rows,
    map(lambda interval: bam(*interval)),
    map(average)
  )

  # step 2: make the prediction
  x_coverage, y_coverage = list(sequence)
  sex = predict_gender(x_coverage, y_coverage)
  return Gender(x_coverage, y_coverage, sex)
Author: dnil, Project: chanjo, Lines: 32, Source: core.py

Example 12: ccds_to_bed

def ccds_to_bed(ccds_stream):
  """Convert CCDS dump to Chanjo-style BED stream.

  Main entry point for default Chanjo converter (ccds). It converts
  a sorted (start, chrom) CCDS database to the Chanjo BED-format.

  Args:
    ccds_stream (file): file handle to read CCDS lines from

  Yields:
    Interval: interval with merged block and superblock ids
  """
  return pipe(
    ccds_stream,
    filter(grep('Public')),                    # keep only 'Public' tx
    map(text_type.rstrip),                     # strip \n and spaces
    map(split(sep='\t')),                      # split into list
    map(extract_intervals),                    # convert to Interval
    concat,                                    # flatten
    map(rename_sex_interval),                  # rename sex contigs
    partial(lazy_groupby, key=attrgetter('contig')),  # group by contig
    pluck(1),                                  # extract second item
    map(groupby(attrgetter('name'))),          # non-lazy group by id
    map(valmap(merge_related_elements)),       # group intervals
    map(itervalues),                           # extract values
    map(partial(sorted, key=attrgetter('start'))),  # sort by start pos
    concat                                     # flatten
  )
Author: dnil, Project: chanjo, Lines: 28, Source: core.py

Example 13: test__filter_stopwords

def test__filter_stopwords(tokenset, count):
    assert(tlz.pipe(tokenset,
                    utils.filter_stopwords,
                    list,
                    len,
                    lambda length: length == count,
                    ))
Author: steven-cutting, Project: SimpleTokenizer, Lines: 7, Source: test_utils.py

Example 14: alpino

def alpino(doc, output="raw"):
    """Wrapper around the Alpino (dependency) parser for Dutch.

    Expects an environment variable ALPINO_HOME to point at
    the Alpino installation dir.

    The script uses the 'dependencies' end_hook to generate lemmata and
    the dependency structure.

    Parameters
    ----------
    output : string
        If 'raw', returns the raw output from Alpino itself.
        If 'saf', returns a SAF dictionary.

    References
    ----------
    `Alpino homepage <http://www.let.rug.nl/vannoord/alp/Alpino/>`_
    """
    from ._alpino import tokenize, parse_raw, interpret_parse

    try:
        transf = {"raw": identity, "saf": interpret_parse}[output]
    except KeyError:
        raise ValueError("Unknown output format %r" % output)

    return pipe(doc, fetch, tokenize, parse_raw, transf)
Author: kazoup, Project: xtas, Lines: 27, Source: single.py

Example 15: get

def get(dsk, keys, optimizations=[fuse], num_workers=cpu_count):
    """ Multiprocessed get function appropriate for Bags """
    pool = _globals['pool']
    if pool is None:
        pool = multiprocessing.Pool(psutil.cpu_count())
        cleanup = True
    else:
        cleanup = False

    manager = multiprocessing.Manager()
    queue = manager.Queue()

    apply_async = dill_apply_async(pool.apply_async)

    # Optimize Dask
    dsk2 = pipe(dsk, partial(cull, keys=keys), *optimizations)

    try:
        # Run
        result = get_async(apply_async, cpu_count, dsk2, keys,
                           queue=queue)
    finally:
        if cleanup:
            pool.close()
    return result
Author: kastnerkyle, Project: dask, Lines: 25, Source: multiprocessing.py


Note: The toolz.pipe function examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and redistribution or use should follow each project's license. Please do not reproduce this article without permission.