

Python toolz.unique Function Code Examples

This article collects typical usage examples of Python's toolz.unique function. If you are wondering exactly what unique does, how to call it, or want to see it used in real code, the curated examples below should help.


Below are 15 code examples of the unique function, sorted by popularity by default.
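
Before diving into the examples, here is a minimal, self-contained sketch of what toolz.unique itself does (not taken from any of the projects below): it lazily yields elements in first-seen order, and an optional key function decides what counts as a duplicate.

from toolz import unique

# unique() returns a lazy iterator that preserves first-seen order
print(list(unique([1, 2, 1, 3, 2])))                            # [1, 2, 3]

# an optional key decides what counts as a duplicate
print(list(unique(['Apple', 'apple', 'pear'], key=str.lower)))  # ['Apple', 'pear']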

Example 1: write_tables

def write_tables(fname, models, year):
    """
    Write all tables injected into `models` to a pandas.HDFStore file.
    If year is not None it will be used to prefix the table names so that
    multiple years can go in the same file.

    Parameters
    ----------
    fname : str
        File name for HDFStore. Will be opened in append mode and closed
        at the end of this function.
    models : list of str
        Models from which to gather injected tables for saving.
    year : int or None
        If an integer, used as a prefix along with table names for
        labeling DataFrames in the HDFStore.

    """
    models = (get_model(m) for m in toolz.unique(models))
    table_names = toolz.unique(toolz.concat(m._tables_used() for m in models))
    tables = (get_table(t) for t in table_names)

    key_template = '{}/{{}}'.format(year) if year is not None else '{}'

    with pd.get_store(fname, mode='a') as store:
        for t in tables:
            store[key_template.format(t.name)] = t.to_frame()
Developer: quintinvh, Project: urbansim, Lines: 27, Source: simulation.py
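
The core pattern in this example is toolz.unique(toolz.concat(...)): gather the tables used by every model, flatten them into one stream, and keep each name once while preserving order. A minimal sketch with hypothetical table names:

from toolz import concat, unique

tables_per_model = [['households', 'buildings'], ['buildings', 'jobs']]
print(list(unique(concat(tables_per_model))))
# ['households', 'buildings', 'jobs']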

Example 2: compute_up

def compute_up(expr, args, **kwargs):
    from_objs = list(unique(concat(map(get_all_froms, args))))
    if len(from_objs) > 1:
        # TODO: how do you do this in sql? please send help
        raise ValueError('only columns from the same table can be merged')

    cols = list(unique(concat(map(get_unsafe_inner_columns, args, expr.args))))
    sel = sa.select(cols, from_obj=from_objs[0])
    where = unify_wheres(args)
    if where is not None:
        sel = sel.where(where)
    return sel
Developer: blaze, Project: blaze, Lines: 12, Source: sql.py
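
Here unique(concat(map(get_all_froms, args))) collapses the FROM clauses of several column expressions down to the distinct tables, so the code can verify that exactly one table is involved. A sketch of just the dedup step, with a throwaway SQLAlchemy table standing in for what blaze's internal get_all_froms would return:

import sqlalchemy as sa
from toolz import concat, unique

metadata = sa.MetaData()
t = sa.Table('t', metadata,
             sa.Column('a', sa.Integer),
             sa.Column('b', sa.Integer))

# two expressions whose FROM lists both resolve to table t
froms_per_arg = [[t], [t]]
from_objs = list(unique(concat(froms_per_arg)))
print(len(from_objs))  # 1 -- both expressions come from the same table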

Example 3: render_tabular

def render_tabular(api, options=None):
  """Entry point for the tabular reporter interface."""
  # ``options`` defaults to None, so fall back to an empty mapping before
  # the .get() calls below
  options = options or {}

  # determine separator
  separator = options.get('report.separator', '\t')
  human = options.get('report.human')
  panel = options.get('report.panel')
  samples = options.get('report.samples')
  group = options.get('report.group')

  # read gene panel file if it has been set
  if panel:
    superblock_ids = [line.rstrip() for line in panel]
  else:
    superblock_ids = None

  # get sample ID, group and cutoff from metadata
  sample_query = limit_query(api.samples(), group=group, samples=samples)
  metadata = ((sample.id, sample.group_id, sample.cutoff)
              for sample in sample_query)

  # get the data
  base_query = limit_query(api.average_metrics(superblock_ids=superblock_ids),
                           group=group,
                           samples=samples)

  queries = [metadata,
             base_query,
             api.diagnostic_yield(superblock_ids=superblock_ids,
                                  group_id=group, sample_ids=samples),
             api.sex_checker(group_id=group, sample_ids=samples)]

  # group multiple queries by sample ID (first column)
  key_metrics = groupby(get(0), concat(queries))

  # get the column names dynamically from the query
  headers = concatv(['sample_id', 'group_id', 'cutoff'],
                    (column['name'] for column
                     in base_query.column_descriptions),
                    ['diagnostic yield', 'gender'])

  unique_headers = unique(headers)

  # iterate over all values, concat different query results, and keep
  # only the unique values (excluding second sample_id)
  data = (unique(concat(values)) for values in itervalues(key_metrics))

  if human:
    # export key_metrics in a more human friendly format
    return tabulate(data, unique_headers)

  # yield headers
  return '\n'.join(cons('#' + separator.join(unique_headers),
                        stringify_list(data, separator=separator)))
Developer: BadSeby, Project: chanjo-report, Lines: 53, Source: core.py
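
The merge trick in this reporter is groupby(get(0), concat(queries)), which buckets rows from several queries by sample ID, followed by unique(concat(values)), which flattens each bucket while dropping the repeated ID columns. A self-contained sketch with made-up rows; get is assumed to be the curried variant (toolz.curried.get), since it is called with a single argument:

from toolz import concat, groupby, unique
from toolz.curried import get

metadata = [('s1', 'grpA', 10), ('s2', 'grpB', 12)]
yields = [('s1', 0.98), ('s2', 0.91)]

key_metrics = groupby(get(0), concat([metadata, yields]))
for values in key_metrics.values():
    print(list(unique(concat(values))))
# ['s1', 'grpA', 10, 0.98]
# ['s2', 'grpB', 12, 0.91]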

Example 4: compile_components

def compile_components(summary, schema):
    """Given a ``Summary`` object and a table schema, returning 5 sub-functions.

    Parameters
    ----------
    summary : Summary
        The expression describing the aggregations to be computed.
    schema : datashape
        The schema of the table the aggregations run over.

    Returns
    -------
    A tuple of the following functions:

    ``create(shape)``
        Takes the aggregate shape, and returns a tuple of initialized numpy
        arrays.

    ``info(df)``
        Takes a dataframe, and returns preprocessed 1D numpy arrays of the
        needed columns.

    ``append(i, x, y, *aggs_and_cols)``
        Appends the ``i``th row of the table to the ``(x, y)`` bin, given the
        base arrays and columns in ``aggs_and_cols``. This does the bulk of the
        work.

    ``combine(base_tuples)``
        Combine a list of base tuples into a single base tuple. This forms the
        reducing step in a reduction tree.

    ``finalize(aggs)``
        Given a tuple of base numpy arrays, returns the finalized
        ``dynd`` array.
    """
    paths, reds = zip(*preorder_traversal(summary))

    # List of base reductions (actually computed)
    bases = list(unique(concat(r._bases for r in reds)))
    dshapes = [b.out_dshape(schema) for b in bases]
    # List of tuples of (append, base, input columns, temps)
    calls = [_get_call_tuples(b, d) for (b, d) in zip(bases, dshapes)]
    # List of unique column names needed
    cols = list(unique(concat(pluck(2, calls))))
    # List of temps needed
    temps = list(pluck(3, calls))

    create = make_create(bases, dshapes)
    info = make_info(cols)
    append = make_append(bases, cols, calls)
    combine = make_combine(bases, dshapes, temps)
    finalize = make_finalize(bases, summary, schema)

    return create, info, append, combine, finalize
Developer: jcrist, Project: datashader, Lines: 52, Source: compiler.py
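
Two lines above are worth isolating: unique(concat(r._bases for r in reds)) dedups base reductions shared between summaries, and unique(concat(pluck(2, calls))) dedups the input columns pulled out of each call tuple. The column step, sketched with hypothetical call tuples shaped like the _get_call_tuples output:

from toolz import concat, pluck, unique

# (append_fn, base, input columns, temps)
calls = [('append_count', 'count_base', ('x',), ()),
         ('append_sum', 'sum_base', ('x', 'value'), ())]

print(list(unique(concat(pluck(2, calls)))))  # ['x', 'value']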

Example 5: alts_columns_used

    def alts_columns_used(self):
        """
        Columns from the alternatives table that are used for filtering.

        """
        return list(toolz.unique(toolz.concat(
            m.alts_columns_used() for m in self.models.values())))
Developer: egeriicw, Project: urbansim, Lines: 7, Source: lcm.py

Example 6: choosers_columns_used

    def choosers_columns_used(self):
        """
        Columns from the choosers table that are used for filtering.

        """
        return list(toolz.unique(toolz.concat(
            m.choosers_columns_used() for m in self.models.values())))
Developer: egeriicw, Project: urbansim, Lines: 7, Source: lcm.py
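
Examples 5 and 6 are the same idiom applied to two different tables: ask every submodel which columns it filters on, flatten the answers, and dedup. A sketch with a hypothetical stand-in for an LCM submodel:

from toolz import concat, unique

class FakeModel:  # hypothetical stand-in for an LCM submodel
    def __init__(self, cols):
        self._cols = cols

    def choosers_columns_used(self):
        return self._cols

models = {'hh': FakeModel(['income', 'tenure']),
          'job': FakeModel(['tenure', 'sector'])}
print(list(unique(concat(
    m.choosers_columns_used() for m in models.values()))))
# ['income', 'tenure', 'sector']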

Example 7: schema

    def schema(self):
        group = self.grouper.schema[0].parameters[0]
        reduction_name = type(self.apply).__name__
        apply = self.apply.dshape[0].parameters[0]
        params = unique(group + apply, key=lambda x: x[0])

        return dshape(Record(list(params)))
Developer: ChrisBeaumont, Project: blaze, Lines: 7, Source: table.py
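
The interesting part here is the key argument: the grouper and the reduction each contribute (name, type) field pairs, and unique(..., key=lambda x: x[0]) keeps only the first pair seen for each field name. Sketched with plain tuples standing in for the datashape parameters:

from toolz import unique

group = [('id', 'int32'), ('name', 'string')]
apply_fields = [('id', 'int32'), ('amount_sum', 'float64')]

print(list(unique(group + apply_fields, key=lambda x: x[0])))
# [('id', 'int32'), ('name', 'string'), ('amount_sum', 'float64')]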

Example 8: gpon_svlan

def gpon_svlan(ip='', username='', password='', slots=None):
    ports = product(slots, range(1, 9))
    cmds = map(
        lambda x: "show service-port interface gpon-olt_1/{0}/{1}".format(x[0], x[1]), ports)
    try:
        svlan = []
        child = telnet(ip, username, password)
        for cmd in cmds:
            result = []
            child.sendline(cmd)
            while True:
                index = child.expect(
                    [zte_prompt, zte_pager], timeout=120)
                if index == 0:
                    result.append(child.before)
                    break
                else:
                    result.append(child.before)
                    child.send(' ')
                    continue
            r = ''.join(result).split('\r\n')[1:-1]
            v = [x.replace('\x08', '').strip().split()[1]
                 for x in r if 'OK' in x and 'YES' in x]
            v1 = [x for x in v if x.isdigit()]
            p = re.findall(r'\d/\d{1,2}/\d', cmd)
            svlan += product(p, unique(v1))
        child.sendline('exit')
        child.close()
    except (pexpect.EOF, pexpect.TIMEOUT) as e:
        return ['fail', None, ip]

    return ['success', svlan, ip]
Developer: sjava, Project: olt, Lines: 32, Source: Zte.py
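
After scraping each port, the function pairs every matched port name with the deduplicated service VLANs: product(p, unique(v1)). Reduced to its essentials with invented values:

from itertools import product
from toolz import unique

ports = ['1/1/1']
vlans = ['100', '200', '100']  # scraped list with duplicates
print(list(product(ports, unique(vlans))))
# [('1/1/1', '100'), ('1/1/1', '200')]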

Example 9: diagnostic_yield

  def diagnostic_yield(self, metric='completeness', cutoff=1,
                       superblock_ids=None, group_id=None, sample_ids=None):
    """Calculate diagnostic yield."""
    # extract column to filter on
    metric_column = getattr(BlockData, metric)

    # set up the base query for all blocks
    total_query = self.total_count(BlockData)

    if superblock_ids:
      # apply the superblock filter on the Block class level
      total_query = total_query.join(BlockData.parent)\
                               .filter(Block.superblock_id.in_(superblock_ids))

    # extend base query to include only passed blocks
    pass_query = total_query.filter(metric_column >= cutoff)

    # optionally limit query
    queries = [limit_query(query, group=group_id, samples=sample_ids)
               for query in (total_query, pass_query)]

    # group multiple queries by sample ID (first column)
    metrics = groupby(get(0), concat(queries))

    # iterate over all values, concat different query results, and keep
    # only the unique values (excluding second sample_id)
    combined = (unique(concat(values)) for values in itervalues(metrics))

    # calculate diagnostic yield by simple division
    for sample_id, group_id, total, covered in combined:
      yield sample_id, group_id, (covered / total)
Developer: henrikstranneheim, Project: chanjo-report, Lines: 31, Source: core.py
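
The same groupby/unique merge as in Example 3 feeds the final division: for each sample, unique drops the repeated sample and group IDs, so the merged stream unpacks cleanly into four values. A sketch with invented counts:

from toolz import concat, unique

total_row = ('s1', 'grpA', 200)  # total blocks for sample s1
pass_row = ('s1', 'grpA', 150)   # blocks at or above the cutoff
sample_id, group_id, total, covered = unique(concat([total_row, pass_row]))
print(sample_id, group_id, covered / total)  # s1 grpA 0.75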

Example 10: compute_up

def compute_up(expr, data, **kwargs):
    if not valid_grouper(expr):
        raise TypeError("Grouper must have a non-nested record or one "
                        "dimensional collection datashape, "
                        "got %s of type %r with dshape %s" %
                        (expr.grouper, type(expr.grouper).__name__, expr.dshape))

    s = alias_it(data)

    if valid_reducer(expr.apply):
        reduction = compute(expr.apply, s, post_compute=False)
    else:
        raise TypeError('apply must be a Summary expression')

    grouper = get_inner_columns(compute(expr.grouper, s, post_compute=False))
    reduction_columns = pipe(reduction.inner_columns,
                             map(get_inner_columns),
                             concat)
    columns = list(unique(chain(grouper, reduction_columns)))
    if (not isinstance(s, sa.sql.selectable.Alias) or
            (hasattr(s, 'froms') and isinstance(s.froms[0],
                                                sa.sql.selectable.Join))):
        assert len(s.froms) == 1, 'only a single FROM clause supported for now'
        from_obj, = s.froms
    else:
        from_obj = None

    return reconstruct_select(columns,
                              getattr(s, 'element', s),
                              from_obj=from_obj,
                              group_by=grouper)
Developer: earney, Project: blaze, Lines: 31, Source: sql.py
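
unique(chain(grouper, reduction_columns)) guarantees that a column used both for grouping and inside a reduction is emitted only once in the SELECT list. With a throwaway table, and blaze's column helpers replaced by hand-built lists:

from itertools import chain
from toolz import unique
import sqlalchemy as sa

metadata = sa.MetaData()
t = sa.Table('t', metadata,
             sa.Column('k', sa.Integer),
             sa.Column('v', sa.Integer))

grouper = [t.c.k]
reduction_columns = [t.c.k, t.c.v]  # k shows up again in the reduction
print([c.name for c in unique(chain(grouper, reduction_columns))])
# ['k', 'v']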

Example 11: compute_up

def compute_up(expr, data, scope=None, **kwargs):
    data = lower_column(data)
    grouper = compute(
        expr.grouper,
        scope,
        post_compute=False,
        return_type='native',
        **kwargs
    )

    app = expr.apply
    reductions = [
        compute(
            val,
            data,
            post_compute=None,
            return_type='native',
        ).label(name)
        for val, name in zip(app.values, app.fields)
    ]

    froms = list(unique(chain(get_all_froms(grouper),
                              concat(map(get_all_froms, reductions)))))
    inner_cols = list(getattr(grouper, 'inner_columns', [grouper]))
    grouper_cols = inner_cols[:]
    inner_cols.extend(concat(
        getattr(getattr(r, 'element', None), 'inner_columns', [r])
        for r in reductions
    ))
    wheres = unify_wheres([grouper] + reductions)
    sel = unify_froms(sa.select(inner_cols, whereclause=wheres), froms)
    return sel.group_by(*grouper_cols)
Developer: giangzuzana, Project: blaze, Lines: 32, Source: sql.py
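
The reductions above are labelled with their output field names before the SELECT is assembled. A stripped-down version of the kind of grouped select this produces (SQLAlchemy 1.x list-style select, matching the snippet's API):

import sqlalchemy as sa

metadata = sa.MetaData()
t = sa.Table('t', metadata,
             sa.Column('k', sa.Integer),
             sa.Column('v', sa.Integer))

total = sa.func.sum(t.c.v).label('total')  # reduction labelled by field name
sel = sa.select([t.c.k, total]).group_by(t.c.k)
print(sel)  # roughly: SELECT t.k, sum(t.v) AS total FROM t GROUP BY t.k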

Example 12: find_names

def find_names(node):
    """Return the unique :class:`ast.Name` instances in an AST.

    Parameters
    ----------
    node : ast.AST

    Returns
    -------
    unique_names : List[ast.Name]

    Examples
    --------
    >>> import ast
    >>> node = ast.parse('a + b')
    >>> names = find_names(node)
    >>> names  # doctest: +ELLIPSIS
    [<_ast.Name object at 0x...>, <_ast.Name object at 0x...>]
    >>> names[0].id
    'a'
    >>> names[1].id
    'b'
    """
    return list(
        toolz.unique(
            filter(None, NameFinder().find(node)),
            key=lambda node: (node.id, type(node.ctx)),
        )
    )
Developer: cloudera, Project: ibis, Lines: 29, Source: find.py
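
The key function (node.id, type(node.ctx)) means two ast.Name nodes only count as duplicates when they share both the identifier and the context type (Load vs Store). A self-contained approximation that uses ast.walk in place of ibis's NameFinder:

import ast
from toolz import unique

node = ast.parse('a + b + a')
names = [n for n in ast.walk(node) if isinstance(n, ast.Name)]
deduped = unique(names, key=lambda n: (n.id, type(n.ctx)))
print(sorted(n.id for n in deduped))  # ['a', 'b']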

Example 13: test_multi_column_join

def test_multi_column_join():
    metadata = sa.MetaData()
    lhs = sa.Table('aaa', metadata,
                   sa.Column('x', sa.Integer),
                   sa.Column('y', sa.Integer),
                   sa.Column('z', sa.Integer))

    rhs = sa.Table('bbb', metadata,
                   sa.Column('w', sa.Integer),
                   sa.Column('x', sa.Integer),
                   sa.Column('y', sa.Integer))

    L = symbol('L', 'var * {x: int, y: int, z: int}')
    R = symbol('R', 'var * {w: int, x: int, y: int}')
    joined = join(L, R, ['x', 'y'])

    expected = lhs.join(rhs, (lhs.c.x == rhs.c.x)
                           & (lhs.c.y == rhs.c.y))
    expected = select(list(unique(expected.columns,
                                  key=lambda c: c.name))).select_from(expected)

    result = compute(joined, {L: lhs, R: rhs})

    assert str(result) == str(expected)

    assert str(select(result)) == str(select(expected))

    # Schemas match
    print(result.c.keys())
    print(joined.fields)
    assert list(result.c.keys()) == list(joined.fields)
Developer: ymarfoq, Project: outilACVDesagregation, Lines: 31, Source: test_sql_compute.py
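
The heart of the expected query is unique(expected.columns, key=lambda c: c.name): after a join on shared keys, each key column appears on both sides and must be projected only once. The dedup step in isolation:

import sqlalchemy as sa
from toolz import unique

metadata = sa.MetaData()
lhs = sa.Table('aaa', metadata,
               sa.Column('x', sa.Integer), sa.Column('y', sa.Integer))
rhs = sa.Table('bbb', metadata,
               sa.Column('x', sa.Integer), sa.Column('w', sa.Integer))

joined = lhs.join(rhs, lhs.c.x == rhs.c.x)
print([c.name for c in unique(joined.columns, key=lambda c: c.name)])
# ['x', 'y', 'w'] -- the shared join key survives only once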

Example 14: all_subaccounts

    def all_subaccounts(self):
        """
        Returns an iterator of all subaccounts that have a recorded transaction
        with the account.

        """
        return toolz.unique(t.subaccount for t in self.transactions)
Developer: advancedpartnerships, Project: urbansim, Lines: 7, Source: accounts.py
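
Note that this method returns the iterator from unique directly rather than a list, so callers can consume it only once. A sketch with a hypothetical transaction object:

from toolz import unique

class Tx:  # hypothetical stand-in for a transaction record
    def __init__(self, subaccount):
        self.subaccount = subaccount

transactions = [Tx('ops'), Tx('capital'), Tx('ops')]
subaccounts = unique(t.subaccount for t in transactions)
print(list(subaccounts))  # ['ops', 'capital']
print(list(subaccounts))  # [] -- the iterator is already exhausted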

Example 15: _get_variables

    def _get_variables(self):
        """Collect variables, updates and auxiliary variables.

        In addition collects all :class:`.Scan` ops and recurses in the
        respective inner Theano graphs.

        """
        updates = OrderedDict()

        shared_outputs = [o for o in self.outputs if is_shared_variable(o)]
        usual_outputs = [o for o in self.outputs if not is_shared_variable(o)]
        variables = shared_outputs

        if usual_outputs:
            # Sort apply nodes topologically, get variables and remove
            # duplicates
            inputs = graph.inputs(self.outputs)
            sorted_apply_nodes = graph.io_toposort(inputs, usual_outputs)
            self.scans = list(unique([node.op for node in sorted_apply_nodes
                                     if isinstance(node.op, Scan)],
                                     key=lambda op: id(op)))
            self._scan_graphs = [ComputationGraph(scan.outputs)
                                 for scan in self.scans]

            seen = set()
            main_vars = (
                [var for var in list(chain(
                    *[apply_node.inputs for apply_node in sorted_apply_nodes]))
                 if not (var in seen or seen.add(var))] +
                [var for var in self.outputs if var not in seen])

            # While preserving order add auxiliary variables, and collect
            # updates
            seen = set()
            # Intermediate variables could be auxiliary
            seen_avs = set(main_vars)
            variables = []
            for var in main_vars:
                variables.append(var)
                for annotation in getattr(var.tag, 'annotations', []):
                    if annotation not in seen:
                        seen.add(annotation)
                        new_avs = [
                            av for av in annotation.auxiliary_variables
                            if not (av in seen_avs or seen_avs.add(av))]
                        variables.extend(new_avs)
                        updates = dict_union(updates, annotation.updates)

        # If a shared variable has been assigned a default_update (i.e. it was
        # cloned), we cannot eval() it to get the real numpy array value, so
        # try to trace back to the original shared variable.
        def shared_variable_filter(var):
            if is_shared_variable(var) and hasattr(var, 'default_update'):
                for annotation in var.tag.annotations:
                    if hasattr(annotation, var.name) and \
                       is_shared_variable(getattr(annotation, var.name)):
                        return getattr(annotation, var.name)
            return var
        self.variables = map(shared_variable_filter, variables)
        self.updates = updates
Developer: trungnt13, Project: blocks, Lines: 60, Source: __init__.py
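
Here unique is keyed on id(op) so deduplication happens by object identity; unique stores the keys in a set, and id() also sidesteps any op that is not hashable. A minimal illustration:

from toolz import unique

class Op:            # hypothetical stand-in for a Theano Scan op
    __hash__ = None  # pretend instances are unhashable

op1, op2 = Op(), Op()
print(len(list(unique([op1, op2, op1], key=id))))  # 2 -- deduped by identity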


Note: The toolz.unique function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's license before distributing or reusing the code, and do not republish without permission.