

Python six.iteritems Method Code Examples

This article collects typical usage examples of the sklearn.externals.six.iteritems method in Python. If you are wondering what six.iteritems does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples for the sklearn.externals.six module.


The following presents 15 code examples of the six.iteritems method, sorted by popularity by default.
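Before working through the examples, a quick refresher on what six.iteritems does: it returns an iterator over a dictionary's (key, value) pairs, dispatching to dict.iteritems on Python 2 and dict.items on Python 3. The sketch below is illustrative only and is not taken from any of the projects listed here; the parameter names in it are made up. Note that the bundled sklearn.externals.six module has been deprecated and removed in recent scikit-learn releases, where the standalone six package (or plain dict.items) is used instead.

# Minimal, hypothetical sketch (not from the examples below): split
# "step__param" keys into per-step parameter dicts, the pattern the
# pipeline examples in this article rely on.
from sklearn.externals import six  # on newer scikit-learn: `import six` or plain dict.items()

fit_params = {'clf__C': 1.0, 'clf__max_iter': 100, 'pca__n_components': 2}

fit_params_steps = {}
for pname, pval in six.iteritems(fit_params):
    step, param = pname.split('__', 1)          # 'clf__C' -> ('clf', 'C')
    fit_params_steps.setdefault(step, {})[param] = pval

print(fit_params_steps)
# {'clf': {'C': 1.0, 'max_iter': 100}, 'pca': {'n_components': 2}}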

Example 1: fit

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def fit(self, Z, **fit_params):
        """TODO: rewrite docstring
        Fit all transformers using X.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data, used to fit transformers.
        """
        fit_params_steps = dict((step, {})
                                for step, _ in self.transformer_list)

        for pname, pval in six.iteritems(fit_params):
            step, param = pname.split('__', 1)
            fit_params_steps[step][param] = pval

        transformers = Parallel(n_jobs=self.n_jobs, backend="threading")(
            delayed(_fit_one_transformer)(trans, Z, **fit_params_steps[name])
            for name, trans in self.transformer_list)
        self._update_transformer_list(transformers)
        return self 
Developer: lensacom, Project: sparkit-learn, Lines of code: 22, Source file: pipeline.py

Example 2: test_type_of_target

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def test_type_of_target():
    for group, group_examples in iteritems(EXAMPLES):
        for example in group_examples:
            assert_equal(type_of_target(example), group,
                         msg=('type_of_target(%r) should be %r, got %r'
                              % (example, group, type_of_target(example))))

    for example in NON_ARRAY_LIKE_EXAMPLES:
        msg_regex = r'Expected array-like \(array or non-string sequence\).*'
        assert_raises_regex(ValueError, msg_regex, type_of_target, example)

    for example in MULTILABEL_SEQUENCES:
        msg = ('You appear to be using a legacy multi-label data '
               'representation. Sequence of sequences are no longer supported;'
               ' use a binary array or sparse matrix instead.')
        assert_raises_regex(ValueError, msg, type_of_target, example)

    try:
        from pandas import SparseSeries
    except ImportError:
        raise SkipTest("Pandas not found")

    y = SparseSeries([1, 0, 0, 1, 0])
    msg = "y cannot be class 'SparseSeries'."
    assert_raises_regex(ValueError, msg, type_of_target, y) 
Developer: alvarobartt, Project: twitter-stock-recommendation, Lines of code: 27, Source file: test_multiclass.py

Example 3: get_params

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def get_params(self, deep=True):
        """ Get classifier parameter names for GridSearch"""
        if not deep:
            return super(MajorityVoteClassifier, self).get_params(deep=False)
        else:
            out = self.named_classifiers.copy()
            for name, step in six.iteritems(self.named_classifiers):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            return out 
Developer: rrlyman, Project: PythonMachineLearningExamples, Lines of code: 12, Source file: p206_majority_vote_classifier.py

Example 4: _clone_h2o_obj

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def _clone_h2o_obj(estimator, ignore=False, **kwargs):
    # do initial clone
    est = clone(estimator)

    # set kwargs:
    if kwargs:
        for k, v in six.iteritems(kwargs):
            setattr(est, k, v)

    # check on h2o estimator
    if isinstance(estimator, H2OPipeline):
        # the last step from the original estimator
        e = estimator.steps[-1][1]
        if isinstance(e, H2OEstimator):
            last_step = est.steps[-1][1]

            # so it's the last step
            for k, v in six.iteritems(e._parms):
                k, v = _kv_str(k, v)

                # if (not k in PARM_IGNORE) and (not v is None):
                #   e._parms[k] = v
                last_step._parms[k] = v

        # otherwise it's a BaseH2OFunctionWrapper and there are no parms to copy
    return est 
Developer: tgsmith61591, Project: skutil, Lines of code: 28, Source file: grid_search.py

Example 5: _new_base_estimator

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def _new_base_estimator(est, clonable_kwargs):
    """When the grid searches are pickled, the estimator
    has to be dropped out. When we load it back in, we have
    to reinstate a new one, since the fit is predicated on
    being able to clone a base estimator, we've got to have
    an estimator to clone and fit.

    Parameters
    ----------

    est : str
        The type of model to build

    Returns
    -------

    estimator : H2OEstimator
        The cloned base estimator
    """
    est_map = {
        'dl':   H2ODeepLearningEstimator,
        'gbm':  H2OGradientBoostingEstimator,
        'glm':  H2OGeneralizedLinearEstimator,
        # 'glrm': H2OGeneralizedLowRankEstimator,
        # 'km'  : H2OKMeansEstimator,
        'nb':   H2ONaiveBayesEstimator,
        'rf':   H2ORandomForestEstimator
    }

    estimator = est_map[est]()  # initialize the new ones
    for k, v in six.iteritems(clonable_kwargs):
        k, v = _kv_str(k, v)
        estimator._parms[k] = v

    return estimator 
Developer: tgsmith61591, Project: skutil, Lines of code: 37, Source file: grid_search.py

Example 6: transform

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def transform(self, X):
        """Transform a test matrix given the already-fit transformer.

        Parameters
        ----------

        X : Pandas ``DataFrame``
            The Pandas frame to transform. The operation will
            be applied to a copy of the input data, and the result
            will be returned.


        Returns
        -------

        X : Pandas ``DataFrame``
            The operation is applied to a copy of ``X``,
            and the result set is returned.
        """
        check_is_fitted(self, 'sq_nms_')

        # check on state of X and cols
        X, _ = validate_is_pd(X, self.cols)
        sq_nms_ = self.sq_nms_

        # scale by norms
        for nm, the_norm in six.iteritems(sq_nms_):
            X[nm] /= the_norm

        return X if self.as_df else X.as_matrix() 
Developer: tgsmith61591, Project: skutil, Lines of code: 32, Source file: transform.py

Example 7: _sort_features

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def _sort_features(self, X, vocabulary):
        """Sort features by name

        Returns a reordered matrix and modifies the vocabulary in place
        """
        sorted_features = sorted(six.iteritems(vocabulary))
        map_index = np.empty(len(sorted_features), dtype=np.int32)
        for new_val, (term, old_val) in enumerate(sorted_features):
            vocabulary[term] = new_val
            map_index[old_val] = new_val

        X.indices = map_index.take(X.indices, mode='clip')
        return X 
Developer: prozhuchen, Project: 2016CCF-sougou, Lines of code: 15, Source file: STFIWF.py

Example 8: _limit_features

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def _limit_features(self, X, vocabulary, high=None, low=None,
                        limit=None):
        """Remove too rare or too common features.

        Prune features that are non-zero in more documents than high or in
        fewer documents than low, modifying the vocabulary and restricting
        it to at most the limit most frequent features.

        This does not prune samples with zero features.
        """
        if high is None and low is None and limit is None:
            return X, set()

        # Calculate a mask based on document frequencies
        dfs = _document_frequency(X)
        tfs = np.asarray(X.sum(axis=0)).ravel()
        mask = np.ones(len(dfs), dtype=bool)
        if high is not None:
            mask &= dfs <= high
        if low is not None:
            mask &= dfs >= low
        if limit is not None and mask.sum() > limit:
            mask_inds = (-tfs[mask]).argsort()[:limit]
            new_mask = np.zeros(len(dfs), dtype=bool)
            new_mask[np.where(mask)[0][mask_inds]] = True
            mask = new_mask

        new_indices = np.cumsum(mask) - 1  # maps old indices to new
        removed_terms = set()
        for term, old_index in list(six.iteritems(vocabulary)):
            if mask[old_index]:
                vocabulary[term] = new_indices[old_index]
            else:
                del vocabulary[term]
                removed_terms.add(term)
        kept_indices = np.where(mask)[0]
        if len(kept_indices) == 0:
            raise ValueError("After pruning, no terms remain. Try a lower"
                             " min_df or a higher max_df.")
        return X[:, kept_indices], removed_terms 
Developer: prozhuchen, Project: 2016CCF-sougou, Lines of code: 42, Source file: STFIWF.py

Example 9: get_feature_names

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def get_feature_names(self):
        """Array mapping from feature integer indices to feature name"""
        self._check_vocabulary()

        return [t for t, i in sorted(six.iteritems(self.vocabulary_),
                                     key=itemgetter(1))] 
Developer: prozhuchen, Project: 2016CCF-sougou, Lines of code: 8, Source file: STFIWF.py

Example 10: topological_sort

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def topological_sort(deps):
    '''
    Topologically sort a DAG, represented by a dict of child => set of parents.
    The dependency dict is destroyed during operation.

    Uses the Kahn algorithm: http://en.wikipedia.org/wiki/Topological_sorting
    Not a particularly good implementation, but we're just running it on tiny
    graphs.
    '''
    order = []
    available = set()

    def _move_available():
        to_delete = []
        for n, parents in iteritems(deps):
            if not parents:
                available.add(n)
                to_delete.append(n)
        for n in to_delete:
            del deps[n]

    _move_available()
    while available:
        n = available.pop()
        order.append(n)
        for parents in itervalues(deps):
            parents.discard(n)
        _move_available()

    if deps:
        # anything left still has unresolved parents, i.e. a dependency cycle
        raise ValueError("dependency cycle found")
    return order 
Developer: djsutherland, Project: skl-groups, Lines of code: 34, Source file: knn.py
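As a quick illustration of the input format Example 10 expects (a dict mapping each child to the set of parents it depends on), here is a hedged usage sketch with made-up node names. It assumes the topological_sort function above is in scope, with iteritems/itervalues imported as in the example; note that the input dict is consumed during the sort.

# Hypothetical usage of topological_sort from Example 10 (node names are made up).
deps = {
    'model': {'features', 'labels'},   # 'model' depends on 'features' and 'labels'
    'features': {'raw'},
    'labels': {'raw'},
    'raw': set(),                      # no dependencies
}
order = topological_sort(deps)
print(order)  # e.g. ['raw', 'labels', 'features', 'model'] -- parents always precede children
print(deps)   # {} -- the dependency dict has been emptied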

Example 11: _set_up_funcs

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def _set_up_funcs(funcs, metas_ordered, Ks, dim, X_ns=None, Y_ns=None):
    # replace functions with partials of args
    def replace_func(func, info):
        needs_alpha = getattr(func, 'needs_alpha', False)

        new = None
        args = (Ks, dim)
        if needs_alpha:
            args = (info.alphas,) + args

        if hasattr(func, 'chooser_fn'):
            args += (X_ns, Y_ns)
            if (getattr(func, 'needs_all_ks', False) and
                    getattr(func.chooser_fn, 'returns_ks', False)):
                new, K = func.chooser_fn(*args)
                new.K_needed = K
            else:
                new = func.chooser_fn(*args)
        else:
            new = partial(func, *args)

        for attr in dir(func):
            if not (attr.startswith('__') or attr.startswith('func_')):
                setattr(new, attr, getattr(func, attr))
        return new

    rep_funcs = dict(
        (replace_func(f, info), info) for f, info in iteritems(funcs))
    rep_metas_ordered = OrderedDict(
        (replace_func(f, info), info) for f, info in iteritems(metas_ordered))

    return rep_funcs, rep_metas_ordered 
Developer: djsutherland, Project: skl-groups, Lines of code: 34, Source file: knn.py

Example 12: __getitem__

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def __getitem__(self, key):
        if (isinstance(key, string_types) or
                (isinstance(key, (tuple, list)) and
                 any(isinstance(x, string_types) for x in key))):
            msg = "Features indexing only subsets rows, but got {!r}"
            raise TypeError(msg.format(key))

        if np.isscalar(key):
            return self.features[key]
        else:
            return type(self)(self.features[key], copy=False, stack=False,
                              **{k: v[key] for k, v in iteritems(self.meta)}) 
Developer: djsutherland, Project: skl-groups, Lines of code: 14, Source file: features.py

Example 13: test_type_utils

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def test_type_utils():
    tests = {
        'bool': (np.array([False, True]), False, True),
        'int32': (np.arange(10, dtype=np.int32), True, True),
        'int64': (np.arange(10, dtype=np.int64), True, True),
        'float32': (np.arange(10, dtype=np.float32), False, False),
        'float64': (np.arange(10, dtype=np.float64), False, False),
    }

    for name, (a, is_int, is_cat) in iteritems(tests):
        assert utils.is_integer_type(a) == is_int, name
        assert utils.is_categorical_type(a) == is_cat, name

        assert utils.is_integer(a[0]) == is_int, name
        assert utils.is_categorical(a[0]) == is_cat, name

    assert utils.is_integer_type(utils.as_integer_type(tests['float32'][0]))
    assert utils.is_integer_type(utils.as_integer_type(tests['float64'][0]))
    assert_raises(
        ValueError, lambda: utils.as_integer_type(tests['float32'][0] + .2))

    assert utils.is_integer(5)
    assert utils.is_categorical(False)
    assert utils.is_categorical(True)

################################################################################ 
Developer: djsutherland, Project: skl-groups, Lines of code: 28, Source file: test_utils.py

Example 14: _pre_transform

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def _pre_transform(self, Z, **fit_params):
        fit_params_steps = dict((step, {}) for step, _ in self.steps)
        for pname, pval in six.iteritems(fit_params):
            step, param = pname.split('__', 1)
            fit_params_steps[step][param] = pval
        Zp = Z.persist()
        for name, transform in self.steps[:-1]:
            if hasattr(transform, "fit_transform"):
                Zt = transform.fit_transform(Zp, **fit_params_steps[name])
            else:
                Zt = transform.fit(Zp, **fit_params_steps[name]) \
                              .transform(Zp)
            Zp.unpersist()
            Zp = Zt.persist()
        return Zp, fit_params_steps[self.steps[-1][0]] 
Developer: lensacom, Project: sparkit-learn, Lines of code: 17, Source file: pipeline.py

Example 15: get_params

# Required import: from sklearn.externals import six [as alias]
# Or: from sklearn.externals.six import iteritems [as alias]
def get_params(self, deep=True):
        if not deep:
            return super(SparkPipeline, self).get_params(deep=False)
        else:
            out = self.named_steps.copy()
            for name, step in six.iteritems(self.named_steps):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value

            out.update(super(SparkPipeline, self).get_params(deep=False))
            return out 
Developer: lensacom, Project: sparkit-learn, Lines of code: 13, Source file: pipeline.py


Note: The sklearn.externals.six.iteritems examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code; do not reproduce this article without permission.