

Python StandardScaler.min Method Code Examples

This article collects typical usage examples of sklearn.preprocessing.StandardScaler.min in Python. If you have been wondering what StandardScaler.min does, how to use it, or where to find working examples of it, the curated code samples below should help. Note that, strictly speaking, min is not a method of StandardScaler itself: in these examples it is numpy.ndarray.min, called on the array returned by fit_transform. You can also explore further usage examples of the containing class, sklearn.preprocessing.StandardScaler.


The following presents 15 code examples of StandardScaler.min, sorted by popularity by default.
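Since almost every example below follows the same idiom, here is a minimal self-contained sketch of that pattern first (an illustration written for this article, not code from any of the projects below): standardize a toy dataset with StandardScaler.fit_transform, then call ndarray.min on the result to shift the data into the non-negative range that estimators such as NMF require.

from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler

# Toy dataset, mirroring the make_blobs call used throughout the examples.
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                  random_state=0, cluster_std=0.1)

# Zero mean and unit variance per feature.
X = StandardScaler().fit_transform(X)

# min() here is numpy.ndarray.min over the whole array; subtracting it
# (minus a small margin) makes every entry strictly positive, which
# estimators such as NMF require.
X -= X.min() - 0.1

assert X.min() > 0
print(X.min(), X.mean(axis=0), X.std(axis=0))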

Example 1: check_transformer_pickle

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def check_transformer_pickle(name, Transformer):
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    n_samples, n_features = X.shape
    X = StandardScaler().fit_transform(X)
    X -= X.min()
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    if not hasattr(transformer, 'transform'):
        return
    set_random_state(transformer)
    set_fast_parameters(transformer)

    # fit
    if name in CROSS_DECOMPOSITION:
        random_state = np.random.RandomState(seed=12345)
        y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
        y_ = y_.T
    else:
        y_ = y

    transformer.fit(X, y_)
    X_pred = transformer.fit(X, y_).transform(X)
    pickled_transformer = pickle.dumps(transformer)
    unpickled_transformer = pickle.loads(pickled_transformer)
    pickled_X_pred = unpickled_transformer.transform(X)

    assert_array_almost_equal(pickled_X_pred, X_pred)
Author: AlexMarshall011, Project: scikit-learn, Lines: 31, Source: estimator_checks.py

Example 2: check_classifiers_classes

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def check_classifiers_classes(name, Classifier):
    X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1
    y_names = np.array(["one", "two", "three"])[y]

    for y_names in [y_names, y_names.astype('O')]:
        if name in ["LabelPropagation", "LabelSpreading"]:
            # TODO some complication with -1 label
            y_ = y
        else:
            y_ = y_names

        classes = np.unique(y_)
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            classifier = Classifier()
        if name == 'BernoulliNB':
            classifier.set_params(binarize=X.mean())
        set_fast_parameters(classifier)
        # fit
        classifier.fit(X, y_)

        y_pred = classifier.predict(X)
        # training set performance
        assert_array_equal(np.unique(y_), np.unique(y_pred))
        if np.any(classifier.classes_ != classes):
            print("Unexpected classes_ attribute for %r: "
                  "expected %s, got %s" %
                  (classifier, classes, classifier.classes_))
Author: AlexMarshall011, Project: scikit-learn, Lines: 35, Source: estimator_checks.py

Example 3: test_transformers_data_not_an_array

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def test_transformers_data_not_an_array():
    # test if transformers do something sensible on training set
    # also test all shapes / shape errors
    transformers = all_estimators(type_filter='transformer')
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - .1

    for name, Transformer in transformers:
        # XXX: some transformers are transforming the input
        # data. This is a bug that we'll fix later. Right now we copy
        # the data each time
        this_X = NotAnArray(X.copy())
        this_y = NotAnArray(np.asarray(y))
        if name in dont_test:
            continue
        # these don't actually fit the data:
        if name in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
            continue
        # And these want multivariate output
        if name in ('PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD'):
            continue
        yield check_transformer, name, Transformer, this_X, this_y
Author: akashaio, Project: scikit-learn, Lines: 28, Source: test_common.py

Example 4: check_transformer_general

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def check_transformer_general(name, Transformer):
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    X -= X.min()
    _check_transformer(name, Transformer, X, y)
    _check_transformer(name, Transformer, X.tolist(), y.tolist())
Author: AlexMarshall011, Project: scikit-learn, Lines: 9, Source: estimator_checks.py

Example 5: test_transformers_pickle

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def test_transformers_pickle():
    # test if transformers do something sensible on training set
    # also test all shapes / shape errors
    transformers = all_estimators(type_filter='transformer')
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    n_samples, n_features = X.shape
    X = StandardScaler().fit_transform(X)
    X -= X.min()

    succeeded = True

    for name, Transformer in transformers:
        if name in dont_test:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            transformer = Transformer()
        if not hasattr(transformer, 'transform'):
            continue
        set_random_state(transformer)
        if hasattr(transformer, 'compute_importances'):
            transformer.compute_importances = True

        if name == "SelectKBest":
            # SelectKBest has a default of k=10
            # which is more features than we have.
            transformer.k = 1
        elif name in ['GaussianRandomProjection', 'SparseRandomProjection']:
            # Due to the jl lemma and very few samples, the number
            # of components of the random matrix projection will be greater
            # than the number of features.
            # So we impose a smaller number (avoid "auto" mode)
            transformer.n_components = 1

        # fit
        if name in ('PLSCanonical', 'PLSRegression', 'CCA',
                    'PLSSVD'):
            random_state = np.random.RandomState(seed=12345)
            y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
            y_ = y_.T
        else:
            y_ = y

        transformer.fit(X, y_)
        X_pred = transformer.fit(X, y_).transform(X)
        pickled_transformer = pickle.dumps(transformer)
        unpickled_transformer = pickle.loads(pickled_transformer)
        pickled_X_pred = unpickled_transformer.transform(X)

        try:
            assert_array_almost_equal(pickled_X_pred, X_pred)
        except Exception as exc:
            succeeded = False
            print ("Transformer %s doesn't predict the same value "
                   "after pickling" % name)
            raise exc

    assert_true(succeeded)
Author: BrenBarn, Project: scikit-learn, Lines: 61, Source: test_common.py

Example 6: check_transformer_data_not_an_array

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def check_transformer_data_not_an_array(name, Transformer):
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    # We need to make sure that we have non negative data, for things
    # like NMF
    X -= X.min() - 0.1
    this_X = NotAnArray(X)
    this_y = NotAnArray(np.asarray(y))
    _check_transformer(name, Transformer, this_X, this_y)
Author: nomadcube, Project: scikit-learn, Lines: 11, Source: estimator_checks.py

Example 7: test_transformers_pickle

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def test_transformers_pickle():
    # test if transformers do something sensible on training set
    # also test all shapes / shape errors
    transformers = all_estimators(type_filter="transformer")
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1)
    n_samples, n_features = X.shape
    X = StandardScaler().fit_transform(X)
    X -= X.min()

    for name, Transformer in transformers:
        if name in dont_test:
            continue
        yield check_transformer_pickle, name, Transformer, X, y
Author: GanymedeH, Project: scikit-learn, Lines: 15, Source: test_common.py

Example 8: test_transformers

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def test_transformers():
    # test if transformers do something sensible on training set
    # also test all shapes / shape errors
    transformers = all_estimators(type_filter="transformer")
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    X -= X.min()

    for name, Transformer in transformers:
        if name in dont_test:
            continue
        # these don't actually fit the data:
        if name in ["AdditiveChi2Sampler", "Binarizer", "Normalizer"]:
            continue
        yield check_transformer, name, Transformer, X, y
Author: GanymedeH, Project: scikit-learn, Lines: 17, Source: test_common.py

Example 9: check_transformer

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def check_transformer(name, Transformer):
    if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        msg = name + ' is non deterministic on 32bit Python'
        raise SkipTest(msg)

    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    X = StandardScaler().fit_transform(X)
    X -= X.min()
    _check_transformer(name, Transformer, X, y)
Author: dashuye4, Project: scikit-learn, Lines: 17, Source: estimator_checks.py

Example 10: check_transformer_pickle

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def check_transformer_pickle(name, Transformer):
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    n_samples, n_features = X.shape
    X = StandardScaler().fit_transform(X)
    X -= X.min()
    # catch deprecation warnings
    with warnings.catch_warnings(record=True):
        transformer = Transformer()
    if not hasattr(transformer, 'transform'):
        return
    set_random_state(transformer)
    if hasattr(transformer, 'compute_importances'):
        transformer.compute_importances = True

    if name == "SelectKBest":
        # SelectKBest has a default of k=10
        # which is more features than we have.
        transformer.k = 1
    elif name in ['GaussianRandomProjection', 'SparseRandomProjection']:
        # Due to the jl lemma and very few samples, the number
        # of components of the random matrix projection will be greater
        # than the number of features.
        # So we impose a smaller number (avoid "auto" mode)
        transformer.n_components = 1

    if "n_iter" in transformer.get_params():
        # speed up some estimators
        transformer.set_params(n_iter=5)

    # fit
    if name in CROSS_DECOMPOSITION:
        random_state = np.random.RandomState(seed=12345)
        y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
        y_ = y_.T
    else:
        y_ = y

    transformer.fit(X, y_)
    X_pred = transformer.fit(X, y_).transform(X)
    pickled_transformer = pickle.dumps(transformer)
    unpickled_transformer = pickle.loads(pickled_transformer)
    pickled_X_pred = unpickled_transformer.transform(X)

    assert_array_almost_equal(pickled_X_pred, X_pred)
Author: dashuye4, Project: scikit-learn, Lines: 47, Source: estimator_checks.py

Example 11: standardize

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def standardize(array, name):
    """Recieves a dataFrame or Series (from pandas) and returns a numpy array with zero mean and unit variance."""
    # Transform to numpy array
    # as_matrix() was removed in pandas 1.0; to_numpy() is the replacement.
    nparray = array.to_numpy().reshape(array.shape[0], 1).astype('float32')
    print('------------')
    print(name)
    print('Different values before:', np.unique(nparray).shape[0])

    # Standardize the data
    nparray = StandardScaler().fit_transform(nparray)

    # Print some information
    print('Mean:', nparray.mean())
    print('Max:', nparray.max())
    print('Min:', nparray.min())
    print('Std:', nparray.std())
    print('Different values after:', np.unique(nparray).shape[0])

    return nparray
Author: SetaSouto, Project: BimboInvDemand, Lines: 21, Source: data.py
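A hedged usage sketch for the standardize function above (the column name and values are hypothetical, chosen purely for illustration; assumes pandas is installed):

import pandas as pd

# Hypothetical numeric column; any pandas Series or single-column DataFrame works.
demand = pd.Series([3.0, 7.0, 7.0, 1.0, 12.0], name='demand')

scaled = standardize(demand, 'demand')
# scaled is a float32 numpy array of shape (5, 1) with zero mean and unit variance.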

Example 12: test_transformers

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def test_transformers():
    # test if transformers do something sensible on training set
    # also test all shapes / shape errors
    estimators = all_estimators()
    transformers = [(name, E) for name, E in estimators if issubclass(E,
        TransformerMixin)]
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
            random_state=0, n_features=2, cluster_std=0.1)
    n_samples, n_features = X.shape
    X = StandardScaler().fit_transform(X)
    X -= X.min()

    succeeded = True

    for name, Trans in transformers:
        if Trans in dont_test or Trans in meta_estimators:
            continue
        # these don't actually fit the data:
        if Trans in [AdditiveChi2Sampler, Binarizer, Normalizer]:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            trans = Trans()
        set_random_state(trans)
        if hasattr(trans, 'compute_importances'):
            trans.compute_importances = True

        if Trans is SelectKBest:
            # SelectKBest has a default of k=10
            # which is more features than we have.
            trans.k = 1

        # fit

        if Trans in (_PLS, PLSCanonical, PLSRegression, CCA, PLSSVD):
            y_ = np.vstack([y, 2 * y + np.random.randint(2, size=len(y))])
            y_ = y_.T
        else:
            y_ = y

        try:
            trans.fit(X, y_)
            X_pred = trans.fit_transform(X, y=y_)
            if isinstance(X_pred, tuple):
                for x_pred in X_pred:
                    assert_equal(x_pred.shape[0], n_samples)
            else:
                assert_equal(X_pred.shape[0], n_samples)
        except Exception as e:
            print(trans)
            print(e)
            print()
            succeeded = False
            continue

        if hasattr(trans, 'transform'):
            if Trans in (_PLS, PLSCanonical, PLSRegression, CCA, PLSSVD):
                X_pred2 = trans.transform(X, y_)
            else:
                X_pred2 = trans.transform(X)
            if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
                for x_pred, x_pred2 in zip(X_pred, X_pred2):
                    assert_array_almost_equal(x_pred, x_pred2, 2,
                        "fit_transform not correct in %s" % Trans)
            else:
                assert_array_almost_equal(X_pred, X_pred2, 2,
                    "fit_transform not correct in %s" % Trans)

            # raises error on malformed input for transform
            assert_raises(ValueError, trans.transform, X.T)
    assert_true(succeeded)
Author: ahmed26, Project: scikit-learn, Lines: 73, Source: test_common.py

Example 13: test_transformers

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def test_transformers():
    # test if transformers do something sensible on training set
    # also test all shapes / shape errors
    transformers = all_estimators(type_filter="transformer")
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]], random_state=0, n_features=2, cluster_std=0.1)
    n_samples, n_features = X.shape
    X = StandardScaler().fit_transform(X)
    X -= X.min()

    succeeded = True

    for name, Transformer in transformers:
        if name in dont_test:
            continue
        # these don't actually fit the data:
        if name in ["AdditiveChi2Sampler", "Binarizer", "Normalizer"]:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            transformer = Transformer()
        set_random_state(transformer)
        if hasattr(transformer, "compute_importances"):
            transformer.compute_importances = True

        if name == "SelectKBest":
            # SelectKBest has a default of k=10
            # which is more features than we have.
            transformer.k = 1
        elif name in ["GaussianRandomProjection", "SparseRandomProjection"]:
            # Due to the jl lemma and very few samples, the number
            # of components of the random matrix projection will be greater
            # than the number of features.
            # So we impose a smaller number (avoid "auto" mode)
            transformer.n_components = 1
        elif name == "MiniBatchDictionaryLearning":
            transformer.set_params(n_iter=5)  # default = 1000

        elif name == "KernelPCA":
            transformer.remove_zero_eig = False

        # fit

        if name in ("PLSCanonical", "PLSRegression", "CCA", "PLSSVD"):
            y_ = np.c_[y, y]
            y_[::2, 1] *= 2
        else:
            y_ = y

        try:
            transformer.fit(X, y_)
            X_pred = transformer.fit_transform(X, y=y_)
            if isinstance(X_pred, tuple):
                for x_pred in X_pred:
                    assert_equal(x_pred.shape[0], n_samples)
            else:
                assert_equal(X_pred.shape[0], n_samples)
        except Exception as e:
            print(transformer)
            print(e)
            print()
            succeeded = False
            continue

        if hasattr(transformer, "transform"):
            if name in ("PLSCanonical", "PLSRegression", "CCA", "PLSSVD"):
                X_pred2 = transformer.transform(X, y_)
                X_pred3 = transformer.fit_transform(X, y=y_)
            else:
                X_pred2 = transformer.transform(X)
                X_pred3 = transformer.fit_transform(X, y=y_)
            if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
                for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
                    assert_array_almost_equal(x_pred, x_pred2, 2, "fit_transform not correct in %s" % Transformer)
                    assert_array_almost_equal(x_pred3, x_pred2, 2, "fit_transform not correct in %s" % Transformer)
            else:
                assert_array_almost_equal(X_pred, X_pred2, 2, "fit_transform not correct in %s" % Transformer)
                assert_array_almost_equal(X_pred3, X_pred2, 2, "fit_transform not correct in %s" % Transformer)

            # raises error on malformed input for transform
            assert_raises(ValueError, transformer.transform, X.T)
    assert_true(succeeded)
Author: nicomahler, Project: scikit-learn, Lines: 83, Source: test_common.py

Example 14: test_transformers

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
def test_transformers():
    # test if transformers do something sensible on training set
    # also test all shapes / shape errors
    transformers = all_estimators(type_filter='transformer')
    X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                      random_state=0, n_features=2, cluster_std=0.1)
    n_samples, n_features = X.shape
    X = StandardScaler().fit_transform(X)
    X -= X.min()

    succeeded = True

    for name, Trans in transformers:
        trans = None

        if Trans in dont_test:
            continue
        # these don't actually fit the data:
        if Trans in [AdditiveChi2Sampler, Binarizer, Normalizer]:
            continue
        # catch deprecation warnings
        with warnings.catch_warnings(record=True):
            trans = Trans()
        set_random_state(trans)
        if hasattr(trans, 'compute_importances'):
            trans.compute_importances = True

        if Trans is SelectKBest:
            # SelectKBest has a default of k=10
            # which is more features than we have.
            trans.k = 1
        elif Trans in [GaussianRandomProjection,
                       SparseRandomProjection]:
            # Due to the jl lemma and very few samples, the number
            # of components of the random matrix projection will be greater
            # than the number of features.
            # So we impose a smaller number (avoid "auto" mode)
            trans.n_components = 1

        # fit

        if Trans in (_PLS, PLSCanonical, PLSRegression, CCA, PLSSVD):
            random_state = np.random.RandomState(seed=12345)
            y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
            y_ = y_.T
        else:
            y_ = y

        try:
            trans.fit(X, y_)
            X_pred = trans.fit_transform(X, y=y_)
            if isinstance(X_pred, tuple):
                for x_pred in X_pred:
                    assert_equal(x_pred.shape[0], n_samples)
            else:
                assert_equal(X_pred.shape[0], n_samples)
        except Exception as e:
            print(trans)
            print(e)
            print()
            succeeded = False
            continue

        if hasattr(trans, 'transform'):
            if Trans in (_PLS, PLSCanonical, PLSRegression, CCA, PLSSVD):
                X_pred2 = trans.transform(X, y_)
            else:
                X_pred2 = trans.transform(X)
            if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
                for x_pred, x_pred2 in zip(X_pred, X_pred2):
                    assert_array_almost_equal(
                        x_pred, x_pred2, 2,
                        "fit_transform not correct in %s" % Trans)
            else:
                assert_array_almost_equal(
                    X_pred, X_pred2, 2,
                    "fit_transform not correct in %s" % Trans)

            # raises error on malformed input for transform
            assert_raises(ValueError, trans.transform, X.T)
    assert_true(succeeded)
Author: nwf5d, Project: scikit-learn, Lines: 83, Source: test_common.py

示例15: print

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import min [as alias]
    bad_inds=np.where(totalPVs>bad_perc)[0]
    bad_inds1=np.where(totalPVs<=5)[0]
    bad_inds=np.union1d(bad_inds,bad_inds1)

    very_active_inds=np.setdiff1d(va_inds,bad_inds)
    print(va_inds.shape,bad_inds.shape,very_active_inds.shape)

    featMatrix=featMatrix[very_active_inds,:]
    print('Teenagers',featMatrix.sum(axis=0))


    featMatrixNormalized=Normalizer(norm='l2').fit_transform(featMatrix)
    featMatrixSTD=StandardScaler().fit_transform(featMatrix)
    featMatrixSTD=featMatrixSTD#+np.abs(featMatrixSTD.min())+1.e-15
    print(featMatrixSTD.min())
    #featMatrix=RobustScaler(with_centering=False).fit_transform(featMatrix)

    nmfTrf=TruncatedSVD(n_components=10)
    nmfFeats=nmfTrf.fit_transform(featMatrixSTD)
    dfTest=paDataFrame(featMatrixSTD[:,:10])

    corr=np.dot(featMatrix,featMatrix.T)
    print(corr.shape)

    bandwidth = estimate_bandwidth(featMatrix, quantile=0.2, n_samples=500)
    ms = MeanShift(bandwidth=bandwidth*0.7, bin_seeding=True)
    print('bandwidth',bandwidth)
    labels=ms.fit_predict(featMatrix)

Author: Froskekongen, Project: content-consumption, Lines: 30, Source: consume_profiles.py


Note: The sklearn.preprocessing.StandardScaler.min examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and copyright remains with the original authors. For redistribution and use, refer to each project's license; do not reproduce without permission.