当前位置: 首页>>代码示例>>Python>>正文


Python StandardScaler.tolist方法代码示例

本文整理汇总了Python中sklearn.preprocessing.StandardScaler.tolist方法的典型用法代码示例。如果您正苦于以下问题:Python StandardScaler.tolist方法的具体用法?Python StandardScaler.tolist怎么用?Python StandardScaler.tolist使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在sklearn.preprocessing.StandardScaler的用法示例。


在下文中一共展示了StandardScaler.tolist方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: check_clustering

# 需要导入模块: from sklearn.preprocessing import StandardScaler [as 别名]
# 或者: from sklearn.preprocessing.StandardScaler import tolist [as 别名]
def check_clustering(name, Alg):
    """Generic sanity checks for a clustering estimator class.

    Fits ``Alg`` on a small standardized blob dataset (both as a numpy
    array and as a plain Python list), checks that the fitted labels have
    shape ``(n_samples,)`` and reasonable agreement with the true labels,
    and verifies that ``fit_predict`` agrees with ``fit`` + ``labels_``.

    Parameters
    ----------
    name : str
        Name of the estimator class, used for per-estimator special cases.
    Alg : class
        The clustering estimator class to instantiate and check.
    """
    X, y = make_blobs(n_samples=50, random_state=1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    n_samples, n_features = X.shape
    # catch deprecation and neighbors warnings
    with warnings.catch_warnings(record=True):
        alg = Alg()
    set_fast_parameters(alg)
    if hasattr(alg, "n_clusters"):
        alg.set_params(n_clusters=3)
    set_random_state(alg)
    if name == 'AffinityPropagation':
        # the default preference produces too many clusters on this data
        alg.set_params(preference=-100)
        alg.set_params(max_iter=100)

    # fit
    alg.fit(X)
    # with lists
    alg.fit(X.tolist())

    assert_equal(alg.labels_.shape, (n_samples,))
    pred = alg.labels_
    assert_greater(adjusted_rand_score(pred, y), 0.4)
    # fit another time with ``fit_predict`` and compare results
    # BUG FIX: was ``name is 'SpectralClustering'`` -- ``is`` with a string
    # literal tests object identity and only works by CPython interning
    # accident; string comparison must use ``==``.
    if name == 'SpectralClustering':
        # there is no way to make Spectral clustering deterministic :(
        return
    set_random_state(alg)
    with warnings.catch_warnings(record=True):
        pred2 = alg.fit_predict(X)
    assert_array_equal(pred, pred2)
开发者ID:AlexMarshall011,项目名称:scikit-learn,代码行数:34,代码来源:estimator_checks.py

示例2: check_transformer_general

# 需要导入模块: from sklearn.preprocessing import StandardScaler [as 别名]
# 或者: from sklearn.preprocessing.StandardScaler import tolist [as 别名]
def check_transformer_general(name, Transformer):
    """Run the shared transformer checks on a small blob dataset.

    The checks are executed twice: once with numpy arrays and once with
    plain Python lists, to verify that list inputs are also accepted.
    """
    data, labels = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                              random_state=0, n_features=2, cluster_std=0.1)
    data = StandardScaler().fit_transform(data)
    data -= data.min()  # shift so every feature value is non-negative
    for features, target in ((data, labels), (data.tolist(), labels.tolist())):
        _check_transformer(name, Transformer, features, target)
开发者ID:AlexMarshall011,项目名称:scikit-learn,代码行数:9,代码来源:estimator_checks.py

示例3: check_transformer

# 需要导入模块: from sklearn.preprocessing import StandardScaler [as 别名]
# 或者: from sklearn.preprocessing.StandardScaler import tolist [as 别名]
def check_transformer(name, Transformer):
    """Run the shared transformer checks, skipping 32-bit-unstable estimators.

    A few transformers are known to produce non-deterministic output on
    32-bit Python builds, so they are skipped there.  The remaining checks
    run on a small standardized blob dataset, both as numpy arrays and as
    plain Python lists.
    """
    unstable_on_32bit = ('CCA', 'LocallyLinearEmbedding', 'KernelPCA')
    if _is_32bit() and name in unstable_on_32bit:
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        raise SkipTest(name + ' is non deterministic on 32bit Python')

    data, labels = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
                              random_state=0, n_features=2, cluster_std=0.1)
    data = StandardScaler().fit_transform(data)
    data -= data.min()  # shift so every feature value is non-negative
    for features, target in ((data, labels), (data.tolist(), labels.tolist())):
        _check_transformer(name, Transformer, features, target)
开发者ID:vmuthusamy,项目名称:scikit-learn,代码行数:18,代码来源:estimator_checks.py

示例4: Point

# 需要导入模块: from sklearn.preprocessing import StandardScaler [as 别名]
# 或者: from sklearn.preprocessing.StandardScaler import tolist [as 别名]
# NOTE(review): flat script fragment -- ``plt``, ``np``, ``X``, ``EPS``,
# ``MIN_SAMPLES`` and ``n_clusters_`` come from earlier in the file,
# outside this excerpt.
# Finish the scikit-learn result subplot started above.
plt.title('Sklearn - estimated number of clusters: %d' % n_clusters_)
plt.grid()

#################################################################################
# our version
#################################################################################
import sys

# Make the local DBSCAN implementation importable.
sys.path.append("../db_scan")
from dbscan import DBscan
from point import Point

#################################################################################
# Compute DBSCAN
#################################################################################
# Convert the sample array (presumably 2-D: x, y columns -- TODO confirm)
# into a list of Point objects for the custom implementation.
xx = X.tolist()
point_array = []
for x in xx:
    pt = Point(x[0], x[1])
    point_array.append(pt)
# db = DBscan(point_array=parr, start_point_index= 0, cluster_map=clustmap, epsilon=EPS, min_neighbour=MIN_SAMPLES)
db = DBscan()
# Run the custom DBSCAN with the same eps/minPts used for sklearn above.
clusters = db.start(points=point_array, eps=EPS, minPts=MIN_SAMPLES)

#################################################################################
# Plot result
#################################################################################
plt.subplot(212)
# One color per cluster, sampled across the Spectral colormap.
colors = plt.cm.Spectral(np.linspace(0, 1, len(clusters)))

# NOTE(review): the body of this loop is missing -- the excerpt is
# truncated here.
for p in point_array:
开发者ID:tstrzeba,项目名称:DBSCAN,代码行数:33,代码来源:scikitresults_test.py

示例5: range

# 需要导入模块: from sklearn.preprocessing import StandardScaler [as 别名]
# 或者: from sklearn.preprocessing.StandardScaler import tolist [as 别名]
# NOTE(review): Python 2 script fragment (bare ``print`` statements);
# ``np`` and ``os`` are imported earlier in the file, outside this excerpt.
if os.path.exists("log.npy"):
    info = np.load("log.npy")
# NOTE(review): if log.npy is missing, ``info`` is undefined and the next
# line raises NameError -- presumably the file is expected to exist.
X=info



# Y = []
# for x in range(0,len(X)):
#   # print X[x][1]
#   if X[x][1]>400:
#     Y.append(X[x])
# X = Y
# print X

# Work on a plain Python list from here on.
X = X.tolist();
# print X
# Column-wise mean; mean[1] is the mean of the second column.
mean =  np.mean(X,axis=0)
print mean
Xlength = len(X)
# NOTE(review): X_corrd is an alias of X (not a copy), so the appends
# below mutate it as well -- confirm that is intended.
X_corrd = X
print Xlength
# Pad the data with synthetic points at the mean y-value across a spread
# of x-positions -- presumably to anchor the plot/cluster range; verify.
X.append([0,mean[1]])
X.append([1,mean[1]])
X.append([3,mean[1]])
X.append([4,mean[1]])
X.append([5,mean[1]])
X.append([1200,mean[1]])
X.append([1201,mean[1]])
X.append([1202,mean[1]])
X.append([1230,mean[1]])
开发者ID:zzpnm003,项目名称:stress_level,代码行数:32,代码来源:plot_dbscan.py


注:本文中的sklearn.preprocessing.StandardScaler.tolist方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。