當前位置: 首頁>>代碼示例>>Python>>正文


Python StandardScaler.tolist方法代碼示例

本文整理匯總了Python中sklearn.preprocessing.StandardScaler.tolist方法的典型用法代碼示例。如果您正苦於以下問題:Python StandardScaler.tolist方法的具體用法?Python StandardScaler.tolist怎麽用?Python StandardScaler.tolist使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在sklearn.preprocessing.StandardScaler的用法示例。


在下文中一共展示了StandardScaler.tolist方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: check_clustering

# 需要導入模塊: from sklearn.preprocessing import StandardScaler [as 別名]
# 或者: from sklearn.preprocessing.StandardScaler import tolist [as 別名]
def check_clustering(name, Alg):
    """Smoke-test a clustering estimator class on a small blob dataset.

    Fits the estimator on both an ndarray and a plain Python list, checks
    that ``labels_`` has one entry per sample, that the clustering roughly
    recovers the true blobs (adjusted Rand score > 0.4), and — except for
    SpectralClustering, which cannot be made deterministic — that
    ``fit_predict`` agrees with ``fit`` followed by ``labels_``.

    Parameters
    ----------
    name : str
        Name of the estimator class (used for special-casing).
    Alg : type
        The clustering estimator class to instantiate and test.
    """
    X, y = make_blobs(n_samples=50, random_state=1)
    X, y = shuffle(X, y, random_state=7)
    X = StandardScaler().fit_transform(X)
    n_samples, n_features = X.shape
    # catch deprecation and neighbors warnings
    with warnings.catch_warnings(record=True):
        alg = Alg()
    set_fast_parameters(alg)
    if hasattr(alg, "n_clusters"):
        alg.set_params(n_clusters=3)
    set_random_state(alg)
    if name == 'AffinityPropagation':
        # Default preference rarely converges on this data; cap iterations.
        alg.set_params(preference=-100)
        alg.set_params(max_iter=100)

    # fit
    alg.fit(X)
    # with lists
    alg.fit(X.tolist())

    assert_equal(alg.labels_.shape, (n_samples,))
    pred = alg.labels_
    assert_greater(adjusted_rand_score(pred, y), 0.4)
    # fit another time with ``fit_predict`` and compare results
    # BUG FIX: was ``name is 'SpectralClustering'`` — identity comparison
    # with a string literal depends on interning and is not guaranteed to
    # be True even for equal strings; string equality must use ``==``.
    if name == 'SpectralClustering':
        # there is no way to make Spectral clustering deterministic :(
        return
    set_random_state(alg)
    with warnings.catch_warnings(record=True):
        pred2 = alg.fit_predict(X)
    assert_array_equal(pred, pred2)
開發者ID:AlexMarshall011,項目名稱:scikit-learn,代碼行數:34,代碼來源:estimator_checks.py

示例2: check_transformer_general

# 需要導入模塊: from sklearn.preprocessing import StandardScaler [as 別名]
# 或者: from sklearn.preprocessing.StandardScaler import tolist [as 別名]
def check_transformer_general(name, Transformer):
    """Run the generic transformer checks on a small two-blob dataset.

    The checks run twice: once with numpy arrays and once with the same
    data converted to plain Python lists, so list inputs get identical
    coverage.
    """
    data, labels = make_blobs(
        n_samples=30,
        centers=[[0, 0, 0], [1, 1, 1]],
        random_state=0,
        n_features=2,
        cluster_std=0.1,
    )
    data = StandardScaler().fit_transform(data)
    # Shift so the minimum feature value is exactly zero.
    data = data - data.min()
    for X_in, y_in in ((data, labels), (data.tolist(), labels.tolist())):
        _check_transformer(name, Transformer, X_in, y_in)
開發者ID:AlexMarshall011,項目名稱:scikit-learn,代碼行數:9,代碼來源:estimator_checks.py

示例3: check_transformer

# 需要導入模塊: from sklearn.preprocessing import StandardScaler [as 別名]
# 或者: from sklearn.preprocessing.StandardScaler import tolist [as 別名]
def check_transformer(name, Transformer):
    """Run the generic transformer checks, skipping known-unstable cases.

    On 32-bit Python a few transformers produce non-deterministic output,
    so those are skipped outright; otherwise the checks run on a small
    two-blob dataset, with both array and plain-list inputs.
    """
    unstable_on_32bit = ('CCA', 'LocallyLinearEmbedding', 'KernelPCA')
    if name in unstable_on_32bit and _is_32bit():
        # Those transformers yield non-deterministic output when executed on
        # a 32bit Python. The same transformers are stable on 64bit Python.
        # FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
        # cause such unstable behaviors.
        raise SkipTest(name + ' is non deterministic on 32bit Python')

    data, labels = make_blobs(
        n_samples=30,
        centers=[[0, 0, 0], [1, 1, 1]],
        random_state=0,
        n_features=2,
        cluster_std=0.1,
    )
    data = StandardScaler().fit_transform(data)
    # Shift so the minimum feature value is exactly zero.
    data = data - data.min()
    _check_transformer(name, Transformer, data, labels)
    _check_transformer(name, Transformer, data.tolist(), labels.tolist())
開發者ID:vmuthusamy,項目名稱:scikit-learn,代碼行數:18,代碼來源:estimator_checks.py

示例4: Point

# 需要導入模塊: from sklearn.preprocessing import StandardScaler [as 別名]
# 或者: from sklearn.preprocessing.StandardScaler import tolist [as 別名]
# NOTE(review): script fragment — ``X``, ``n_clusters_``, ``EPS`` and
# ``MIN_SAMPLES`` are defined earlier in the file, outside this chunk.
plt.title('Sklearn - estimated number of clusters: %d' % n_clusters_)
plt.grid()

#################################################################################
# our version
#################################################################################
import sys

# Make the local DBSCAN implementation importable.
sys.path.append("../db_scan")
from dbscan import DBscan
from point import Point

#################################################################################
# Compute DBSCAN
#################################################################################
# Convert the feature array to nested Python lists so each row can be
# wrapped in a Point.  Assumes X is an (n, 2) array — TODO confirm.
xx = X.tolist()
point_array = []
for x in xx:
    pt = Point(x[0], x[1])
    point_array.append(pt)
# db = DBscan(point_array=parr, start_point_index= 0, cluster_map=clustmap, epsilon=EPS, min_neighbour=MIN_SAMPLES)
db = DBscan()
# Run the custom DBSCAN with the same eps/minPts used for the sklearn run.
clusters = db.start(points=point_array, eps=EPS, minPts=MIN_SAMPLES)

#################################################################################
# Plot result
#################################################################################
# Second panel of the comparison figure; one colour per discovered cluster.
plt.subplot(212)
colors = plt.cm.Spectral(np.linspace(0, 1, len(clusters)))

for p in point_array:
開發者ID:tstrzeba,項目名稱:DBSCAN,代碼行數:33,代碼來源:scikitresults_test.py

示例5: range

# 需要導入模塊: from sklearn.preprocessing import StandardScaler [as 別名]
# 或者: from sklearn.preprocessing.StandardScaler import tolist [as 別名]
# NOTE(review): Python 2 script fragment (bare ``print`` statements); the
# code that consumes ``X_corrd``/``Xlength`` lies past the end of this chunk.
if os.path.exists("log.npy"):
    info = np.load("log.npy")
X=info  # NOTE(review): NameError if log.npy is absent — info is only bound above



# Y = []
# for x in range(0,len(X)):
#   # print X[x][1]
#   if X[x][1]>400:
#     Y.append(X[x])
# X = Y
# print X

# Switch to plain Python lists so the ``append`` calls below work.
X = X.tolist();
# print X
# Column-wise mean of the original data, computed before padding rows are added.
mean =  np.mean(X,axis=0)
print mean
Xlength = len(X)
X_corrd = X  # alias, not a copy: later appends to X are visible via X_corrd
print Xlength
# Pad sentinel points at both ends of the x-range, all at the mean y value —
# presumably to anchor the plot/cluster range.  TODO confirm intent.
X.append([0,mean[1]])
X.append([1,mean[1]])
X.append([3,mean[1]])
X.append([4,mean[1]])
X.append([5,mean[1]])
X.append([1200,mean[1]])
X.append([1201,mean[1]])
X.append([1202,mean[1]])
X.append([1230,mean[1]])
開發者ID:zzpnm003,項目名稱:stress_level,代碼行數:32,代碼來源:plot_dbscan.py


注:本文中的sklearn.preprocessing.StandardScaler.tolist方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。