

Python StandardScaler.inverse_transform Method Code Examples

This article collects typical usage examples of the Python method sklearn.preprocessing.StandardScaler.inverse_transform, drawn from open-source projects. If you are wondering what StandardScaler.inverse_transform does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore other usage examples of the containing class, sklearn.preprocessing.StandardScaler.


The following presents 15 code examples of StandardScaler.inverse_transform, sorted by popularity by default.
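
Before the collected examples, here is a minimal round-trip sketch (hypothetical data; assumes NumPy and scikit-learn are installed). fit_transform standardizes each column to zero mean and unit variance, and inverse_transform undoes it by multiplying by the stored per-feature scale and adding back the mean:

import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)           # zero mean, unit variance per column
X_back = scaler.inverse_transform(X_scaled)  # X_scaled * scaler.scale_ + scaler.mean_
assert np.allclose(X_back, X)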

Example 1: test_scaler_without_centering

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
def test_scaler_without_centering():
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
    X_csr = sparse.csr_matrix(X)
    X_csc = sparse.csc_matrix(X)

    assert_raises(ValueError, StandardScaler().fit, X_csr)

    null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
    X_null = null_transform.fit_transform(X_csr)
    assert_array_equal(X_null.data, X_csr.data)
    X_orig = null_transform.inverse_transform(X_null)
    assert_array_equal(X_orig.data, X_csr.data)

    scaler = StandardScaler(with_mean=False).fit(X)
    X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))

    scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
    X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))

    scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
    X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
    assert_false(np.any(np.isnan(X_csc_scaled.data)))

    assert_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csr.std_)

    assert_equal(scaler.mean_, scaler_csc.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csc.std_)

    assert_array_almost_equal(
        X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])

    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))

    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)

    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)

    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)

    X_csc_scaled_back = scaler_csc.inverse_transform(X_csc_scaled.tocsc())
    assert_true(X_csc_scaled_back is not X_csc)
    assert_true(X_csc_scaled_back is not X_csc_scaled)
    assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
Author: abouaziz | Project: scikit-learn | Lines: 61 | Source: test_preprocessing.py

Example 2: knn_max_density

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
    def knn_max_density(self, X, n_neighbors, step):

        ss = StandardScaler()
        ss.fit(X)
        X_standart = ss.transform(X)

        passed_points_indeces = range(len(X_standart))
        X_passed_standart = X_standart

        # Iteratively discard the sparsest points (largest mean kNN distance)
        # until at most n_neighbors points remain.
        while len(X_passed_standart) > n_neighbors:

            knn = NearestNeighbors(n_neighbors=n_neighbors, leaf_size=100)
            knn.fit(X_passed_standart)
            knn_dists, knn_indeces = knn.kneighbors()

            # mean distance to the nearest neighbors: a proxy for inverse local density
            knn_dists_mean = knn_dists.mean(axis=1)

            # keep everything except the n_points sparsest points this round
            n_points = max(1, int(step * len(X_passed_standart)))
            passed_points_indeces = knn_dists_mean.argsort()[:-n_points]

            X_passed_standart = X_passed_standart[passed_points_indeces]
            
        X_passed = ss.inverse_transform(X_passed_standart)

        return X_passed
Author: hushchyn-mikhail | Project: ship_tracks_recognition | Lines: 28 | Source: regressor_old.py

Example 3: PoissonRegression

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
class PoissonRegression(Regressor):
    """
    calulate the solution using the Newton-Raphson formula(second order optimization). This method has a advantage that its weight update rule needs no learning rate alpha. And it convages quickly.
    """
    def __init__(self, features=range(231)):
        Regressor.__init__(self)
        self.features = features
        self.weights = np.ones(len(features))
        self.xscaler = StandardScaler()
        self.yscaler = StandardScaler()

    def learn(self, Xtrain, ytrain):
        Xless = Xtrain[:, self.features]
        self.xscaler.fit(Xless)
        Xless = self.xscaler.transform(Xless)
        self.yscaler.fit(ytrain)
        ytrain = self.yscaler.transform(ytrain)
        itertimes = 20
        for i in range(itertimes):
            c = np.exp(np.dot(Xless, self.weights))
            gradient = np.dot(Xless.T, (ytrain - c))
            neg_hessian = np.dot(Xless.T, np.dot(np.diag(c), Xless))
            self.weights = self.weights + np.dot(np.linalg.inv(neg_hessian), gradient)

    def predict(self, Xtest):
        Xless = Xtest[:, self.features]
        Xless = self.xscaler.transform(Xless)
        ytest = np.exp(np.dot(Xless, self.weights))
        ytest = self.yscaler.inverse_transform(ytest)
        return ytest
Author: haoopeng | Project: MLAlgorithms | Lines: 32 | Source: algorithms.py
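
The Newton step in learn() reads w <- w + inv(X^T diag(c) X) X^T (y - c), with c = exp(Xw). Below is a self-contained numeric sketch of that update on synthetic Poisson data (all names are illustrative, not from the project):

import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
w_true = np.array([0.5, -0.2, 0.1])
y = rng.poisson(np.exp(X.dot(w_true)))           # synthetic Poisson targets

w = np.ones(3)
for _ in range(20):
    c = np.exp(X.dot(w))                         # predicted Poisson means
    gradient = X.T.dot(y - c)                    # gradient of the log-likelihood
    neg_hessian = X.T.dot(X * c[:, None])        # X^T diag(c) X without building diag(c)
    w += np.linalg.solve(neg_hessian, gradient)  # Newton step: no learning rate needed

Using np.linalg.solve instead of explicitly inverting the Hessian is numerically safer than the np.linalg.inv call in the example above.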

Example 4: test_scaler_1d

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
def test_scaler_1d():
    """Test scaling of dataset along single axis"""
    rng = np.random.RandomState(0)
    X = rng.randn(5)
    X_orig_copy = X.copy()

    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)

    # check inverse transform
    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_array_almost_equal(X_scaled_back, X_orig_copy)

    # Test with 1D list
    X = [0., 1., 2, 0.4, 1.]
    scaler = StandardScaler()
    X_scaled = scaler.fit(X).transform(X, copy=False)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)

    X_scaled = scale(X)
    assert_array_almost_equal(X_scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(X_scaled.std(axis=0), 1.0)
Author: MarkyV | Project: scikit-learn | Lines: 27 | Source: test_preprocessing.py
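
Note that this test targets an older scikit-learn that accepted 1-D input; recent releases require a 2-D array, so the modern equivalent of the round trip needs a reshape (a sketch, assuming a current scikit-learn):

import numpy as np
from sklearn.preprocessing import StandardScaler

x = np.array([0., 1., 2., 0.4, 1.])
scaler = StandardScaler()
x_scaled = scaler.fit_transform(x.reshape(-1, 1)).ravel()  # column vector in, flat out
x_back = scaler.inverse_transform(x_scaled.reshape(-1, 1)).ravel()
assert np.allclose(x_back, x)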

Example 5: background_model

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
def background_model(x_train, method='mean', n_components=10):
	"""
	use data from x_train to create a model/image of the background
	:param x_train: a matrix with 1 row per image frame, each column represents a pixel
		PCA is trained on this data
	:return: a vector that represents the background image
	"""
	# clean the data before pca and clustering (subtract mean, divide by st. dev.)
	scaler = StandardScaler().fit(x_train)
	x_train = scaler.transform(x_train)
	# use SVD instead of PCA, so that don't need to compute covariance
	eig = TruncatedSVD(n_components=n_components).fit(x_train)
	print(sum(eig.explained_variance_ratio_))
	train = eig.transform(x_train)

	# define background as an aggregation of each pixel value in the principal component space
	# can't see much of a difference between mean and median
	if method == 'median':
		back_pca = np.median(train, axis=0)
	elif method == 'mean':
		back_pca = np.mean(train, axis=0)
	else:
		print("method must either be 'median' or 'mean'")
		return 1

	# transform to full sized matrix
	back_vec = eig.inverse_transform(back_pca)
	# add mean and variance back in
	back_vec = scaler.inverse_transform(back_vec)
	return back_vec
Author: robert-giaquinto | Project: cs5512_project | Lines: 32 | Source: background_subtraction.py
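
A hypothetical driver for background_model (the frame shape and stand-in data are assumptions, not from the project): each frame is flattened to one row, and the returned vector is reshaped back into an image.

import numpy as np

# stand-in for real video frames: 200 grayscale images of 120x160 pixels
frames = [np.random.rand(120, 160) for _ in range(200)]
x_train = np.vstack([f.ravel() for f in frames])  # 1 row per frame, 1 column per pixel
back_vec = background_model(x_train, method='median', n_components=10)
background = back_vec.reshape(frames[0].shape)    # back to image form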

Example 6: get_track_params

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
    def get_track_params(self, X):

        ss = StandardScaler()
        ss.fit(X)

        transformed_tracks = ss.transform(X).mean(axis=0)
        tracks = ss.inverse_transform(transformed_tracks)

        return tracks, X.std(axis=0)
Author: hushchyn-mikhail | Project: ship_tracks_recognition | Lines: 11 | Source: regressor.py
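
Because standardization is affine, inverse-transforming the mean of the scaled data simply recovers the column means of X, so get_track_params returns (X.mean(axis=0), X.std(axis=0)). A quick check of that equivalence (hypothetical data):

import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.random.RandomState(1).randn(100, 3)
ss = StandardScaler().fit(X)
tracks = ss.inverse_transform(ss.transform(X).mean(axis=0).reshape(1, -1)).ravel()
assert np.allclose(tracks, X.mean(axis=0))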

Example 7: GmmInterest

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
class GmmInterest(InterestModel):
    def __init__(self, conf, expl_dims, measure, n_samples=40, n_components=6, update_frequency=10):
        InterestModel.__init__(self, expl_dims)

        self.measure = measure
        self.bounds = conf.bounds[:, expl_dims]
        self.n_components = n_components
        self.scale_t = 1  # 1. / n_samples
        self.t = -self.scale_t * n_samples
        self.scale_x = conf.bounds[1, expl_dims] - conf.bounds[0, expl_dims]
        self.scale_measure = abs(measure(numpy.zeros_like(conf.bounds[0, :]), numpy.zeros_like(conf.bounds[0])))

        self.data = numpy.zeros((n_samples, len(expl_dims) + 2))
        self.n_samples = n_samples
        self.scaler = StandardScaler()
        self.update_frequency = update_frequency

        for _ in range(n_samples):
            self.update(rand_bounds(conf.bounds), rand_bounds(conf.bounds))

    def sample(self):
        x = self.gmm_choice.sample()
        x = self.scaler.inverse_transform(numpy.hstack(([0.0], x.flatten(), [0.0])))[1:-1]
        x = numpy.maximum(x, self.bounds[0, :])
        x = numpy.minimum(x, self.bounds[1, :])
        return x.T

    def update(self, xy, ms):
        measure = self.measure(xy, ms)
        self.data[self.t % self.n_samples, 0] = self.t
        self.data[self.t % self.n_samples, -1] = measure
        self.data[self.t % self.n_samples, 1:-1] = xy.flatten()[self.expl_dims]

        self.t += self.scale_t
        if self.t >= 0:
            if self.t % self.update_frequency == 0:
                self.update_gmm()

        return self.t, xy.flatten()[self.expl_dims], measure

    def update_gmm(self):
        scaled_data = self.scaler.fit_transform(self.data)

        self.gmm = GMM(n_components=self.n_components, covariance_type="full")
        self.gmm.fit(numpy.array(scaled_data))
        self.gmm_choice = self.gmm_interest()

    def gmm_interest(self):
        cov_t_c = numpy.array([self.gmm.covars_[k, 0, -1] for k in range(self.gmm.n_components)])
        cov_t_c = numpy.exp(cov_t_c)
        # cov_t_c[cov_t_c <= 1e-100] = 1e-100

        gmm_choice = self.gmm.inference([0], range(1, len(self.expl_dims) + 1), [1.0])
        gmm_choice.weights_ = cov_t_c
        gmm_choice.weights_ /= numpy.array(gmm_choice.weights_).sum()

        return gmm_choice
Author: jgrizou | Project: explauto | Lines: 59 | Source: gmm_progress.py
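
The hstack(([0.0], x, [0.0]))[1:-1] dance in sample() exists because the scaler was fitted on rows of the form [t, x..., measure] (see update()); to invert only the x block, the code pads dummy t and measure entries, inverse-transforms the full row, then slices them off. A minimal sketch of that padding pattern (hypothetical shapes):

import numpy
from sklearn.preprocessing import StandardScaler

data = numpy.random.RandomState(0).randn(40, 5)   # columns: [t, x1, x2, x3, measure]
scaler = StandardScaler().fit(data)

x_scaled = numpy.zeros(3)                         # an x drawn in scaled space
padded = numpy.hstack(([0.0], x_scaled, [0.0]))   # dummy t and measure slots
x = scaler.inverse_transform(padded.reshape(1, -1)).ravel()[1:-1]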

Example 8: main

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
def main():

    df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data',
            header = None,
            sep = r'\s+')
    df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM',
            'AGE', 'DIS', 'RAD', 'TAX', 'PTRATIO', 'B',
            'LSTAT', 'MEDV']
    print(df.head())

    # Select a subset of the features and plot the correlation between features
    cols = ['LSTAT', 'INDUS', 'NOX', 'RM', 'MEDV']
    sns.pairplot(df[cols], size=2.5)
    plt.title('Correlations between 5 features')
    plt.show()

    # Plot a heatmap of the same subset of features
    cm = np.corrcoef(df[cols].values.T)
    sns.set(font_scale=2.5)
    hm = sns.heatmap(cm,
            cbar = True,
            annot = True,
            square = True,
            fmt = '.2f',
            annot_kws = {'size': 15},
            yticklabels = cols,
            xticklabels = cols)
    plt.show()

    X = df[['RM']].values
    y = df['MEDV'].values

    sc_x = StandardScaler()
    sc_y = StandardScaler()

    X_std = sc_x.fit_transform(X)
    y_std = sc_y.fit_transform(y[:, np.newaxis]).flatten()  # scalers expect 2-D input
    
    lr = LinearRegressionGD()
    lr.fit(X_std, y_std)

    plt.plot(range(1, lr.n_iter + 1), lr.cost_)
    plt.ylabel('SSE')
    plt.xlabel('Epoch')
    plt.show()

    lin_regplot(X_std, y_std, lr)
    plt.xlabel('Average number of rooms [RM] (standardized)')
    plt.ylabel('Price in $1000\'s [MEDV] (standardized)')
    plt.show()
    
    # Example prediction for a house with 5 rooms (transform expects a 2-D array)
    num_rooms_std = sc_x.transform(np.array([[5.0]]))
    price_std = lr.predict(num_rooms_std)
    print("Price in $1000's: %.3f" %
            sc_y.inverse_transform(price_std.reshape(-1, 1)).flatten()[0])
Author: southpaw94 | Project: MachineLearning | Lines: 58 | Source: housing.py

Example 9: clusterThose

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
def clusterThose(G,eps=0.1,min_samples=4):
    ''' Scale the data and cluster'''
    scaler = StandardScaler(copy=True)
    X_centered = scaler.fit(G).transform(G)
    db = DBSCAN(eps=eps, min_samples=min_samples).fit(X_centered)
    core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
    core_samples_mask[db.core_sample_indices_] = True
    labels = db.labels_
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    X = scaler.inverse_transform(X_centered)
    return X, n_clusters_, labels, core_samples_mask
Author: warmlogic | Project: foodGrouper | Lines: 13 | Source: foodgroups.py
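
Since inverse_transform undoes fit(G).transform(G) up to floating-point error, the returned X is numerically the input G; the point is that DBSCAN ran in the scaled space, where a single eps is comparable across features. A hypothetical call (assuming the imports used by the example):

import numpy as np

G = np.random.RandomState(3).randn(300, 2) * [1.0, 50.0]  # features on very different scales
X, n_clusters_, labels, core_samples_mask = clusterThose(G, eps=0.3, min_samples=5)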

Example 10: kmeans_fitting

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
def kmeans_fitting(rows, train):
    x = get_feature_vector(rows, train)
    scaler = StandardScaler()
    scaler.fit(x)
    x = scaler.transform(x)
    model = cluster.MiniBatchKMeans(n_clusters = 6)
    model.fit(x)
    centers = model.cluster_centers_
    print(centers)                               # centers in standardized space
    centers = scaler.inverse_transform(centers)  # centers in original feature units
    print(centers)
    return model, scaler
Author: jingdai2014 | Project: poncho-weather-clustering | Lines: 14 | Source: clustering.py
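
The pattern worth noting here: the MiniBatchKMeans centers live in standardized space, and inverse_transform maps them back to interpretable feature units. A self-contained sketch of just that step (synthetic data, assumed names):

import numpy as np
from sklearn import cluster
from sklearn.preprocessing import StandardScaler

x = np.random.RandomState(7).randn(500, 3) * [1, 10, 100] + [0, 5, 50]
scaler = StandardScaler().fit(x)
model = cluster.MiniBatchKMeans(n_clusters=6).fit(scaler.transform(x))
centers = scaler.inverse_transform(model.cluster_centers_)  # centers in original units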

Example 11: DAEGO

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
def DAEGO(X_s,H,P,batch_range):
	"""
	Parameters
	----------

	X_s: small class features

	H : layer sizes (the first layer should have as many neurons as there are features)

	P : percent oversampling

	batch_range : size of minibatch


	Returns
	-------

	syn_Z: synthetic samples with the same number of features as the minority class
	"""

	#normalization
	scaler=StdScaler()
	x_tr=scaler.fit_transform(X_s.astype(float))
	x_norm=norm(x_tr,axis=0)

	n_samples=int(X_s.shape[0]*P/100)
	print("generating %d samples" % n_samples)

	norm_param=[LA.norm(x) for x in x_tr.T]
	X_init=np.random.standard_normal(size=(n_samples,X_s.shape[1]))
	x_init_tr=scaler.transform(X_init)
	x_ini_norm=norm(x_init_tr)
	ae=autoencoder(dimensions=H)
	learning_rate = 0.001
	optimizer = tf.train.AdamOptimizer(learning_rate).minimize(ae['cost'])
	sess = tf.Session()
	sess.run(tf.initialize_all_variables())
	n_epoch=100
	for epoch_i in range(n_epoch):
	    for start, end in zip(range(0, len(x_norm), batch_range),range(batch_range, len(x_norm), batch_range)):
	        input_ = x_norm[start:end]
	        sess.run(optimizer, feed_dict={ae['x']: input_, ae['corrupt_prob']: [1.0]})
	    s="\r Epoch: %d Cost: %f"%(epoch_i, sess.run(ae['cost'], 
	    	feed_dict={ae['x']: X_s, ae['corrupt_prob']: [1.0]}))
	    stderr.write(s)
	    stderr.flush()
	x_init_encoded = sess.run(ae['y'], feed_dict={ae['x']: x_ini_norm, ae['corrupt_prob']: [0.0]})
	sess.close()
	x_init_norminv=np.multiply(x_init_encoded,norm_param)
	syn_Z=scaler.inverse_transform(x_init_norminv)
	return syn_Z
Author: nthakor | Project: imbalance_algorithms | Lines: 53 | Source: daego.py
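
The role of inverse_transform in DAEGO: synthetic points are generated in standardized space and mapped back to feature units at the very end. A stripped-down sketch of that skeleton, with a plain Gaussian generator standing in for the autoencoder (an assumption, not the DAEGO model):

import numpy as np
from sklearn.preprocessing import StandardScaler

X_s = np.random.RandomState(5).gamma(2.0, 2.0, size=(80, 4))   # minority-class stand-in
P = 50                                                         # percent oversampling

scaler = StandardScaler()
x_tr = scaler.fit_transform(X_s.astype(float))                 # work in standardized space
n_samples = int(X_s.shape[0] * P / 100)
z = np.random.standard_normal(size=(n_samples, X_s.shape[1]))  # generator stand-in
syn_Z = scaler.inverse_transform(z)                            # back to feature units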

Example 12: test_scaler_without_centering

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
def test_scaler_without_centering():
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # first feature is always zero
    X_csr = sp.csr_matrix(X)

    scaler = StandardScaler(with_mean=False).fit(X)
    X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))

    scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
    X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))

    assert_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csr.std_)

    assert_array_almost_equal(
        X_scaled.mean(axis=0), [0., -0.01,  2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])

    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))

    # Check that X has not been modified (copy)
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)

    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)

    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
Author: Lurunchik | Project: scikit-learn | Lines: 40 | Source: test_preprocessing.py

Example 13: InputScaler

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
class InputScaler():

    def __init__(self):
        self.scaler = StandardScaler()
        
    def fit_transform(self, data):
        flat = numpy.vstack(data)
        self.scaler.fit(flat)
        return [ self.scaler.transform(X) for X in data ]
    
    def transform(self, data):
        return [ self.scaler.transform(X) for X in data ]
    
    def inverse_transform(self, data):
        return [ self.scaler.inverse_transform(X) for X in data ]
Author: gchrupala | Project: reimaginet | Lines: 17 | Source: simple_data.py
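
A quick usage sketch for InputScaler with variable-length sequences (shapes are illustrative; assumes the example's imports): the scaler is fitted on the stacked rows of all sequences but transforms and inverts each sequence separately.

import numpy

# three sequences of different lengths, same feature width
data = [numpy.random.rand(n, 4) for n in (5, 8, 3)]
scaler = InputScaler()
scaled = scaler.fit_transform(data)          # list in, list out
restored = scaler.inverse_transform(scaled)  # per-sequence round trip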

Example 14: submit

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
def submit(args):
    """Run train-test experiment. """
    data = load_data(args['--data'])
    X_train = data['X_train']
    y_train = data['y_train']

    X_test = data['X_test']

    est = GradientBoostingRegressor(n_estimators=2000, verbose=1, max_depth=6,
                                    min_samples_leaf=9, learning_rate=0.02,
                                    max_features=33, random_state=1,
                                    subsample=1.0,
                                    loss='lad')

    model_cls = MODELS[args['<model>']]
    model = model_cls(est=est,
                      with_stationinfo=True,
                      with_date=True, with_solar=True,
                      with_mask=True,
                      intp_blocks=('nm_intp', 'nmft_intp', 'nm_intp_sigma'),
                      )

    print('_' * 80)
    print('Submit')
    print()
    print(model)
    print()
    print()

    scaler = StandardScaler()
    if args['--scaley']:
        y_train = scaler.fit_transform(y_train.copy())

    t0 = time()
    model.fit(X_train, y_train)
    print('model.fit took %.fm' % ((time() - t0) / 60.))
    pred = model.predict(X_test)
    if args['--scaley']:
        pred = scaler.inverse_transform(pred)

    data = load_data(args['--data'])
    date_idx = data['X_test'].date
    date_idx = date_idx.map(lambda x: x.strftime('%Y%m%d'))
    stid = pd.read_csv('data/station_info.csv')['stid']
    out = pd.DataFrame(index=date_idx, columns=stid, data=pred)
    out.index.name = 'Date'
    out.to_csv('hk_19.csv')
    IPython.embed()
Author: mhdella | Project: kaggle-solar-energy | Lines: 50 | Source: run.py
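
The --scaley branch is the standard target-scaling pattern: fit the scaler on y, train on scaled targets, then inverse-transform the predictions. Modern scikit-learn packages the same idea as sklearn.compose.TransformedTargetRegressor; a minimal sketch of the manual version (illustrative model and data):

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.preprocessing import StandardScaler

X = np.random.RandomState(2).randn(100, 5)
y = X.dot(np.arange(1., 6.)) * 1000.0             # target on a large scale

sc = StandardScaler()
y_s = sc.fit_transform(y.reshape(-1, 1)).ravel()  # scale targets for training
model = Ridge().fit(X, y_s)
pred = sc.inverse_transform(model.predict(X).reshape(-1, 1)).ravel()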

Example 15: knn_max_density

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import inverse_transform [as alias]
    def knn_max_density(self, X, n_neighbors, step):

        ss = StandardScaler()
        ss.fit(X)
        X_standart = ss.transform(X)

        passed_points_indeces = range(len(X_standart))
        X_passed_standart = X_standart

        n_neighbors = min(n_neighbors, len(X_passed_standart) - 1)
        knn = NearestNeighbors(n_neighbors=n_neighbors, leaf_size=100)
        knn.fit(X_passed_standart)
        knn_dists, knn_indeces = knn.kneighbors()

        knn_dists_mean = knn_dists.mean(axis=1)

        max_dense_point = knn_dists_mean.argsort()[0]

        passed_points_indeces = list(knn_indeces[max_dense_point]) + [max_dense_point]

        X_passed_standart = X_passed_standart[passed_points_indeces]

        X_passed = ss.inverse_transform(X_passed_standart)

        return X_passed
Author: hushchyn-mikhail | Project: ship_tracks_recognition | Lines: 50 | Source: regressor.py


Note: The sklearn.preprocessing.StandardScaler.inverse_transform examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors, and redistribution or use should follow each project's license. Do not reproduce without permission.