本文整理汇总了Python中sklearn.preprocessing.Scaler.inverse_transform方法的典型用法代码示例。如果您正苦于以下问题:Python Scaler.inverse_transform方法的具体用法?Python Scaler.inverse_transform怎么用?Python Scaler.inverse_transform使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.preprocessing.Scaler的用法示例。
在下文中一共展示了Scaler.inverse_transform方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_scaler_without_centering
# 需要导入模块: from sklearn.preprocessing import Scaler [as 别名]
# 或者: from sklearn.preprocessing.Scaler import inverse_transform [as 别名]
def test_scaler_without_centering():
    """Scaler(with_mean=False): unit variance only, means left unscaled-out.

    Checks both the estimator API and the scale() convenience function,
    plus the inverse_transform round-trip.
    """
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # make the first feature constant (all zeros)

    expected_means = [0., -0.01, 2.24, -0.35, -0.78]
    expected_stds = [0., 1., 1., 1., 1.]  # constant column keeps std 0

    def check_scaled(A):
        # Scaled output is finite, has the expected means, and unit
        # variance everywhere except the constant column.
        assert not np.any(np.isnan(A))
        assert_array_almost_equal(A.mean(axis=0), expected_means, 2)
        assert_array_almost_equal(A.std(axis=0), expected_stds)

    def check_roundtrip(A):
        # inverse_transform must return a fresh array equal to X.
        back = scaler.inverse_transform(A)
        assert back is not X
        assert back is not A
        assert_array_almost_equal(back, X)

    scaler = Scaler(with_mean=False)
    X_scaled = scaler.fit(X).transform(X, copy=True)
    check_scaled(X_scaled)
    assert X_scaled is not X  # copy=True: the input is left untouched
    check_roundtrip(X_scaled)

    # The scale() function must behave like the fitted estimator.
    X_scaled = scale(X, with_mean=False)
    check_scaled(X_scaled)
    assert X_scaled is not X
    check_roundtrip(X_scaled)
示例2: test_scaler_1d
# 需要导入模块: from sklearn.preprocessing import Scaler [as 别名]
# 或者: from sklearn.preprocessing.Scaler import inverse_transform [as 别名]
def test_scaler_1d():
    """Scaling of a 1d dataset: zero mean, unit variance, invertible."""
    rng = np.random.RandomState(0)
    data = rng.randn(5)
    original = data.copy()

    scaler = Scaler()
    scaled = scaler.fit(data).transform(data, copy=False)
    assert_array_almost_equal(scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(scaled.std(axis=0), 1.0)

    # inverse_transform must undo the scaling exactly
    restored = scaler.inverse_transform(scaled)
    assert_array_almost_equal(restored, original)

    # A plain Python list is accepted as input as well.
    data = [0., 1., 2, 0.4, 1.]
    scaler = Scaler()
    scaled = scaler.fit(data).transform(data, copy=False)
    assert_array_almost_equal(scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(scaled.std(axis=0), 1.0)

    # The scale() function must agree with the estimator.
    scaled = scale(data)
    assert_array_almost_equal(scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(scaled.std(axis=0), 1.0)
示例3: test_scaler_without_centering
# 需要导入模块: from sklearn.preprocessing import Scaler [as 别名]
# 或者: from sklearn.preprocessing.Scaler import inverse_transform [as 别名]
def test_scaler_without_centering():
    """Scaler(with_mean=False) must behave identically on dense and CSR input.

    Fits one scaler on the dense array and one on its CSR copy, then checks
    that learned statistics, scaled outputs and inverse_transform round-trips
    all agree.
    """
    rng = np.random.RandomState(42)
    X = rng.randn(4, 5)
    X[:, 0] = 0.0  # make the first feature constant (all zeros)
    X_csr = sp.csr_matrix(X)

    scaler = Scaler(with_mean=False).fit(X)
    X_scaled = scaler.transform(X, copy=True)
    assert_false(np.any(np.isnan(X_scaled)))

    scaler_csr = Scaler(with_mean=False).fit(X_csr)
    X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
    assert_false(np.any(np.isnan(X_csr_scaled.data)))

    # Fitting on dense or sparse data must learn the same statistics.
    assert_equal(scaler.mean_, scaler_csr.mean_)
    assert_array_almost_equal(scaler.std_, scaler_csr.std_)

    # Means are preserved (only variance is normalized); the constant
    # column keeps std 0.
    assert_array_almost_equal(
        X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
    assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])

    X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis0(X_csr_scaled)
    assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
    assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))

    # copy=True: neither input has been modified.
    assert_true(X_scaled is not X)
    assert_true(X_csr_scaled is not X_csr)

    X_scaled_back = scaler.inverse_transform(X_scaled)
    assert_true(X_scaled_back is not X)
    assert_true(X_scaled_back is not X_scaled)
    assert_array_almost_equal(X_scaled_back, X)

    X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
    assert_true(X_csr_scaled_back is not X_csr)
    assert_true(X_csr_scaled_back is not X_csr_scaled)
    # BUG FIX: the original re-asserted the dense round-trip here (already
    # checked above) and never compared the sparse round-trip to the data.
    # Densify the CSR result and compare it against the original X.
    assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
示例4: test_scaler_2d_arrays
# 需要导入模块: from sklearn.preprocessing import Scaler [as 别名]
# 或者: from sklearn.preprocessing.Scaler import inverse_transform [as 别名]
def test_scaler_2d_arrays():
    """Scaling of a 2d array along the first axis."""
    rng = np.random.RandomState(0)
    data = rng.randn(4, 5)
    data[:, 0] = 0.0  # make the first column constant (all zeros)

    scaler = Scaler()
    scaled = scaler.fit(data).transform(data, copy=True)
    assert_false(np.any(np.isnan(scaled)))
    assert_array_almost_equal(scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(scaled.std(axis=0), [0., 1., 1., 1., 1.])
    # copy=True returns a new array and leaves the input intact
    assert_true(scaled is not data)

    # inverse_transform produces a fresh array equal to the original
    restored = scaler.inverse_transform(scaled)
    assert_true(restored is not data)
    assert_true(restored is not scaled)
    assert_array_almost_equal(restored, data)

    # scale() along axis=1 centers each row, optionally to unit variance
    scaled = scale(data, axis=1, with_std=False)
    assert_false(np.any(np.isnan(scaled)))
    assert_array_almost_equal(scaled.mean(axis=1), 4 * [0.0])
    scaled = scale(data, axis=1, with_std=True)
    assert_false(np.any(np.isnan(scaled)))
    assert_array_almost_equal(scaled.mean(axis=1), 4 * [0.0])
    assert_array_almost_equal(scaled.std(axis=1), 4 * [1.0])
    assert_true(scaled is not data)  # the input was not modified

    # copy=False overwrites and returns the very same array
    scaled = scaler.fit(data).transform(data, copy=False)
    assert_false(np.any(np.isnan(scaled)))
    assert_array_almost_equal(scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(scaled.std(axis=0), [0., 1., 1., 1., 1.])
    assert_true(scaled is data)

    # A constant non-zero column still maps to zero mean and zero std.
    data = rng.randn(4, 5)
    data[:, 0] = 1.0
    scaler = Scaler()
    scaled = scaler.fit(data).transform(data, copy=True)
    assert_false(np.any(np.isnan(scaled)))
    assert_array_almost_equal(scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(scaled.std(axis=0), [0., 1., 1., 1., 1.])
    assert_true(scaled is not data)
示例5: test_scaler
# 需要导入模块: from sklearn.preprocessing import Scaler [as 别名]
# 或者: from sklearn.preprocessing.Scaler import inverse_transform [as 别名]
def test_scaler():
    """Scaling of 1d and 2d datasets along all axes, with round-trips."""
    # --- 1d array ---
    data = np.random.randn(5)
    saved = data.copy()
    scaler = Scaler()
    scaled = scaler.fit(data).transform(data, copy=False)
    assert_array_almost_equal(scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(scaled.std(axis=0), 1.0)
    # inverse transform restores the original values
    assert_array_almost_equal(scaler.inverse_transform(scaled), saved)

    # --- 1d Python list input ---
    data = [0., 1., 2, 0.4, 1.]
    scaler = Scaler()
    scaled = scaler.fit(data).transform(data, copy=False)
    assert_array_almost_equal(scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(scaled.std(axis=0), 1.0)
    # the scale() function must agree with the estimator
    scaled = scale(data)
    assert_array_almost_equal(scaled.mean(axis=0), 0.0)
    assert_array_almost_equal(scaled.std(axis=0), 1.0)

    # --- 2d array, first column constant at zero ---
    data = np.random.randn(4, 5)
    data[:, 0] = 0.0
    scaler = Scaler()
    scaled = scaler.fit(data).transform(data, copy=True)
    assert not np.any(np.isnan(scaled))
    assert_array_almost_equal(scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(scaled.std(axis=0), [0., 1., 1., 1., 1.])
    assert scaled is not data  # copy=True keeps the input intact
    # round-trip through inverse_transform yields a fresh, equal array
    restored = scaler.inverse_transform(scaled)
    assert restored is not data
    assert restored is not scaled
    assert_array_almost_equal(restored, data)

    # scale() along axis=1 centers (and optionally rescales) each row
    scaled = scale(data, axis=1, with_std=False)
    assert not np.any(np.isnan(scaled))
    assert_array_almost_equal(scaled.mean(axis=1), 4 * [0.0])
    scaled = scale(data, axis=1, with_std=True)
    assert not np.any(np.isnan(scaled))
    assert_array_almost_equal(scaled.mean(axis=1), 4 * [0.0])
    assert_array_almost_equal(scaled.std(axis=1), 4 * [1.0])
    assert scaled is not data  # the input was not modified

    # copy=False overwrites and returns the very same array
    scaled = scaler.fit(data).transform(data, copy=False)
    assert not np.any(np.isnan(scaled))
    assert_array_almost_equal(scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(scaled.std(axis=0), [0., 1., 1., 1., 1.])
    assert scaled is data

    # --- 2d array with a constant non-zero first column ---
    data = np.random.randn(4, 5)
    data[:, 0] = 1.0
    scaler = Scaler()
    scaled = scaler.fit(data).transform(data, copy=True)
    assert not np.any(np.isnan(scaled))
    assert_array_almost_equal(scaled.mean(axis=0), 5 * [0.0])
    assert_array_almost_equal(scaled.std(axis=0), [0., 1., 1., 1., 1.])
    assert scaled is not data
示例6: KMPBase
# 需要导入模块: from sklearn.preprocessing import Scaler [as 别名]
# 或者: from sklearn.preprocessing.Scaler import inverse_transform [as 别名]
#.........这里部分代码省略.........
filter_params=True, n_jobs=self.n_jobs,
**self._kernel_params())
if self.verbose: print "Done in", time.time() - start, "seconds"
if self.scale:
if self.verbose: print "Scaling dictionary"
start = time.time()
copy = True if self.metric == "precomputed" else False
self.scaler_ = Scaler(copy=copy)
K = self.scaler_.fit_transform(K)
if self.verbose: print "Done in", time.time() - start, "seconds"
# FIXME: this allocates a lot of intermediary memory
norms = np.sqrt(np.sum(K ** 2, axis=0))
return n_nonzero_coefs, K, y, norms
def _fit_multi(self, K, y, Y, n_nonzero_coefs, norms):
    # Fit one coefficient vector per output column of Y, in parallel.
    # NOTE(review): Python 2 code (`print` statements, `xrange`);
    # the `y` parameter is not used in this method.
    if self.verbose: print "Starting training..."
    start = time.time()
    # One _run_iterator job per output dimension; each job fits against
    # the (scaled) kernel K with at most n_nonzero_coefs components.
    coef = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
        delayed(_run_iterator)(self._get_estimator(),
                               self._get_loss(),
                               K, Y[:, i], n_nonzero_coefs, norms,
                               self.n_refit, self.check_duplicates)
        for i in xrange(Y.shape[1]))
    # Stack the per-output coefficient vectors, one row per column of Y.
    self.coef_ = np.array(coef)
    if self.verbose: print "Done in", time.time() - start, "seconds"
def _score(self, y_true, y_pred):
    # Score predictions; every branch returns "higher is better".
    if self.score_func == "auc":
        return auc(y_true, y_pred)
    if hasattr(self, "lb_"):
        # Classification path: a label binarizer (lb_) was fitted, so map
        # continuous predictions back to discrete labels before scoring.
        y_pred = self.lb_.inverse_transform(y_pred, threshold=0.5)
        if self.score_func is None:
            # default classification metric: plain accuracy
            return np.mean(y_true == y_pred)
        else:
            return self.score_func(y_true, y_pred)
    else:
        # Regression path: negative mean squared error.
        # FIXME: no need to ravel y_pred if y_true is 2d!
        return -np.mean((y_true - y_pred.ravel()) ** 2)
def _fit_multi_with_validation(self, K, y, Y, n_nonzero_coefs, norms):
    # NOTE(review): this example snippet is truncated by the source page;
    # the original method continues past the final verbose message below.
    # Build one fitting iterator per output column of Y (Python 2 code:
    # `print` statements and `xrange`).
    iterators = [FitIterator(self._get_estimator(), self._get_loss(),
                             K, Y[:, i], n_nonzero_coefs, norms,
                             self.n_refit, self.check_duplicates,
                             self.verbose)
                 for i in xrange(Y.shape[1])]
    if self.verbose: print "Computing validation dictionary..."
    start = time.time()
    # Kernel between the validation points and the dictionary components.
    K_val = pairwise_kernels(self.X_val, self.components_,
                             metric=self.metric,
                             filter_params=True,
                             n_jobs=self.n_jobs,
                             **self._kernel_params())
    if self.verbose: print "Done in", time.time() - start, "seconds"
    if self.scale:
        # Reuse the scaler fitted on the training kernel.
        K_val = self.scaler_.transform(K_val)
    y_val = self.y_val
    if self.scale_y:
        # Apply the same target scaling as used during fitting.
        y_val = self.y_scaler_.transform(y_val)
    if self.verbose: print "Starting training..."
示例7: selection
# 需要导入模块: from sklearn.preprocessing import Scaler [as 别名]
# 或者: from sklearn.preprocessing.Scaler import inverse_transform [as 别名]
Xtrain = Xtrain[~np.isnan(np.nansum(Xtrain, axis=1)), :]
if np.unique(ytrain).shape[0] > 1:
# feature selection (find the 50% most discriminative channels)
fs.fit(Xtrain, ytrain) # find
Xtrain = fs.transform(Xtrain) # remove unnecessary channels
# normalization
scaler.fit(Xtrain) # find
Xtrain = scaler.transform(Xtrain) # apply zscore
# SVM fit
clf.fit(Xtrain, ytrain, sample_weight=sw_train)
# retrieve hyperplan feature identification
coef[split, fold, d, :, :] = 0 # initialize
# --- univariate
uni_features = fs.pvalues_ <= stats.scoreatpercentile(fs.pvalues_, fs.percentile)
# --- multivariate
coef[split, fold, d, :, uni_features] = scaler.inverse_transform(clf.coef_).T
# predict cross val (deal with NaN in testing)
# generalize across all time points
for d_tg in range(0, n_dims_tg):
sys.stdout.write("*")
sys.stdout.flush()
# select data
Xtest = Xm_shfl[test, :, dims_tg[d, d_tg]]
# handles NaNs
test_nan = np.isnan(np.nansum(Xtest, axis=1))
Xtest = Xtest[~test_nan, :]
# preproc
Xtest = fs.transform(Xtest)
Xtest = scaler.transform(Xtest)
# predict
if (Xtest.shape[0] - np.sum(test_nan)) > 0: