本文整理汇总了Python中与 sklearn.preprocessing.MinMaxScaler 配合使用 reshape 方法的典型用法代码示例。需要注意的是:MinMaxScaler 本身并没有 reshape 方法,示例中的 reshape 实际作用于 NumPy 数组,通常用于在缩放前后把一维数组调整为二维(例如 reshape(-1, 1))。如果您正苦于此类用法的问题,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解 sklearn.preprocessing.MinMaxScaler 类
的用法示例。
在下文中一共展示了MinMaxScaler.reshape方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: make_data
# 需要导入模块: from sklearn.preprocessing import MinMaxScaler [as 别名]
# 或者: from sklearn.preprocessing.MinMaxScaler import reshape [as 别名]
def make_data(n_samples=1000, n_features=1, n_targets=1, informative_prop=1.0,
              noise=0.0, test_prop=0.1, valid_prop=0.3, method='linear'):
    """Generate a regression dataset, scale it to [0, 1], and split it.

    Parameters
    ----------
    n_samples, n_features, n_targets : int
        Dataset dimensions passed to the sklearn generators.
    informative_prop : float
        Fraction of features that are informative ('linear' method only).
    noise : float
        Gaussian noise level for the synthetic generators.
    test_prop, valid_prop : float
        Fractions of the data reserved for the test / validation splits.
    method : str
        'linear' -> make_regression, 'boston' -> load_boston,
        anything else -> make_friedman3.

    Returns
    -------
    dict
        Theano shared variables X_/Y_ for train/valid/test plus split
        sizes and input/output dimensionality.
    """
    if method == 'linear':
        params = dict(n_features=n_features,
                      n_informative=int(n_features * informative_prop),
                      noise=noise,
                      n_targets=n_targets,
                      n_samples=n_samples,
                      shuffle=False,
                      bias=0.0)
        X, Y = make_regression(**params)
    elif method == 'boston':
        # NOTE(review): load_boston was removed from sklearn >= 1.2;
        # this branch only works on older sklearn versions.
        boston = load_boston()
        X = boston.data
        Y = boston.target
    else:
        X, Y = make_friedman3(n_samples=n_samples, n_features=n_features,
                              noise=noise)
    # Reshape 1-D arrays to 2-D *before* scaling: modern sklearn scalers
    # reject 1-D input (the original scaled first, then reshaped).
    if len(X.shape) > 1:
        n_features = X.shape[1]
    else:
        X = X.reshape(X.shape[0], -1)
        n_features = 1
    if len(Y.shape) > 1:
        n_targets = Y.shape[1]
    else:
        Y = Y.reshape(Y.shape[0], -1)
        n_targets = 1
    X = MinMaxScaler(feature_range=(0.0, 1.0)).fit_transform(X)
    X = X.astype(theano.config.floatX)
    Y = MinMaxScaler(feature_range=(0.0, 1.0)).fit_transform(Y)
    Y = Y.astype(theano.config.floatX)
    # BUG FIX: the original passed test_prop=valid_prop, silently
    # ignoring the caller's test_prop argument.
    X_train, Y_train, X_valid, Y_valid, X_test, Y_test = \
        train_valid_test_split(X, Y,
                               test_prop=test_prop, valid_prop=valid_prop)
    return dict(
        X_train=theano.shared(X_train),
        Y_train=theano.shared(Y_train),
        X_valid=theano.shared(X_valid),
        Y_valid=theano.shared(Y_valid),
        X_test=theano.shared(X_test),
        Y_test=theano.shared(Y_test),
        num_examples_train=X_train.shape[0],
        num_examples_valid=X_valid.shape[0],
        num_examples_test=X_test.shape[0],
        input_dim=n_features,
        output_dim=n_targets)
示例2: fit
# 需要导入模块: from sklearn.preprocessing import MinMaxScaler [as 别名]
# 或者: from sklearn.preprocessing.MinMaxScaler import reshape [as 别名]
def fit(self, X):
    """Fit per-feature histograms to X and derive outlier scores/threshold.

    HBOS-style: each of the ``self.d`` features gets a ``self.bins``-bin
    density histogram; samples falling in sparse bins receive high
    scores.  Per-feature scores are summed, and ``self.threshold`` is the
    ``1 - self.contamination`` percentile of the summed training scores.

    Side effects: sets self.n, self.d, self.hist, self.bin_edges,
    self.threshold, self.decision_scores, self.y_pred, self.mu,
    self.sigma.  Assumes X is a 2-D array (n_samples, n_features).
    """
    self.n, self.d = X.shape[0], X.shape[1]
    out_scores = np.zeros([self.n, self.d])
    # hist[:, i] holds feature i's bin densities; bin_edges[:, i] holds
    # its self.bins + 1 edges.
    hist = np.zeros([self.bins, self.d])
    bin_edges = np.zeros([self.bins + 1, self.d])
    # build the bins
    for i in range(self.d):
        hist[:, i], bin_edges[:, i] = np.histogram(X[:, i], bins=self.bins,
                                                   density=True)
        # check the integrity: with density=True the histogram must
        # integrate to 1 over the bins
        assert (
            math.isclose(np.sum(hist[:, i] * np.diff(bin_edges[:, i])), 1))
    # calculate the threshold
    for i in range(self.d):
        # find histogram assignments of data points; np.digitize returns
        # 0 for x < edges[0], len(edges) for x >= edges[-1], and k when
        # edges[k-1] <= x < edges[k] otherwise
        bin_ind = np.digitize(X[:, i], bin_edges[:, i], right=False)
        # invert the densities so sparse bins score high;
        # very important to do scaling. Not necessary to use min max
        out_score = np.max(hist[:, i]) - hist[:, i]
        out_score = MinMaxScaler().fit_transform(out_score.reshape(-1, 1))
        for j in range(self.n):
            # out sample left (below the first edge)
            if bin_ind[j] == 0:
                dist = np.abs(X[j, i] - bin_edges[0, i])
                bin_width = bin_edges[1, i] - bin_edges[0, i]
                # assign it to bin 0 if within beta bin-widths of the edge
                if dist < bin_width * self.beta:
                    out_scores[j, i] = out_score[bin_ind[j]]
                else:
                    # too far out: maximum possible outlier score
                    out_scores[j, i] = np.max(out_score)
            # out sample right (at or beyond the last edge;
            # bin_edges.shape[0] == self.bins + 1 == len(edges))
            elif bin_ind[j] == bin_edges.shape[0]:
                dist = np.abs(X[j, i] - bin_edges[-1, i])
                bin_width = bin_edges[-1, i] - bin_edges[-2, i]
                # assign it to bin k (last bin: index bin_ind[j] - 2)
                if dist < bin_width * self.beta:
                    out_scores[j, i] = out_score[bin_ind[j] - 2]
                else:
                    out_scores[j, i] = np.max(out_score)
            else:
                # in-range sample: score of its own bin (index shift of 1
                # because digitize is 1-based for in-range values)
                out_scores[j, i] = out_score[bin_ind[j] - 1]
    # total outlyingness = sum of the per-feature scores
    out_scores_sum = np.sum(out_scores, axis=1)
    # threshold at the (1 - contamination) percentile of training scores
    self.threshold = scoreatpercentile(out_scores_sum,
                                       100 * (1 - self.contamination))
    self.hist = hist
    self.bin_edges = bin_edges
    self.decision_scores = out_scores_sum
    # binary outlier labels (1 = outlier) for the training data
    self.y_pred = (self.decision_scores > self.threshold).astype('int')
    self.mu = np.mean(self.decision_scores)
    self.sigma = np.std(self.decision_scores)
示例3: decision_function
# 需要导入模块: from sklearn.preprocessing import MinMaxScaler [as 别名]
# 或者: from sklearn.preprocessing.MinMaxScaler import reshape [as 别名]
def decision_function(self, X_test):
    """Score test samples against the histograms learned in ``fit``.

    For each feature, a sample's score is the (inverted, min-max scaled)
    density of the histogram bin it falls in; samples outside the
    training range get either the nearest boundary bin's score (if
    within ``self.beta`` bin-widths) or the maximum score.  Returns the
    per-sample sum of feature scores as a 1-D array.
    """
    m = X_test.shape[0]
    feature_scores = np.zeros([m, self.d])
    n_edges = self.bin_edges.shape[0]
    for feat in range(self.d):
        edges = self.bin_edges[:, feat]
        # Place each test value among the training bin edges.
        assignments = np.digitize(X_test[:, feat], edges, right=False)
        # Invert densities so rare bins score high, then rescale.
        # Scaling matters; min-max is just one convenient choice.
        inverted = np.max(self.hist[:, feat]) - self.hist[:, feat]
        inverted = MinMaxScaler().fit_transform(inverted.reshape(-1, 1))
        worst = np.max(inverted)
        for row in range(m):
            idx = assignments[row]
            if idx == 0:
                # Sample lies left of the training range.
                gap = np.abs(X_test[row, feat] - edges[0])
                width = edges[1] - edges[0]
                # Near the boundary -> first bin's score, else worst.
                feature_scores[row, feat] = (
                    inverted[idx] if gap < width * self.beta else worst)
            elif idx == n_edges:
                # Sample lies right of the training range.
                gap = np.abs(X_test[row, feat] - edges[-1])
                width = edges[-1] - edges[-2]
                # Near the boundary -> last bin's score, else worst.
                feature_scores[row, feat] = (
                    inverted[idx - 2] if gap < width * self.beta else worst)
            else:
                # In-range: score of the bin the sample falls in.
                feature_scores[row, feat] = inverted[idx - 1]
    return np.sum(feature_scores, axis=1)