This article collects and summarizes typical usage examples of the StandardScaler.reshape method from Python's sklearn.preprocessing module. If you are wondering exactly how Python's StandardScaler.reshape is used, how to call it, or what real-world examples look like, the curated code samples here may help. You can also read further about the class this method belongs to, sklearn.preprocessing.StandardScaler.
The following shows 5 code examples of the StandardScaler.reshape method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
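Strictly speaking, reshape is not defined on the scaler itself: StandardScaler.fit_transform returns a NumPy ndarray, and it is that array (or the input passed into it) that gets reshaped, as in the examples below. A minimal, self-contained sketch of the pattern, using made-up toy values:

import numpy as np
from sklearn.preprocessing import StandardScaler

# Toy data: one feature as a flat 1-D array (values are made up for illustration).
values = np.array([3.0, 1.5, 4.2, 2.8, 5.1])

# fit_transform expects a 2-D array of shape (n_samples, n_features),
# so the 1-D vector is reshaped to a single column first.
scaled = StandardScaler().fit_transform(values.reshape(-1, 1))

# The result is an ndarray; reshape/ravel it back to 1-D if needed.
print(scaled.ravel())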
Example 1: linregress
# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import reshape [as alias]
def linregress(X_train, X_test, y_train, y_test):
    coef = []
    for col in X_train.columns.tolist():
        # Scale each column, then reshape the resulting array to a single
        # (n_samples, 1) column before fitting the regression.
        X = StandardScaler().fit_transform(X_train[col])
        lr = LinearRegression()
        lr.fit(X.reshape(-1, 1), y_train)
        coef.append([col, lr.coef_])
    # Sort features by coefficient, largest first.
    coef = sorted(coef, key=lambda x: x[1])[::-1]
    nos = [x[1] for x in coef]
    labs = [x[0] for x in coef]
    for lab in labs:
        if lab == 'doubles':
            labs[labs.index(lab)] = '2B'
        elif lab == 'triples':
            labs[labs.index(lab)] = '3B'
        elif lab == 'Intercept':
            idx = labs.index('Intercept')
            labs.pop(idx)
            nos.pop(idx)
    labs = [lab.upper() for lab in labs]
    x = range(len(nos))
    plt.plot(x, nos, lw=2, c='b')
    plt.xticks(x, labs)
    plt.title('Linear Regression Coefficients (Win Percentage)')
    plt.savefig('images/coefficients.png')
    plt.show()
    print(labs)
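Note that recent scikit-learn versions reject 1-D input to fit_transform, so a column such as X_train[col] has to be reshaped to (n_samples, 1) before scaling rather than after. A hedged sketch of the inner loop under that assumption; the DataFrame and its column names here are purely illustrative:

import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler

# Illustrative frame; the real code iterates over a baseball stats DataFrame.
X_train = pd.DataFrame({'doubles': [10, 20, 30, 40], 'triples': [1, 3, 2, 5]})
y_train = np.array([0.40, 0.45, 0.55, 0.60])

coef = []
for col in X_train.columns:
    # Reshape the 1-D column to (n_samples, 1) before scaling.
    X = StandardScaler().fit_transform(X_train[col].values.reshape(-1, 1))
    lr = LinearRegression().fit(X, y_train)
    coef.append((col, lr.coef_[0]))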
Example 2: guess_match_clf
# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import reshape [as alias]
def guess_match_clf(data_set, clf, week_info):
    week = int(week_info[3])
    team = get_team(data_set, week_info[0], week_info[2])
    opp = get_team(data_set, week_info[1], week_info[2])
    # Collect the previous-week stats of both teams into one flat feature vector.
    prev_week = []
    for item in team.cur_stats(week):
        prev_week.append(team.cur_stats(week)[item])
    for item in opp.cur_stats(week):
        prev_week.append(opp.cur_stats(week)[item])
    prev_week = np.asarray(prev_week)
    prev_week = StandardScaler().fit_transform(prev_week)
    print(week_info)
    # reshape(1, -1) turns the flat vector into a single-sample 2-D array.
    print(clf.predict_proba(data_scale(prev_week.reshape(1, -1)).reshape(1, -1)))
    print()
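The reshape(1, -1) call above is the standard way to present a single observation to an estimator, since predict_proba expects a 2-D (n_samples, n_features) array. A small self-contained sketch of the same idea, with made-up data and a plain LogisticRegression standing in for clf (the original's get_team and data_scale helpers are not reproduced here):

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler

# Toy training data (made up): 6 samples, 4 stats each.
rng = np.random.RandomState(0)
X = rng.rand(6, 4)
y = np.array([0, 1, 0, 1, 1, 0])

scaler = StandardScaler().fit(X)
clf = LogisticRegression().fit(scaler.transform(X), y)

# A single new observation arrives as a flat vector;
# reshape(1, -1) turns it into one row so predict_proba accepts it.
new_obs = rng.rand(4)
print(clf.predict_proba(scaler.transform(new_obs.reshape(1, -1))))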
Example 3: print
# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import reshape [as alias]
# In[ ]:
# ERROR: bad alloc, why?
#print("test accuracy %g"%accuracy.eval(feed_dict={x: mnist.test.images, yy: mnist.test.labels, keep_prob: 1.0}))
#test_accuracy = accuracy.eval(feed_dict={x: mnist.test.images, yy: mnist.test.labels})
#print("test_accuracy ")
#print(test_accuracy)
print("read data")
test = pd.read_csv('./data/test.csv')
print("predict")
# Convert the dataframe to a numpy array and standardize it
test_data = StandardScaler().fit_transform(np.float32(test.values))
# Reshape the flat 784-value rows into 28x28x1 images
test_data = test_data.reshape(-1, 28, 28, 1)
test_pred = session.run(prediction, feed_dict={x_image:test_data})
test_labels = np.argmax(test_pred, axis=1)
print("plot")
k = 0 # Try different image indices k
print("Label Prediction: %i"%test_labels[k])
fig = plt.figure(figsize=(2,2)); plt.axis('off')
plt.imshow(test_data[k,:,:,0]); plt.show()
print ("done")
# clean
#session.close()
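Here reshape(-1, 28, 28, 1) converts the flattened pixel rows of the CSV into NHWC image tensors for the network; -1 lets NumPy infer the number of images and the trailing 1 is the channel axis. A standalone sketch of just that step, using synthetic data instead of test.csv and no TensorFlow session:

import numpy as np
from sklearn.preprocessing import StandardScaler

# Synthetic stand-in for test.csv: 5 flattened 28x28 images.
flat = np.random.rand(5, 784).astype(np.float32)

scaled = StandardScaler().fit_transform(flat)

# -1 lets NumPy infer the number of images; the trailing 1 is the channel axis.
images = scaled.reshape(-1, 28, 28, 1)
print(images.shape)  # (5, 28, 28, 1)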
Example 4: _fit
# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import reshape [as alias]
def _fit(self, X, y, features_names=None, preload_features=None,
         ranking_th=0.005):
    X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                     multi_output=True)
    # Initialization
    n_features = X.shape[1]
    features = np.arange(n_features)

    cv = self.cv
    cv = check_cv(cv, y, classifier=is_classifier(self.estimator))
    if sklearn.__version__ == '0.17':
        n_splits = cv.n_folds
    else:
        n_splits = cv.get_n_splits(X, y)

    if self.verbose > 0:
        print("Fitting {0} folds for each of iteration".format(n_splits))

    if 0.0 < self.n_features_step < 1.0:
        step = int(max(1, self.n_features_step * n_features))
    else:
        step = int(self.n_features_step)
    if step <= 0:
        raise ValueError("Step must be >0")

    if features_names is not None:
        features_names = np.array(features_names)
    else:
        if self.features_names is not None:
            features_names = self.features_names
        else:
            features_names = np.arange(n_features)  # use indices

    tentative_support_ = np.zeros(n_features, dtype=bool)
    current_support_ = np.zeros(n_features, dtype=bool)

    self.scores_ = []
    self.scores_confidences_ = []
    self.features_per_it_ = []

    if preload_features is not None:
        preload_features = np.unique(preload_features).astype('int')
        current_support_[preload_features] = True

        X_selected = X[:, features[current_support_]]
        y_hat, cv_scores = my_cross_val_predict(clone(self.estimator),
                                                X_selected, y, cv=cv)
        # Iterate on the residual of the cross-validated predictions.
        target = y - y_hat
    else:
        target = y.copy()

    score, confidence_interval = -np.inf, 0
    proceed = np.sum(current_support_) < X.shape[1]
    while proceed:
        if self.verbose > 0:
            print('\nN-times variance of target: {}'.format(
                target.var() * target.shape[0]))

        # update values
        old_confidence_interval = confidence_interval
        old_score = score

        if self.scale:
            # Standardize the 1-D target: reshape to a column, scale, flatten back.
            target = StandardScaler().fit_transform(target.reshape(
                -1, 1)).ravel()
            # target = MinMaxScaler().fit_transform(target.reshape(
            #     -1, 1)).ravel()

        if self.verbose > 0:
            print()
            print('Feature ranking')
            print()
            print("target shape: {}".format(target.shape))
            print()

        # Rank the remaining features
        start_t = time.time()
        rank_estimator = clone(self.estimator)
        rank_estimator.fit(X, target)
        end_fit = time.time() - start_t

        # Get coefs
        start_t = time.time()
        if hasattr(rank_estimator, 'coef_'):
            coefs = rank_estimator.coef_
        elif hasattr(rank_estimator, 'feature_importances_'):
            coefs = rank_estimator.feature_importances_
        else:
            raise RuntimeError('The classifier does not expose '
                               '"coef_" or "feature_importances_" '
                               'attributes')
        end_rank = time.time() - start_t

        # Get ranks by ordering in ascending way
        if coefs.ndim > 1:
            ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
            coefs = coefs.sum(axis=0)
        else:
            ranks = np.argsort(safe_sqr(coefs))

        # for sparse case ranks is matrix
#......... remainder of this method omitted .........
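The reshape(-1, 1) followed by ravel() in the scaling step above is the usual trick for standardizing a 1-D target: StandardScaler operates on columns, so the vector is temporarily viewed as a single-column matrix and flattened back afterwards. A minimal sketch with made-up residuals:

import numpy as np
from sklearn.preprocessing import StandardScaler

# Made-up regression residuals standing in for `target` above.
target = np.array([0.2, -1.3, 0.7, 2.4, -0.5])

# StandardScaler works on columns, so go 1-D -> (n, 1) -> scale -> back to 1-D.
target = StandardScaler().fit_transform(target.reshape(-1, 1)).ravel()
print(target.mean(), target.std())  # approximately 0.0 and 1.0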
Example 5: numeric_fillna_standardize
# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import reshape [as alias]
def numeric_fillna_standardize(x):
    # Convert categorical values to integer codes (-1 marks missing values).
    x = x.cat.codes
    # Replace missing entries with the mean of the observed codes.
    x.loc[x == -1] = x[x != -1].mean()
    # Scale the column; reshape(-1, 1) gives the (n_samples, 1) shape the scaler expects.
    x = StandardScaler().fit_transform(x.reshape(-1, 1))
    return x
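Calling reshape directly on a pandas Series, as this example does, only works in older pandas releases; the method was deprecated and later removed. A hedged variant of the same helper that should run on recent pandas by converting to a NumPy array first (the sample Series is made up for illustration):

import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler

def numeric_fillna_standardize(x):
    # Categorical codes: -1 marks missing values; cast to float before imputing.
    x = x.cat.codes.astype(float)
    x.loc[x == -1] = x[x != -1].mean()
    # Recent pandas removed Series.reshape, so convert to a NumPy array first.
    return StandardScaler().fit_transform(x.to_numpy().reshape(-1, 1))

col = pd.Series(['a', 'b', None, 'a'], dtype='category')
print(numeric_fillna_standardize(col).ravel())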