

Python StandardScaler.reshape Method Code Examples

This article collects typical usage examples of Python's sklearn.preprocessing.StandardScaler.reshape. If you are wondering what StandardScaler.reshape does, how to use it, or where to find examples of it, the curated code samples below may help; you can also explore further usage of sklearn.preprocessing.StandardScaler. Strictly speaking, reshape is a NumPy ndarray method: in the examples below it is called on the array returned by StandardScaler.fit_transform, or on the input array before it is handed to the scaler, not on the StandardScaler object itself.


Five code examples of StandardScaler.reshape are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
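As a quick orientation before the examples: the snippets below use reshape either to give StandardScaler the 2-D (n_samples, n_features) input it expects, or to reshape the scaled output for the next step. A minimal sketch of that pattern, with invented data:

import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.array([1.0, 2.0, 3.0, 4.0])            # a single 1-D feature
X_2d = X.reshape(-1, 1)                       # scikit-learn expects (n_samples, n_features)
X_scaled = StandardScaler().fit_transform(X_2d)

one_sample = X_scaled[0].reshape(1, -1)       # one row as a 2-D single-sample array
print(X_scaled.shape, one_sample.shape)       # (4, 1) (1, 1)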

Example 1: linregress

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import reshape [as alias]
def linregress(X_train, X_test, y_train, y_test):
    # Fit a single-feature linear regression on each standardized column
    # and collect the resulting coefficients.
    coef = []
    for col in X_train.columns.tolist():
        # Note: recent sklearn versions require 2-D input for fit_transform;
        # older versions accepted a 1-D column like this one.
        X = StandardScaler().fit_transform(X_train[col])
        lr = LinearRegression()
        lr.fit(X.reshape(-1, 1), y_train)
        coef.append([col, lr.coef_])
    # Sort coefficients in descending order.
    coef = sorted(coef, key=lambda x: x[1])[::-1]
    nos = [x[1] for x in coef]
    labs = [x[0] for x in coef]
    # Rename baseball stat labels and drop the intercept term.
    # Iterate over a copy so popping from labs does not skip elements.
    for lab in list(labs):
        if lab == 'doubles':
            labs[labs.index(lab)] = '2B'
        elif lab == 'triples':
            labs[labs.index(lab)] = '3B'
        elif lab == 'Intercept':
            idx = labs.index('Intercept')
            labs.pop(idx)
            nos.pop(idx)
    labs = [lab.upper() for lab in labs]
    # Plot the coefficients against the feature labels.
    x = range(len(nos))
    plt.plot(x, nos, lw=2, c='b')
    plt.xticks(x, labs)
    plt.title('Linear Regression Coefficients (Win Percentage)')
    plt.savefig('images/coefficients.png')
    plt.show()
    print(labs)
Developer: blemi4, Project: p2-baseball, Lines of code: 29, Source file: baseball.py
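In the snippet above, fit_transform receives a 1-D column and the result is reshaped afterwards, which older sklearn versions tolerated. With current sklearn the reshape has to happen before scaling; a minimal sketch of that variant, using a small invented DataFrame (the column names and target values are illustrative only):

import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler

df = pd.DataFrame({'doubles': [20, 31, 25, 40], 'triples': [2, 5, 3, 7]})
y = np.array([0.45, 0.55, 0.50, 0.60])

for col in df.columns:
    # Reshape the column to 2-D before scaling, as current sklearn requires.
    X = StandardScaler().fit_transform(df[col].values.reshape(-1, 1))
    lr = LinearRegression().fit(X, y)
    print(col, lr.coef_[0])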

Example 2: guess_match_clf

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import reshape [as alias]
def guess_match_clf(data_set, clf, week_info):
    # Build a single-match feature vector from both teams' stats through the given week.
    week = int(week_info[3])
    team = get_team(data_set, week_info[0], week_info[2])
    opp = get_team(data_set, week_info[1], week_info[2])
    prev_week = []
    for item in team.cur_stats(week):
        prev_week.append(team.cur_stats(week)[item])
    for item in opp.cur_stats(week):
        prev_week.append(opp.cur_stats(week)[item])
    prev_week = np.asarray(prev_week)
    # Note: recent sklearn versions require 2-D input for fit_transform.
    prev_week = StandardScaler().fit_transform(prev_week)
    print(week_info)
    # reshape(1, -1) presents the flat feature vector as a single 2-D sample.
    print(clf.predict_proba(data_scale(prev_week.reshape(1, -1)).reshape(1, -1)))
    print()
Developer: rsimmons1, Project: Nfl_predict, Lines of code: 16, Source file: analysis.py
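The reshape(1, -1) call above is the standard way to hand one flat feature vector to a fitted sklearn estimator. A self-contained sketch of that single-sample prediction step, with a fabricated classifier and data rather than the Nfl_predict helpers:

import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
X = rng.random((50, 6))                 # 50 past matches, 6 stats each (fake data)
y = rng.integers(0, 2, 50)              # fake win/loss labels

scaler = StandardScaler().fit(X)        # fit the scaler once on the historical data
clf = LogisticRegression().fit(scaler.transform(X), y)

new_match = rng.random(6)                               # one flat feature vector
new_match = scaler.transform(new_match.reshape(1, -1))  # reshape(1, -1): single sample
print(clf.predict_proba(new_match))

Unlike the snippet above, this sketch reuses the already-fitted scaler's transform for the new sample; calling fit_transform on a single sample would only standardize it against itself.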

Example 3: print

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import reshape [as alias]

# In[ ]:

# ERROR: bad alloc, why?
#print("test accuracy %g"%accuracy.eval(feed_dict={x: mnist.test.images, yy: mnist.test.labels, keep_prob: 1.0}))
#test_accuracy = accuracy.eval(feed_dict={x: mnist.test.images, yy: mnist.test.labels})
#print("test_accuracy ")
#print(test_accuracy)

print("read data")
test = pd.read_csv('./data/test.csv')

print("predict")
# Convert the dataframe to a numpy array
test_data = StandardScaler().fit_transform(np.float32(test.values))
# Reshape each flattened 784-pixel row into a 28x28 single-channel image
test_data = test_data.reshape(-1, 28, 28, 1)
test_pred = session.run(prediction, feed_dict={x_image:test_data})
test_labels = np.argmax(test_pred, axis=1)

print("plot")
k = 0 # Try different image indices k
print("Label Prediction: %i"%test_labels[k])
fig = plt.figure(figsize=(2,2)); plt.axis('off')
plt.imshow(test_data[k,:,:,0]); plt.show()

print ("done")
# clean
#session.close()
Developer: yixiaoyang, Project: SmallData, Lines of code: 31, Source file: cnn-tf-notebook.py
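A framework-free sketch of the same preprocessing step, with random values standing in for the Kaggle-style test.csv of 784 flattened pixels per row (the shapes, not the data, are what matter here):

import numpy as np
from sklearn.preprocessing import StandardScaler

# Fake flattened-image data standing in for test.csv: 100 rows of 784 pixel values.
flat = np.random.randint(0, 256, size=(100, 784)).astype(np.float32)

# Standardize the pixel columns, then reshape to NHWC layout for a conv net:
# (n_images, height, width, channels).
scaled = StandardScaler().fit_transform(flat)
images = scaled.reshape(-1, 28, 28, 1)
print(images.shape)    # (100, 28, 28, 1)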

Example 4: _fit

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import reshape [as alias]
    def _fit(self, X, y, features_names=None, preload_features=None,
             ranking_th=0.005):
        X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'],
                         multi_output=True)
        # Initialization
        n_features = X.shape[1]
        features = np.arange(n_features)

        cv = self.cv
        cv = check_cv(cv, y, classifier=is_classifier(self.estimator))
        if sklearn.__version__ == '0.17':
            n_splits = cv.n_folds
        else:
            n_splits = cv.get_n_splits(X, y)

        if self.verbose > 0:
            print("Fitting {0} folds for each of iteration".format(n_splits))

        if 0.0 < self.n_features_step < 1.0:
            step = int(max(1, self.n_features_step * n_features))
        else:
            step = int(self.n_features_step)
        if step <= 0:
            raise ValueError("Step must be >0")

        if features_names is not None:
            features_names = np.array(features_names)
        else:
            if self.features_names is not None:
                features_names = self.features_names
            else:
                features_names = np.arange(n_features)  # use indices

        # np.bool was removed from recent NumPy releases; use the builtin bool dtype.
        tentative_support_ = np.zeros(n_features, dtype=bool)
        current_support_ = np.zeros(n_features, dtype=bool)

        self.scores_ = []
        self.scores_confidences_ = []
        self.features_per_it_ = []

        if preload_features is not None:
            preload_features = np.unique(preload_features).astype('int')
            current_support_[preload_features] = True

            X_selected = X[:, features[current_support_]]
            y_hat, cv_scores = my_cross_val_predict(clone(self.estimator),
                                                    X_selected, y, cv=cv)
            target = y - y_hat
        else:
            target = y.copy()

        score, confidence_interval = -np.inf, 0
        proceed = np.sum(current_support_) < X.shape[1]
        while proceed:
            if self.verbose > 0:
                print('\nN-times variance of target: {}'.format(
                    target.var() * target.shape[0]))
            # update values
            old_confidence_interval = confidence_interval
            old_score = score

            if self.scale:
                target = StandardScaler().fit_transform(target.reshape(
                    -1, 1)).ravel()
                # target = MinMaxScaler().fit_transform(target.reshape(
                #     -1,1)).ravel()

            if self.verbose > 0:
                print()
                print('Feature ranking')
                print()
                print("target shape: {}".format(target.shape))
                print()

            # Rank the remaining features
            start_t = time.time()
            rank_estimator = clone(self.estimator)
            rank_estimator.fit(X, target)
            end_fit = time.time() - start_t

            # Get coefs
            start_t = time.time()
            if hasattr(rank_estimator, 'coef_'):
                coefs = rank_estimator.coef_
            elif hasattr(rank_estimator, 'feature_importances_'):
                coefs = rank_estimator.feature_importances_
            else:
                raise RuntimeError('The classifier does not expose '
                                   '"coef_" or "feature_importances_" '
                                   'attributes')
            end_rank = time.time() - start_t

            # Get ranks by ordering in ascending way
            if coefs.ndim > 1:
                ranks = np.argsort(safe_sqr(coefs).sum(axis=0))
                coefs = coefs.sum(axis=0)
            else:
                ranks = np.argsort(safe_sqr(coefs))

            # for sparse case ranks is matrix
#......... part of the code omitted here .........
Developer: teopir, Project: ifqi, Lines of code: 103, Source file: ifs.py
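The scaling step inside the loop above, StandardScaler().fit_transform(target.reshape(-1, 1)).ravel(), is a common idiom for standardizing a 1-D target vector: reshape to a single column for the scaler, then flatten back. A minimal sketch with made-up numbers:

import numpy as np
from sklearn.preprocessing import StandardScaler

target = np.array([3.2, 1.5, 4.8, 2.9, 3.7])       # 1-D residual / target vector

# StandardScaler expects a 2-D array, so reshape to a single column,
# scale, then ravel back to 1-D for the next fitting step.
scaled = StandardScaler().fit_transform(target.reshape(-1, 1)).ravel()
print(scaled.mean(), scaled.std())                  # ~0.0 and 1.0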

Example 5: numeric_fillna_standardize

# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Or: from sklearn.preprocessing.StandardScaler import reshape [as alias]
    def numeric_fillna_standardize(x):
        # Convert the categorical to integer codes (missing values become -1).
        x = x.cat.codes.astype(float)
        # Replace the missing-value code with the mean of the observed codes.
        x.loc[x == -1] = x[x != -1].mean()
        # Series.reshape was removed in newer pandas; reshape the underlying array instead.
        x = StandardScaler().fit_transform(x.values.reshape(-1, 1))

        return x
Developer: wtadler, Project: attitudes-and-the-court, Lines of code: 8, Source file: processing.py
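A self-contained usage sketch of the same idea, run on an invented categorical Series (not data from the attitudes-and-the-court project):

import pandas as pd
from sklearn.preprocessing import StandardScaler

s = pd.Series(['a', 'b', None, 'c', 'b'], dtype='category')

codes = s.cat.codes.astype(float)               # missing entries are coded as -1
codes.loc[codes == -1] = codes[codes != -1].mean()
standardized = StandardScaler().fit_transform(codes.values.reshape(-1, 1))
print(standardized.ravel())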


Note: The sklearn.preprocessing.StandardScaler.reshape examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or reusing the code, and do not reproduce this compilation without permission.