本文整理匯總了Python中sklearn.preprocessing.StandardScaler.get_params方法的典型用法代碼示例。如果您正苦於以下問題:Python StandardScaler.get_params方法的具體用法?Python StandardScaler.get_params怎麽用?Python StandardScaler.get_params使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類sklearn.preprocessing.StandardScaler的用法示例。
在下文中一共展示了StandardScaler.get_params方法的2個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: print
# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Alternatively: from sklearn.preprocessing.StandardScaler import get_params [as alias]
#
# NOTE(review): the four lines below are the tail end of a feature-extraction
# call whose opening is outside this excerpt; the keyword arguments are drawn
# from project-level FeatureVectorConfig settings — confirm against the caller.
spatial_size=FeatureVectorConfig.SPATIALSIZE,  # spatial-binning size
hist_feat=FeatureVectorConfig.HISTOGRAMFEATURES,  # include color-histogram features?
hist_bins=FeatureVectorConfig.HISTOGRAMBINS,  # number of histogram bins
hog_feat=FeatureVectorConfig.HOGFEATURES)  # include HOG features?
t2 = time.time()
# `t` is the start timestamp taken before extraction (outside this excerpt)
print(round(t2-t, 2), 'Seconds to extract HOG features...')
# Create an array stack of feature vectors (cars on top of non-cars)
X = np.vstack((car_features, notcar_features)).astype(np.float64)
# Fit a per-column scaler
X_scaler = StandardScaler().fit(X)
# Apply the scaler to X
scaled_X = X_scaler.transform(X)
# save the scaler
# NOTE(review): StandardScaler.std_ was deprecated and later removed in favor
# of scale_ — this print only works on old scikit-learn versions; verify.
print('X_scaler: ', X_scaler, ", get_params:", X_scaler.get_params(deep=True), ", mean:", X_scaler.mean_, ", std:", X_scaler.std_)
print('saving scaler to: ', SCALERFILENAME)
#SaveAndRestoreClassifier.saveScalerFitX(X, SCALERFILENAME)
SaveAndRestoreClassifier.saveScaler(X_scaler, SCALERFILENAME)
# Define the labels vector: 1 for car samples, 0 for non-car samples,
# ordered to match the vstack above
y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))
# Split up data into randomized training and test sets (80/20 split)
rand_state = np.random.randint(0, 100)
X_train, X_test, y_train, y_test = train_test_split(
scaled_X, y, test_size=0.2, random_state=rand_state)
print('Using:',FeatureVectorConfig.ORIENTATIONBINS,'orientations',FeatureVectorConfig.PIXELSPERCELL,
'pixels per cell and', FeatureVectorConfig.CELLSPERBLOCK,'cells per block')
print('Feature vector length:', len(X_train[0]))
示例2: StandardScaler
# Required import: from sklearn.preprocessing import StandardScaler [as alias]
# Alternatively: from sklearn.preprocessing.StandardScaler import get_params [as alias]
#
# NOTE(review): Python 2 code (statement-form `print`). Instead of calling
# fit(), this manually populates the scaler's fitted attributes from
# statistics of X_train, then transforms X_test with them.
scaler = StandardScaler()
# assumes np.mean/np.std over X_train yield per-column values that support
# integer indexing (e.g. X_train is a pandas DataFrame) — TODO confirm;
# on a plain 2-D ndarray these would be scalars and `means[0]` would fail
means = np.mean(X_train)
std = np.std(X_train)
print means[0]
# Pre-size the fitted attributes, then copy the statistics in element-wise.
# NOTE(review): std_ was deprecated/removed in newer scikit-learn (scale_
# replaced it) — this only works on old versions; verify.
scaler.mean_ = np.zeros(len(means))
scaler.std_ = np.ones(len(means))
for i in range(len(means)):
scaler.mean_[i] = means[i]
scaler.std_[i] = std[i]
print scaler.mean_
#scaler.mean_ =
#X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# get_params(deep=True) reports only constructor params (copy, with_mean,
# with_std) — the manually-set mean_/std_ are printed separately below
print scaler.get_params(deep=True)
print scaler.mean_
print scaler.std_
sys.exit()
# NOTE(review): everything below is unreachable because of sys.exit() above.
# Let's retrain a new model on the first subset call the **training set**:
# In[15]:
from sklearn.ensemble import AdaBoostClassifier as ABC
from sklearn.tree import DecisionTreeClassifier as DC
# Weak learner: shallow decision tree; leaf size tied to 5% of the training set
dt = DC(max_depth=3,min_samples_leaf=0.05*len(X_train))
abc = ABC(dt,algorithm='SAMME',
n_estimators=8,
learning_rate=0.5)