本文整理汇总了Python中sklearn.ensemble.BaggingRegressor.fit方法的典型用法代码示例。如果您正苦于以下问题:Python BaggingRegressor.fit方法的具体用法?Python BaggingRegressor.fit怎么用?Python BaggingRegressor.fit使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.ensemble.BaggingRegressor
的用法示例。
在下文中一共展示了BaggingRegressor.fit方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: train_model
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
def train_model(train, test, labels):
    """Fit a bagged random-forest regressor on (train, labels) and return a
    DataFrame pairing each test id with its predicted relevance.

    Parameters
    ----------
    train, test : pandas.DataFrame
        Feature matrices; `test` must carry an 'id' column.
    labels : array-like
        Target relevance values aligned with `train`.
    """
    rf = RandomForestRegressor(n_estimators=15, max_depth=6, random_state=10)
    clf = BaggingRegressor(rf, n_estimators=45, max_samples=0.2, random_state=25)
    clf.fit(train, labels)
    print("Good!")

    predictions = clf.predict(test)
    print(predictions.shape)
    predictions = pd.DataFrame(predictions, columns=['relevance'])
    print("Good again!")
    print("Predictions head -------")
    print(predictions.head())
    print(predictions.shape)
    print("TEST head -------")
    print(test.head())
    print(test.shape)

    # Column-bind the test ids with the predictions. Rows are assumed to be
    # positionally aligned; verify_integrity=False skips duplicate-index checks.
    predictions = pd.concat([test['id'], predictions], axis=1, verify_integrity=False)
    print(predictions)
    return predictions
示例2: model_fit_rf_bagging
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
def model_fit_rf_bagging():
    """Train a bagged random forest on engineered match features, print the
    holdout RMSE, and return clamped predictions for the submission test set.

    Relies on module-level DataFrames `full_data` and `real_full_test`.
    """
    def in_limits(x):
        # Relevance labels live in [1, 3]; clamp predictions into that range.
        if x < 1:
            return 1
        if x > 3:
            return 3
        return x

    feature_cols = ['count_words', 'count_digits', 'match_d_title',
                    'match_d_description', 'match_w_title',
                    'match_w_description', 'match_d_attribute',
                    'match_w_attribute']
    X = full_data[feature_cols].values
    y = full_data['relevance'].values
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.25, random_state=42)

    rf = RandomForestRegressor(n_estimators=15, max_depth=6, random_state=0)
    clf = BaggingRegressor(rf, n_estimators=45, max_samples=0.1, random_state=25)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)

    # np.float was removed in NumPy 1.24; the builtin float is equivalent here.
    in_limits = np.vectorize(in_limits, otypes=[float])
    y_pred = in_limits(y_pred)
    RMSE = mean_squared_error(y_test, y_pred) ** 0.5
    print("RMSE: ", RMSE)

    # Predictions for the submission set, clamped the same way.
    real_X_test = real_full_test[feature_cols].values
    test_pred = clf.predict(real_X_test)
    test_pred = in_limits(test_pred)
    return test_pred
示例3: avmPredict
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
def avmPredict(params):
    """Fit a default BaggingRegressor on comparables near (lat, long) and
    return the prediction for the property described by `params`.

    Rows accepted by `weightF` within radius `r` of the query point are used
    as training data; CSV column 3 is the target, all others are features.
    """
    town = getPlace(params['lat'], params['long'])[0]  # NOTE(review): unused; call kept for parity
    x, y, z = getXYZ(params['lat'], params['long'])
    r = 1.0

    data = []
    target = []
    with open('../../../data/working22.csv') as fh:
        reader = csv.reader(fh)
        next(reader)  # skip the header row
        for row in reader:
            # On Python 3, map() is lazy — materialize the features to a list.
            features = [float(v) for v in row[:3] + row[4:]]
            if weightF([x, y, z], features[0:3], r):
                data.append(features)
                target.append(float(row[3]))

    ensemble = BaggingRegressor()
    ensemble.fit(data, target)
    test = createTest(params)
    return ensemble.predict(test)
示例4: train_bagging_xgboost
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
def train_bagging_xgboost(X, Y):
    """Fit a bagging ensemble of linear-booster XGBoost regressors on (X, Y)
    and return the fitted ensemble."""
    base_model = xgb.XGBRegressor(
        max_depth=6,
        learning_rate=0.02,
        n_estimators=300,
        silent=True,
        objective='reg:linear',
        subsample=0.7,
        reg_alpha=0.8,
        reg_lambda=0.8,
        booster="gblinear",
    )
    ensemble = BaggingRegressor(base_model, max_features=0.7, n_estimators=30)
    ensemble.fit(X, Y)
    return ensemble
示例5: random_forest
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
def random_forest(X, Y, Xt):
    """Train bagged random forests on (X, Y) and return predictions for Xt."""
    print('learn')
    base_forest = RandomForestRegressor(n_estimators=15, max_depth=6, random_state=0)
    bagger = BaggingRegressor(base_forest, n_estimators=45, max_samples=0.1,
                              random_state=25)
    bagger.fit(X, Y)
    print('predict')
    return bagger.predict(Xt)
示例6: procedureA
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
def procedureA(goldenFlag=False):
    """Train a bagged decision-tree regressor on one-hot-encoded data and
    evaluate it on a 10% holdout split.

    Parameters
    ----------
    goldenFlag : bool
        When True, augment features via `getGoldenX` before splitting.

    Returns
    -------
    (clf, encoder, min_max_scaler) : the fitted model, the one-hot encoder,
    and an (unfitted) MinMaxScaler kept for downstream use.
    """
    popFlag = True
    X, Y = getDataXY(currYearFlag=False, popFlag=popFlag)
    X, Y = shuffle(X, Y, random_state=0)

    if popFlag:
        # First two columns pass through unchanged; the rest are one-hot encoded.
        encoder = oneHot(X[:, 2:])
        Xt = encoder.transform(X[:, 2:])
        Xt = np.hstack((X[:, :2], Xt))
    else:
        encoder = oneHot(X)
        Xt = encoder.transform(X)

    # NOTE(review): buySet is built but never read in this function.
    buySet = set()
    for i in range(X.shape[0]):
        buySet.add((X[i][0], X[i][2]))

    # NOTE(review): scaler is created but never fitted here — returned for callers.
    min_max_scaler = preprocessing.MinMaxScaler()

    if goldenFlag:
        print(Xt.shape)
        Xt = getGoldenX(Xt, 2,
                        2 + encoder.feature_indices_[1],
                        2 + encoder.feature_indices_[0],
                        2 + min(9, encoder.feature_indices_[1]))

    # 90/10 train/holdout split on the already-shuffled data.
    split = 0.9
    cut = int(Xt.shape[0] * split)
    X_train, X_test = Xt[:cut, :], Xt[cut:, :]
    cut_y = int(Y.shape[0] * split)
    Y_train = Y[:cut_y, :].ravel()
    Y_test = Y[cut_y:, :].ravel()
    print(X_train.shape)
    print(X_test.shape)

    clf = BaggingRegressor(DecisionTreeRegressor(), n_estimators=125,
                           n_jobs=4, random_state=0)
    clf.fit(X_train, Y_train.ravel())
    Y_pred = clf.predict(X_test)
    evaluatePred(Y_pred, Y_test)

    return clf, encoder, min_max_scaler
示例7: train_model
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
def train_model(training, testing, window=5, n=5):
    """Train four regressors on the training window, report each test MSE,
    blend their predictions, and return `testing` annotated with the blend.

    `window` and `n` are kept for interface compatibility but unused here.
    """
    X_train, y_train = prepare_data(training)
    X_test, y_test = prepare_data(testing)

    rf = RandomForestRegressor()
    rf.fit(X_train, y_train)
    predrf = rf.predict(X_test)
    print("mse for random forest regressor: ", mean_squared_error(predrf, y_test))

    gb = GradientBoostingRegressor(n_estimators=100, learning_rate=0.025)
    gb.fit(X_train, y_train)
    predgb = gb.predict(X_test)
    print("mse for gradient boosting regressor: ", mean_squared_error(predgb, y_test))

    # Plot normalized feature importances from the gradient-boosting model.
    fx_imp = pd.Series(gb.feature_importances_,
                       index=['bb', 'momentum', 'sma', 'volatility'])
    fx_imp /= fx_imp.max()  # normalize
    # Series.sort() was removed from pandas; sort_values() is the replacement.
    fx_imp = fx_imp.sort_values()
    ax = fx_imp.plot(kind='barh')
    fig = ax.get_figure()
    fig.savefig("output/feature_importance.png")

    adb = AdaBoostRegressor(DecisionTreeRegressor())
    adb.fit(X_train, y_train)
    predadb = adb.predict(X_test)
    print("mse for adaboosting decision tree regressor: ", mean_squared_error(predadb, y_test))

    # KNN is distance-based, so standardize features for it only.
    scale = StandardScaler()
    scale.fit(X_train)
    X_trainscale = scale.transform(X_train)
    X_testscale = scale.transform(X_test)
    knn = BaggingRegressor(KNeighborsRegressor(n_neighbors=10),
                           max_samples=0.5, max_features=0.5)
    knn.fit(X_trainscale, y_train)
    predknn = knn.predict(X_testscale)
    print("mse for bagging knn regressor: ", mean_squared_error(predknn, y_test))

    # Fixed-weight blend of the four models.
    pred_test = 0.1 * predrf + 0.2 * predgb + 0.1 * predadb + 0.6 * predknn
    print("mse for ensemble all the regressors: ", mean_squared_error(pred_test, y_test))

    result = testing.copy()
    # .ix was removed from pandas; use .loc with positionally-sliced index
    # labels, which also creates the new columns (NaN outside the slice).
    result.loc[result.index[5:-5], 'trend'] = pred_test
    result.loc[result.index[10:], 'pred'] = pred_test * result.iloc[5:-5]['IBM'].values
    result.loc[result.index[:-5], 'pred_date'] = result.index[5:]
    return result
示例8: procc_modelfusion
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
def procc_modelfusion(df_test, data_test):
    """Bag a logistic-regression model on the engineered Titanic features and
    write test-set survival predictions to a CSV submission file.

    NOTE(review): training data comes from the module-level `df`, not from
    the `df_test` parameter — confirm this is intentional.
    """
    from sklearn.ensemble import BaggingRegressor
    from sklearn import linear_model

    train_df = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass.*|Mother|Child|Family|Title')
    # .as_matrix() was removed in pandas 1.0; .values is the replacement.
    train_np = train_df.values

    y = train_np[:, 0]   # column 0 is the Survived label
    X = train_np[:, 1:]  # remaining columns are the feature matrix

    # Bag the logistic-regression classifier (via BaggingRegressor, so the
    # output is an averaged probability-like score, not a hard class).
    clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
    bagging_clf = BaggingRegressor(clf, n_estimators=10, max_samples=0.8,
                                   max_features=1.0, bootstrap=True,
                                   bootstrap_features=False, n_jobs=-1)
    bagging_clf.fit(X, y)

    test = df_test.filter(regex='Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass.*|Mother|Child|Family|Title')
    predictions = bagging_clf.predict(test)
    result = pd.DataFrame({'PassengerId': data_test['PassengerId'].values,
                           'Survived': predictions.astype(np.int32)})
    result.to_csv("logistic_regression_predictions3.csv", index=False)
示例9: get_bagging_prediction
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
def get_bagging_prediction(X_train, y_train, X_test, X_valid=None, GS=False):
    """Predict with a bagged random forest, optionally tuned by grid search.

    Returns `y_pred` for X_test, or `(y_pred, valid_pred)` when X_valid is
    given. With GS=True, a GridSearchCV over the bagged estimator is used.
    """
    if not GS:
        rf = RandomForestRegressor(n_estimators=15, max_depth=6, random_state=0)
        clf = BaggingRegressor(rf, n_estimators=45, max_samples=0.1, random_state=25)
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        if X_valid is None:
            return y_pred
        return y_pred, clf.predict(X_valid)

    rf = RandomForestRegressor(n_estimators=15, max_depth=6, random_state=0)
    clf = BaggingRegressor(rf, n_estimators=45, max_samples=0.1, random_state=25)
    # BUGFIX: the grid keys must address the inner estimator through the
    # bagger's parameter path; 'rfr__*' matches no parameter and would make
    # GridSearchCV raise ValueError.
    param_grid = {'base_estimator__max_features': [10],
                  'base_estimator__max_depth': [20]}
    model = grid_search.GridSearchCV(estimator=clf, param_grid=param_grid,
                                     n_jobs=-1, cv=2, verbose=VERBOSE,
                                     scoring=RMSE)
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    if X_valid is None:
        return y_pred
    return y_pred, model.predict(X_valid)
示例10: Regressor
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
class Regressor(BaseEstimator):
    """Bagged linear-regression estimator exposing the sklearn fit/predict API."""

    def __init__(self):
        self.clf = BaggingRegressor(LinearRegression())
        # NOTE(review): this projection is instantiated but never applied —
        # the transform calls in fit/predict are disabled. Kept so existing
        # code that inspects `self.sp` keeps working.
        self.sp = SparseRandomProjection(n_components=5)

    def fit(self, X, y):
        """Fit the bagged model; y is flattened to 1-D as sklearn expects."""
        self.clf.fit(X, y.ravel())
        return self  # sklearn convention: fit returns self for chaining

    def predict(self, X):
        """Return predictions from the bagged model."""
        return self.clf.predict(X)
示例11: runTests
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
def runTests():
    """Repeatedly train bagged decision trees on synthetic samples and print
    the best train/test R^2 ("ROI") over `numTests` runs.

    Relies on module-level `numSamples`, `numTestSamples`, `numTests`,
    and `numTrees`.
    """
    # Generate the training samples, extract training features and target.
    trainSamples = GenSamples(numSamples)
    trainFeatures = extractFeatures(trainSamples)
    trainPred = extractPred(trainSamples)
    # Generate the test samples, extract test features and target.
    testSamples = GenSamples(numTestSamples)
    testFeatures = extractFeatures(testSamples)
    testPred = extractPred(testSamples)

    R2List = OrderedDict()
    R2List['TrainROI'] = []
    R2List['TestROI'] = []
    print('Running Tests: ')
    for i in range(numTests):
        # bootstrap=True by default (rows sampled with replacement);
        # bootstrap_features=False by default, and max_features=1 limits
        # each tree to a single feature.
        classifier = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
                                      n_estimators=numTrees,
                                      max_samples=int(0.5 * numSamples),
                                      max_features=int(1))
        classifier.fit(trainFeatures, trainPred)
        predictROI = {
            'Training': classifier.predict(trainFeatures),
            'Test': classifier.predict(testFeatures),
        }
        R2List['TrainROI'].append(r2_score(trainPred, predictROI['Training']))
        R2List['TestROI'].append(r2_score(testPred, predictROI['Test']))

    print('Best Train ROI: ', max(R2List['TrainROI']))
    print('Best Test ROI: ', max(R2List['TestROI']))
示例12: test_bagging_regressor_with_missing_inputs
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
def test_bagging_regressor_with_missing_inputs():
    """Check that BaggingRegressor accepts X with missing/infinite data when
    the wrapped pipeline imputes, and propagates errors when it does not."""
    X = np.array([
        [1, 3, 5],
        [2, None, 6],
        [2, np.nan, 6],
        [2, np.inf, 6],
        [2, -np.inf, 6],  # np.NINF was removed in NumPy 2.0; -np.inf is identical
    ])
    y_values = [
        np.array([2, 3, 3, 3, 3]),   # single-output target
        np.array([                   # multi-output target
            [2, 1, 9],
            [3, 6, 8],
            [3, 6, 8],
            [3, 6, 8],
            [3, 6, 8],
        ])
    ]
    for y in y_values:
        regressor = DecisionTreeRegressor()
        # Impute NaN and +/-inf before the tree so fitting cannot fail.
        pipeline = make_pipeline(
            Imputer(),
            Imputer(missing_values=np.inf),
            Imputer(missing_values=-np.inf),
            regressor
        )
        pipeline.fit(X, y).predict(X)
        bagging_regressor = BaggingRegressor(pipeline)
        y_hat = bagging_regressor.fit(X, y).predict(X)
        assert_equal(y.shape, y_hat.shape)

    # Verify that exceptions raised by the wrapped regressor propagate
    # through the bagging wrapper when no imputation is performed.
    regressor = DecisionTreeRegressor()
    pipeline = make_pipeline(regressor)
    assert_raises(ValueError, pipeline.fit, X, y)
    bagging_regressor = BaggingRegressor(pipeline)
    assert_raises(ValueError, bagging_regressor.fit, X, y)
示例13: BaggingRegressor
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
class BaggingRegressor(BaseEstimator):
    """Config-driven wrapper around ``sklearn.ensemble.BaggingRegressor``.

    The base estimator is described declaratively and loaded at runtime, so
    it can be configured from JSON/YAML:

    ```
    "model": {
        "class": "ume.ensemble.BaggingRegressor",
        "params": {
            "base_estimator": {
                "class": "sklearn.svm.SVR",
                "params": {
                    "kernel": "rbf",
                    "degree": 1,
                    "C": 1000000.0,
                    "epsilon": 0.01,
                },
            },
            "bag_kwargs": {
                "n_estimators": 100,
                "n_jobs": 5,
                "max_samples": 0.9,
            },
        }
    }
    ```
    """

    def __init__(self, base_estimator=None, bag_kwargs=None):
        # Resolve the dotted class path, build the inner estimator from its
        # params, then wrap it in the sklearn bagging ensemble.
        estimator_cls = dynamic_load(base_estimator['class'])
        inner_estimator = estimator_cls(**base_estimator['params'])
        self._model = SK_BaggingRegressor(base_estimator=inner_estimator,
                                          **bag_kwargs)

    def fit(self, X, y):
        """Delegate fitting to the wrapped bagging ensemble."""
        return self._model.fit(X, y)

    def predict(self, X):
        """Delegate prediction to the wrapped bagging ensemble."""
        return self._model.predict(X)
示例14: RandomForestRegressor
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
# NOTE(review): fragment of a larger benchmark script — `name`, `params`,
# `X`, `y`, `max_depth` and `n_estimators` are defined by surrounding code
# (presumably a loop over sklearn toy datasets).
X[np.isnan(X)] = 0.  # zero-impute missing values before fitting
print('******************************************')
print(name)
print('******************************************')
if name == 'Boston' or name == 'Diabetes':  # regression datasets
    rfr = RandomForestRegressor(**params)
    rfr.fit(X, y)
    print('Score RandomForestRegressor = %s' % (rfr.score(X, y)))
    scores_rfr = cross_val_score(rfr, X, y, cv=5)
    print('Cross Val Score RandomForestRegressor = %s' % (np.mean(scores_rfr)))
    br = BaggingRegressor(base_estimator=DecisionTreeRegressor(max_depth=max_depth),
                          n_estimators=n_estimators)
    br.fit(X, y)
    print('Score BaggingRegressor = %s' % (br.score(X, y)))
    scores_br = cross_val_score(br, X, y, cv=5)
    print('Cross Val Scores of BR = %s' % (np.mean(scores_br)))
if name == 'Iris' or name == 'Digits':  # classification datasets
    rfc = RandomForestClassifier(**params)
    rfc.fit(X, y)
    print('Score RandomForestClassifier = %s' % (rfc.score(X, y)))
    scores_rfc = cross_val_score(rfc, X, y, cv=5)
    # message typo fixed: "Corss" -> "Cross"
    print('Cross Val Scores of RandomForestClassifier = %s' % (np.mean(scores_rfc)))
    bc = BaggingClassifier(base_estimator=DecisionTreeClassifier(max_depth=max_depth),
                           n_estimators=n_estimators)
    bc.fit(X, y)
    # message typo fixed: "==" -> "="
    print('Score BaggingClassifier = %s' % (bc.score(X, y)))
示例15: zip
# 需要导入模块: from sklearn.ensemble import BaggingRegressor [as 别名]
# 或者: from sklearn.ensemble.BaggingRegressor import fit [as 别名]
# NOTE(review): fragment of a larger plotting script — `ax2`, `gradBoost`,
# `features`, `dates`, `modeldata`, `Xtrain`, `Ytrain`, `Xtest`, `Ytest`
# are defined earlier, outside this excerpt.
ax2.set_title('Error between actual and predicted loads')
ax2.set_ylabel("Error, MW")
# Rank gradient-boosting feature importances (ascending) and plot as a bar chart.
featImportances=gradBoost.feature_importances_
pos = np.arange(len(features))
pairs = zip(features, featImportances)
sorted_pairs = sorted(pairs, key = lambda pair: pair[1])
features_sorted, featImportances_sorted = zip(*sorted_pairs)
fig, ax = plt.subplots()
plt.barh(pos, featImportances_sorted, 1, color = "blue")
plt.yticks(pos,features_sorted)
ax.set_title('Gradient Boosting: Relative Feature Importance')
#Tree Bagging
TreeBagger=BaggingRegressor()
TreeBagger.fit(Xtrain, Ytrain)
# Top panel: actual load for rows 45000:50000 vs the bagger's predictions.
fig = plt.figure()
ax1 = fig.add_subplot(2, 1, 1)
ax1.plot_date(dates, modeldata.Load[45000:50000], 'r-',tz=None, xdate=True,
ydate=False, label='Actual Load')
ax1.set_title('Tree Bagging: Actual and Predicted Loads')
plt.plot(dates, TreeBagger.predict(Xtest), 'g-',label='Predicted Load')
ax1.legend()
# Bottom panel: residual (actual - predicted) over the same window.
ax2 = fig.add_subplot(2, 1, 2)
ax2.plot_date(dates, modeldata.Load[45000:50000]-TreeBagger.predict(Xtest), 'r-',tz=None, xdate=True,
ydate=False)
ax2.set_title('Error between actual and predicted loads, MW')
# [test MSE, train MSE] collected for the later model-comparison charts.
MSEs_Bagging=[mean_squared_error(Ytest, TreeBagger.predict(Xtest)), mean_squared_error(Ytrain, TreeBagger.predict(Xtrain))]
#Model Comparison: Bar charts