本文整理汇总了Python中sklearn.pipeline.Pipeline.name方法的典型用法代码示例。如果您正苦于以下问题:Python Pipeline.name方法的具体用法?Python Pipeline.name怎么用?Python Pipeline.name使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.pipeline.Pipeline
的用法示例。
在下文中一共展示了Pipeline.name方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_estimator_pipe
# 需要导入模块: from sklearn.pipeline import Pipeline [as 别名]
# 或者: from sklearn.pipeline.Pipeline import name [as 别名]
def get_estimator_pipe(name, model, vec_only, num_alch_cat, lsa_comp, reducer, stem):
    """Build a full Pipeline: the configured transform chain plus *model* as the final step.

    The returned pipeline is tagged with a ``name`` attribute so it can be
    identified later (e.g. in logs or result tables).
    """
    steps = get_trf_chain(vec_only, num_alch_cat, lsa_comp, reducer, stem)
    steps.append((name, model))
    estimator_pipe = Pipeline(steps)
    estimator_pipe.name = name
    return estimator_pipe
示例2: build_simple_pipes
# 需要导入模块: from sklearn.pipeline import Pipeline [as 别名]
# 或者: from sklearn.pipeline.Pipeline import name [as 别名]
def build_simple_pipes():
    """Wrap every available classifier in its own single-step Pipeline.

    Useful when all feature transforms have already been applied upfront
    and only the final estimators vary.  Each pipeline inherits the
    classifier's ``name`` attribute.
    """
    pipes = []
    for estimator in get_all_classifiers():
        wrapped = Pipeline([(estimator.name, estimator)])
        wrapped.name = estimator.name
        pipes.append(wrapped)
    return pipes
示例3: predict
# 需要导入模块: from sklearn.pipeline import Pipeline [as 别名]
# 或者: from sklearn.pipeline.Pipeline import name [as 别名]
optimizer = keras.optimizers.Nadam(lr=0.002)
model.compile(loss='mse', optimizer=optimizer)
model.fit(x_seq, y_seq, batch_size=self.batch_size, verbose=1, nb_epoch=self.n_epochs, shuffle=False)
self.model = model
return self
def predict(self, x):
    """Predict on *x* by re-running the model over the train+test history.

    The training frame is prepended so the sliding windows see the full
    sequence; leading rows are trimmed so the sample count is an exact
    multiple of ``batch_size * sequence_length`` (required by the stateful
    batched LSTM).  Only the tail rows corresponding to *x* are returned.
    """
    x_merged = pd.concat([self.x_train, x])
    # Drop the leading remainder so the length divides evenly into batches.
    offset = len(x_merged.index) % (self.batch_size * self.sequence_length)
    windows = self.sliding_window(x_merged.iloc[offset:])
    raw = self.model.predict(windows, batch_size=self.batch_size, verbose=1)
    stacked = np.vstack(raw)
    return stacked[-len(x):, :]
# LSTM model: drop the engineered columns whose name has "-" as its
# second-to-last character, scale/normalise, then fit the LSTM.
# NOTE(review): FeatureRemover, ScaleAndNorm, MyLSTM, SomeLinearWrapper and
# x_all are project-local — defined elsewhere in this module.
_lstm_drop_cols = [c for c in x_all.columns if c[-2] == "-"]
_lstm_steps = [
    ("drop", FeatureRemover(_lstm_drop_cols)),
    ("scaleandnorm", ScaleAndNorm()),
    ("lstm", MyLSTM(batch_size=32, sequence_length=20, n_epochs=10)),
]
lstm = Pipeline(_lstm_steps)
lstm.name = "lstm"
lstm = SomeLinearWrapper(lstm)
示例4: fit
# 需要导入模块: from sklearn.pipeline import Pipeline [as 别名]
# 或者: from sklearn.pipeline.Pipeline import name [as 别名]
del x[col]
return x
def fit(self, x, y=None):
    """No-op fit: this transformer needs no training.

    Returns ``self`` to satisfy the scikit-learn estimator API.
    """
    return self
# Ridge regression baseline: drop the ATTB column, impute missing values,
# standardise the features, then fit a normalised ridge model (alpha=0.4).
# NOTE(review): FeatureRemover is project-local; preprocessing/linear_model
# presumably come from sklearn imports elsewhere in this file.
_ridge_steps = [
    ("drop", FeatureRemover(["ATTB"])),
    ("dropna", preprocessing.Imputer()),
    ("scale", preprocessing.StandardScaler()),
    ("ridge", linear_model.Ridge(normalize=True, fit_intercept=True, alpha=0.4)),
]
model_ridge = Pipeline(_ridge_steps)
model_ridge.name = "ridge"
# subclassed to play with eta decay and dart booster
class MyXGBRegressor(xgboost.XGBRegressor):
# overwriting to get desired behaviour
def fit(self, X, y, eval_set=None, eval_metric=None,
early_stopping_rounds=None, verbose=True):
# pylint: disable=missing-docstring,invalid-name,attribute-defined-outside-init
"""
Fit the gradient boosting model
Parameters
----------
示例5: predict
# 需要导入模块: from sklearn.pipeline import Pipeline [as 别名]
# 或者: from sklearn.pipeline.Pipeline import name [as 别名]
model.add(Dense(700, input_dim=input_dim))
model.add(Activation('tanh'))
model.add(Dense(700))
model.add(Activation('tanh'))
model.add(Dense(300))
model.add(Activation('tanh'))
#model.add(Dropout(0.1))
model.add(Dense(output_dim))
model.add(Activation('relu'))
optimizer = keras.optimizers.Adam()
model.compile(loss='mse', optimizer=optimizer)
model.fit(x.as_matrix(), y.as_matrix(), batch_size=self.batch_size, verbose=1, nb_epoch=self.n_epochs, shuffle=True)
self.model = model
return self
def predict(self, x):
    """Run the trained network on *x* and return the predictions.

    The frame is converted to a plain matrix before calling the model;
    ``np.vstack`` normalises the output into a single 2-D array.
    """
    raw = self.model.predict(x.as_matrix(), batch_size=self.batch_size, verbose=1)
    return np.vstack(raw)
# Dense feed-forward NN model: scale/normalise the features, then fit the
# Keras network.  NOTE(review): ScaleAndNorm, NN and SomeLinearWrapper are
# project-local — defined elsewhere in this module.
_nn_steps = [
    ("scaleandnorm", ScaleAndNorm()),
    ("nn", NN(batch_size=32, n_epochs=10)),
]
nn = Pipeline(_nn_steps)
nn.name = "nn"
nn = SomeLinearWrapper(nn)