本文整理汇总了Python中sklearn.pipeline.Pipeline.partial_fit方法的典型用法代码示例。如果您正苦于以下问题:Python Pipeline.partial_fit方法的具体用法?Python Pipeline.partial_fit怎么用?Python Pipeline.partial_fit使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.pipeline.Pipeline
的用法示例。
在下文中一共展示了Pipeline.partial_fit方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: model_iter
# 需要导入模块: from sklearn.pipeline import Pipeline [as 别名]
# 或者: from sklearn.pipeline.Pipeline import partial_fit [as 别名]
def model_iter(train_file_list, newdata_file, idcol, tcol,
        learner, lparams=None, drops=None, split=0.1, scaler=None, ofile=None, seed=123, verbose=False):
    """
    Incrementally build and run an ML algorithm for the given train files
    and classifier name, using partial_fit so the full dataset never has
    to fit in memory. The learners are defined externally
    in DCAF.ml.clf module.

    :param train_file_list: list of training data files, consumed one at a time
    :param newdata_file: optional file with new data to score after training
    :param idcol: identifier column name; always excluded from training data
    :param tcol: target column name
    :param learner: learner name; only 'SGDClassifier' and 'SGDRegressor'
        are accepted since they support partial_fit
    :param lparams: learner parameters, either a dict or a JSON string
    :param drops: columns to drop, as a list or a comma-separated string
    :param split: held-out test fraction per file used for scoring;
        a falsy value disables the hold-out split
    :param scaler: name of a class in sklearn.preprocessing to wrap the
        learner in a Pipeline
    :param ofile: optional CSV output file for predictions
    :param seed: random seed applied to the learner and to random module
    :param verbose: print extra diagnostics
    """
    if learner not in ['SGDClassifier', 'SGDRegressor']:
        raise Exception("Unsupported learner %s" % learner)
    clf = learners()[learner]
    setattr(clf, "random_state", seed)
    random.seed(seed)
    if lparams:
        if isinstance(lparams, str):
            lparams = json.loads(lparams)
        elif isinstance(lparams, dict):
            pass
        else:
            raise Exception('Invalid data type for lparams="%s", type: %s' % (lparams, type(lparams)))
        # apply user-supplied hyper-parameters directly onto the estimator
        for key, val in lparams.items():
            setattr(clf, key, val)
    if scaler:
        clf = Pipeline([('scaler', getattr(preprocessing, scaler)()), ('clf', clf)])
    print("clf:", clf)
    # normalize drops into a list that always contains the id column
    if drops:
        # FIX: `basestring` is Python 2 only and raises NameError on Python 3;
        # the rest of the file uses Python 3 print() calls, so use str here.
        if isinstance(drops, str):
            drops = drops.split(',')
        if idcol not in drops:
            drops += [idcol]
    else:
        drops = [idcol]
    fit = None
    for train_file in train_file_list:
        print("Train file", train_file)
        # read data and normalize it
        xdf = read_data(train_file, drops, scaler=scaler)
        # get target variable and exclude choice from train data
        target = xdf[tcol]
        xdf = xdf.drop(tcol, axis=1)
        if verbose:
            print("Columns:", ','.join(xdf.columns))
            print("Target:", target)
        if split:
            # FIX: honor the `split` parameter instead of the hard-coded 0.1
            x_train, x_rest, y_train, y_rest = \
                train_test_split(xdf, target, test_size=split, random_state=seed)
            time0 = time.time()
            fit = clf.partial_fit(x_train, y_train)
            if verbose:
                print("Train elapsed time", time.time()-time0)
            print("### SCORE", clf.score(x_rest, y_rest))
        else:
            x_train = xdf
            y_train = target
            time0 = time.time()
            fit = clf.partial_fit(x_train, y_train)
            if verbose:
                print("Train elapsed time", time.time()-time0)
    # new data for which we want to predict
    if newdata_file:
        tdf = read_data(newdata_file, drops, scaler=scaler)
        if tcol in tdf.columns:
            tdf = tdf.drop(tcol, axis=1)
        datasets = [int(i) for i in list(tdf['dataset'])]
        dbs_h = get_dbs_header(tdf, newdata_file)
        dbses = [int(i) for i in list(tdf[dbs_h])]
        # NOTE(review): predict_proba exists only on classifiers; this path
        # presumably assumes learner == 'SGDClassifier' — confirm with callers.
        predictions = fit.predict_proba(tdf)
        data = {'dataset': datasets, dbs_h: dbses, 'prediction': predictions}
        out = pd.DataFrame(data=data)
        if ofile:
            out.to_csv(ofile, header=True, index=False)