This article collects typical usage examples of the Python method sklearn.linear_model.Perceptron.partial_fit. If you are wondering what Perceptron.partial_fit does, how to call it, or want to see it used in real code, the curated examples below may help. You can also look further into the usage of the containing class, sklearn.linear_model.Perceptron.
The following shows 5 code examples of the Perceptron.partial_fit method, ordered by popularity by default.
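Before the examples, here is a minimal, self-contained sketch of the general partial_fit pattern (the data is randomly generated purely for illustration): the full set of class labels must be supplied on the first call, and later calls update the same model incrementally.
import numpy as np
from sklearn.linear_model import Perceptron

rng = np.random.RandomState(0)
X = rng.rand(1000, 20)                       # toy feature matrix
y = (X[:, 0] > 0.5).astype(int)              # toy binary labels

clf = Perceptron()
classes = np.unique(y)                       # every class must be known up front

for start in range(0, len(X), 100):          # train in chunks of 100 samples
    X_chunk = X[start:start + 100]
    y_chunk = y[start:start + 100]
    if start == 0:
        clf.partial_fit(X_chunk, y_chunk, classes=classes)
    else:
        clf.partial_fit(X_chunk, y_chunk)    # classes only needed on the first call

print(clf.predict(X[:5]))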
Example 1: DrunkLearningOnline
# Required import: from sklearn.linear_model import Perceptron [as alias]
# Or: from sklearn.linear_model.Perceptron import partial_fit [as alias]
# also requires: import numpy as np; import joblib (or sklearn.externals.joblib on older scikit-learn)
class DrunkLearningOnline(DrunkLearningBatch):
    """drunk_learning class for online learning"""

    def __init__(self):
        super(DrunkLearningOnline, self).__init__()
        self.clf = Perceptron()
        self.filename = 'modelPerceptron.pkl'

    def partial_fit(self, X, y):
        X = np.array([X])                    # wrap the single sample into a 2-D array
        y = np.array(y)                      # y is expected to be a one-element sequence, e.g. [1]
        self.clf.partial_fit(X, y, [0, 1])   # the full class list is passed on every call
        joblib.dump(self.clf, self.filename, compress=9)  # persist the updated model
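A hypothetical usage sketch follows (DrunkLearningBatch and the real feature layout are not shown in this excerpt, so the three-element vectors and the joblib import style are assumptions): each call updates the underlying Perceptron with one labeled sample and re-saves the model, which can later be reloaded.
learner = DrunkLearningOnline()
learner.partial_fit([0.2, 0.7, 1.3], [1])    # one feature vector, one label
learner.partial_fit([0.9, 0.1, 0.4], [0])    # model is updated and re-saved on each call

import joblib                                # assumption: standalone joblib package
restored = joblib.load('modelPerceptron.pkl')
print(restored.predict([[0.2, 0.7, 1.3]]))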
Example 2: train_classifiers
# Required import: from sklearn.linear_model import Perceptron [as alias]
# Or: from sklearn.linear_model.Perceptron import partial_fit [as alias]
# also requires: numpy as np, sklearn.linear_model.PassiveAggressiveClassifier, and the gensim
# helpers simple_preprocess / sparse2full; `settings`, `dictionary`, and the logger `ln`
# come from the surrounding project
def train_classifiers(models, train_data):
    classifiers = dict()
    for modelname, model in models.items():
        if settings["classifier"] == "Perceptron":
            classifier = Perceptron()
        if settings["classifier"] == "PassiveAggressive":
            classifier = PassiveAggressiveClassifier()

        for sample_no, (text, is_acq) in enumerate(train_data):
            bow = dictionary.doc2bow(simple_preprocess(text))
            model_features = sparse2full(model[bow], model.__out_size)
            label = np.array([is_acq])
            # ln.debug("%s, %s " % (model_features, label.shape))
            # reshape to (1, n_features): partial_fit expects a 2-D feature array
            classifier.partial_fit(model_features.reshape(1, -1), label, classes=np.array([True, False]))
            if sample_no % 500 == 0:
                ln.debug("Classifier for %s trained %s samples so far." % (modelname, sample_no))

        classifiers[modelname] = classifier
        ln.info("Finished training classifier for %s" % modelname)
    return classifiers
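A prediction-side sketch mirroring the training pipeline above might look as follows. It is not self-contained: `dictionary`, the topic `models`, and their `__out_size` attribute come from the surrounding project, and the gensim import paths are assumptions based on the helpers used above.
from gensim.utils import simple_preprocess   # assumed source of simple_preprocess
from gensim.matutils import sparse2full      # assumed source of sparse2full

def classify_text(classifiers, models, text):
    """Return one prediction per topic model for a single document."""
    predictions = {}
    for modelname, model in models.items():
        bow = dictionary.doc2bow(simple_preprocess(text))
        features = sparse2full(model[bow], model.__out_size).reshape(1, -1)
        predictions[modelname] = classifiers[modelname].predict(features)[0]
    return predictions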
Example 3: Perceptron
# Required import: from sklearn.linear_model import Perceptron [as alias]
# Or: from sklearn.linear_model.Perceptron import partial_fit [as alias]
import numpy as np  # features_to_train, targets_to_train, features_to_test come from earlier in the script

# For looping through chunks of data, set the step size
step_size = 1000
percept = Perceptron(n_jobs=-1)
prev = 0
nxt = step_size

# First chunk: pass the full set of classes so partial_fit knows every label up front
X_train = features_to_train[prev:nxt, :]
Y_train = targets_to_train[prev:nxt]
print(len(X_train))
print(len(Y_train))
percept.partial_fit(X_train, Y_train, classes=np.unique(targets_to_train))
prev += step_size
nxt += step_size

# Remaining chunks: classes no longer need to be passed
for i in range(len(features_to_train) // step_size - 1):
    X_train = features_to_train[prev:nxt, :]
    Y_train = targets_to_train[prev:nxt]
    percept.partial_fit(X_train, Y_train)
    predicted_targets = percept.predict(features_to_test)  # re-predict on the test set after each chunk
    prev += step_size
    nxt += step_size
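As a follow-up, the held-out predictions can be scored with sklearn.metrics; `targets_to_test` below is an assumed name for the true labels of features_to_test, which does not appear in the excerpt.
from sklearn.metrics import accuracy_score, classification_report

# `targets_to_test` is an assumed name for the held-out labels
print("accuracy:", accuracy_score(targets_to_test, predicted_targets))
print(classification_report(targets_to_test, predicted_targets))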
Example 4: StreamingLearner
# Required import: from sklearn.linear_model import Perceptron [as alias]
# Or: from sklearn.linear_model.Perceptron import partial_fit [as alias]
#......... part of the code omitted here .........
        self.count = {
            "train": {"pos": 0, "neg": 0},
            "test": {"pos": 0, "neg": 0},
        }
        self.train = 1
        self.eval_count = {
            "pos": {"tp": 0, "fp": 0, "fn": 0},
            "neg": {"tp": 0, "fp": 0, "fn": 0},
        }
        super(StreamingLearner, self).__init__(zmq_sub_string, channel)

    def on_msg(self, tweet):
        print_tick()
        if tweet.get("lang") != "en":
            return  # skip non-English tweets
        emoticons = self.re_emoticons.findall(tweet["text"])
        if not emoticons:
            return  # skip tweets without emoticons
        text = self.re_emoticons.sub("", tweet["text"].replace("\n", ""))
        X = self.vec.transform([text])

        # label for message
        last_emoticon = emoticons[-1]
        if last_emoticon == ":)":
            label = "pos"
        elif last_emoticon == ":(":
            label = "neg"
        y = np.asarray([label])

        if not self.train:
            # use every 5th message for evaluation
            print("")
            print("TEST %s |" % label, text)
            self.count["test"][label] += 1
            y_pred = self.clf.predict(X)
            pred_label, gold_label = y_pred[0], label
            print("PRED: ", pred_label)
            if pred_label == gold_label:
                self.eval_count[gold_label]["tp"] += 1
            else:
                self.eval_count[pred_label]["fp"] += 1
                self.eval_count[gold_label]["fn"] += 1
            pos_acc = (
                self.eval_count["pos"]["tp"] / self.count["test"]["pos"]
            ) if self.count["test"]["pos"] else 0
            neg_acc = (
                self.eval_count["neg"]["tp"] / self.count["test"]["neg"]
            ) if self.count["test"]["neg"] else 0
            print("*** CLF TESTED ON: %s :) samples (Acc %.3f),"
                  " %s :( samples (Acc %.3f)" %
                  (self.count["test"]["pos"], pos_acc,
                   self.count["test"]["neg"], neg_acc))
            print(json.dumps(self.eval_count, indent=2))
            print()
        else:
            self.count["train"][label] += 1
            # set higher sample weight for underrepresented class
            tc = self.count["train"]
            if label == "pos":
                sample_weight = min(3, max(1, tc["neg"] - tc["pos"]))
            elif label == "neg":
                sample_weight = min(3, max(1, tc["pos"] - tc["neg"]))
            else:
                sample_weight = 0
            print("\nTRAIN %s (weight %s) |" % (label, sample_weight), text)
            print(">>> CLF TRAINED ON: %s :) samples, %s :( samples" % (
                self.count["train"]["pos"], self.count["train"]["neg"]))
            self.clf.partial_fit(X, y, self.classes, [sample_weight])

        self.train += 1
        # use every 5th message for evaluation
        if not self.train % 5:
            self.train = 0
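The tp/fp/fn counters above translate directly into per-class precision and recall; the helper below is a small sketch that works on the same eval_count layout.
def precision_recall(eval_count, label):
    """Compute precision and recall for one class from the tp/fp/fn counters above."""
    tp, fp, fn = (eval_count[label][k] for k in ("tp", "fp", "fn"))
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    return precision, recall

# e.g. precision_recall(self.eval_count, "pos") inside the class,
# or precision_recall(learner.eval_count, "neg") from outside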
Example 5: FeatureHasher
# Required import: from sklearn.linear_model import Perceptron [as alias]
# Or: from sklearn.linear_model.Perceptron import partial_fit [as alias]
# return X
import numpy as np
import pandas as pd
from sklearn.feature_extraction import FeatureHasher
from sklearn.metrics import log_loss
# `cols` and `dayhour()` are defined earlier in the script

# note: non_negative was deprecated and later removed in newer scikit-learn versions
fh = FeatureHasher(n_features=2**20, input_type="string", non_negative=True)
# ohe = OneHotEncoder(categorical_features=columns)

# Train classifier
clf = Perceptron()
train = pd.read_csv("testtrain.csv", chunksize=50000, iterator=True)
all_classes = np.array([0, 1])
for chunk in train:
    y_train = chunk["click"]
    chunk = chunk[cols]
    chunk = chunk.join(pd.DataFrame([dayhour(x) for x in chunk.hour], columns=["wd", "hr"]))
    chunk.drop(["hour"], axis=1, inplace=True)
    Xcat = fh.transform(np.asarray(chunk.astype(str)))
    clf.partial_fit(Xcat, y_train, classes=all_classes)

# Create a submission file
usecols = cols + ["id"]
X_test = pd.read_csv("testtest.csv", usecols=usecols)
X_test = X_test.join(pd.DataFrame([dayhour(x) for x in X_test.hour], columns=["wd", "hr"]))
X_test.drop(["hour"], axis=1, inplace=True)
X_enc_test = fh.transform(np.asarray(X_test.astype(str)))
y_act = pd.read_csv("testtest.csv", usecols=['click'])
y_pred = clf.predict(X_enc_test)
with open('logloss.txt', 'a') as f:
    f.write('\n' + str(log_loss(y_act, y_pred)) + '\tPerceptron')
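One caveat on the final metric: Perceptron does not implement predict_proba, so the log loss above is computed from hard 0/1 predictions rather than probabilities, which makes it very sensitive to individual errors. A complementary check (a sketch reusing the y_act and y_pred above) could log plain accuracy alongside it:
from sklearn.metrics import accuracy_score

with open('logloss.txt', 'a') as f:
    f.write('\n' + str(accuracy_score(y_act.values.ravel(), y_pred)) + '\tPerceptron accuracy')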