This article collects typical usage examples of the sklearn.linear_model.PassiveAggressiveClassifier class in Python. If you are wondering what PassiveAggressiveClassifier does, how to use it, or what real code that uses it looks like, the selected examples below should help.
The following 15 code examples of the PassiveAggressiveClassifier class are shown, sorted by popularity.
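Before the collected examples, here is a minimal, self-contained sketch of the basic fit/predict workflow. The synthetic dataset and parameter values are illustrative assumptions, not taken from any of the examples below.

# Minimal sketch (illustrative only): train a PassiveAggressiveClassifier on a toy dataset.
from sklearn.datasets import make_classification
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.model_selection import train_test_split

# Synthetic data; any feature matrix X and label vector y would do.
X, y = make_classification(n_samples=200, n_features=20, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = PassiveAggressiveClassifier(C=1.0, max_iter=1000, tol=1e-3, random_state=0)
clf.fit(X_train, y_train)
print("Test accuracy:", clf.score(X_test, y_test))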
Example 1: train_and_predict_m7
def train_and_predict_m7 (train, test, labels) :
    ## Apply basic concatenation + stemming
    trainData, testData = stemmer_clean (train, test, stemmerEnableM7, stemmer_type = 'snowball')

    ## TF-IDF transform with sub-linear TF and stop-word removal
    tfv = TfidfVectorizer(min_df = 5, max_features = None, strip_accents = 'unicode', analyzer = 'word', token_pattern = r'\w{1,}', ngram_range = (1, 5), smooth_idf = 1, sublinear_tf = 1, stop_words = ML_STOP_WORDS)
    tfv.fit(trainData)
    X = tfv.transform(trainData)
    X_test = tfv.transform(testData)

    ## Create the classifier
    print("Fitting Passive-Aggressive Classifier...")
    clf = PassiveAggressiveClassifier(random_state = randomState, loss = 'squared_hinge', n_iter = 100, C = 0.01)

    ## Create a parameter grid to search for the best parameters for everything in the pipeline
    param_grid = {'C' : [0.003, 0.01, 0.03, 0.1], 'loss': ['hinge', 'squared_hinge'], 'n_iter': [5, 10, 30, 100, 300]}
    #param_grid = {'C' : [0.003, 0.01, 0.03, 0.1, 0.3, 1], 'loss': ['hinge'], 'n_iter': [5, 10, 30, 100, 300, 1000]}

    ## Predict with the best parameters, optimized for quadratic_weighted_kappa
    if (gridSearch) :
        model = perform_grid_search (clf, param_grid, X, labels)
        pred = model.predict(X_test)
    else :
        clf.fit(X, labels)
        pred = clf.predict(X_test)
    return pred
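The perform_grid_search helper used above is not shown in this article. Below is a minimal sketch of what such a helper might look like with scikit-learn's GridSearchCV; the scoring choice and CV settings are assumptions, not the original author's code (the original pipeline optimized quadratic weighted kappa). Note also that in current scikit-learn releases the n_iter parameter has been replaced by max_iter, so the grid keys would need to be adjusted accordingly.

# Hypothetical grid-search helper (not the original perform_grid_search).
from sklearn.model_selection import GridSearchCV

def perform_grid_search_sketch(clf, param_grid, X, labels):
    # Exhaustively search param_grid with 5-fold CV; 'accuracy' is an assumed scoring choice.
    search = GridSearchCV(clf, param_grid, scoring='accuracy', cv=5, n_jobs=-1)
    search.fit(X, labels)
    print("Best parameters:", search.best_params_)
    return search.best_estimator_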
Example 2: test_classifier_refit
def test_classifier_refit():
    # Classifier can be retrained on different labels and features.
    clf = PassiveAggressiveClassifier(max_iter=5).fit(X, y)
    assert_array_equal(clf.classes_, np.unique(y))

    clf.fit(X[:, :-1], iris.target_names[y])
    assert_array_equal(clf.classes_, iris.target_names)
Example 3: PassiveAggressiveClassifier_1
def PassiveAggressiveClassifier_1(train_predictors, test_predictors, train_target, test_target):
    clf = PassiveAggressiveClassifier()
    clf.fit(train_predictors, train_target)
    predicted = clf.predict(test_predictors)
    accuracy = accuracy_score(test_target, predicted)
    print("Accuracy for Linear Model PassiveAggressiveClassifier: " + str(accuracy))
    return accuracy, predicted
Example 4: test_classifier_accuracy
def test_classifier_accuracy():
    for data in (X, X_csr):
        for fit_intercept in (True, False):
            clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
                                              fit_intercept=fit_intercept,
                                              random_state=0)
            clf.fit(data, y)
            score = clf.score(data, y)
            assert_greater(score, 0.79)
Example 5: train_online_model
def train_online_model(xtr, ytr, model=None):
    # Train classifier
    t0 = time.time()
    if model is None:
        model = PassiveAggressiveClassifier()
        model.fit(xtr, ytr)
    else:
        model.partial_fit(xtr, ytr)
    print("Training took %.2f seconds" % (time.time() - t0))
    return model
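For a true out-of-core workflow, partial_fit can be called repeatedly on mini-batches; the first call must receive the full set of class labels. A minimal sketch follows, in which the batch generator, shapes, and labels are illustrative assumptions.

# Illustrative out-of-core training loop (not from the examples above).
import numpy as np
from sklearn.linear_model import PassiveAggressiveClassifier

def iter_batches(n_batches=10, batch_size=100, n_features=20, seed=0):
    # Stand-in for a real data stream: yields random mini-batches.
    rng = np.random.RandomState(seed)
    for _ in range(n_batches):
        X = rng.randn(batch_size, n_features)
        y = (X[:, 0] > 0).astype(int)
        yield X, y

clf = PassiveAggressiveClassifier(random_state=0)
all_classes = np.array([0, 1])  # must be known up front for the first partial_fit call
for i, (X_batch, y_batch) in enumerate(iter_batches()):
    if i == 0:
        clf.partial_fit(X_batch, y_batch, classes=all_classes)
    else:
        clf.partial_fit(X_batch, y_batch)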
Example 6: test_classifier_partial_fit
def test_classifier_partial_fit():
    classes = np.unique(y)
    for data in (X, X_csr):
        clf = PassiveAggressiveClassifier(C=1.0,
                                          fit_intercept=True,
                                          random_state=0)
        for t in range(30):
            clf.partial_fit(data, y, classes)
        score = clf.score(data, y)
        assert_greater(score, 0.79)
Example 7: test_passive_aggressive_2
def test_passive_aggressive_2():
    """Ensure that the TPOT PassiveAggressiveClassifier outputs the same as the sklearn classifier when C == 0.0"""
    tpot_obj = TPOT()
    result = tpot_obj._passive_aggressive(training_testing_data, 0.0, 0)
    result = result[result['group'] == 'testing']

    pagg = PassiveAggressiveClassifier(C=0.0001, loss='hinge', fit_intercept=True, random_state=42)
    pagg.fit(training_features, training_classes)

    assert np.array_equal(result['guess'].values, pagg.predict(testing_features))
Example 8: mainworker
def mainworker(limit1, limit2):
    N = 10
    l = []
    w1 = []  # +1 class
    w2 = []  # -1 class
    temp = []
    classlist = []
    f = open("pdata.txt")
    for line in f:
        x = (line.strip("\n")).split(",")
        temp = []
        for i in range(len(x)):
            x[i] = int(x[i])
            temp.append(x[i])
        clas = temp.pop()
        temp = temp[:limit1] + temp[limit2+1:]
        l.append(temp)
        classlist.append(clas)
        """if(temp[-1]==-1):
            w2.append(temp)
        else:
            w1.append(temp)"""
    f.close()
    X = np.array(l)
    y = np.array(classlist)
    karray = [2, 3, 4, 5]
    for k in karray:
        kf = cross_validation.KFold(11054, n_folds=k)
        averager = []
        for train_index, test_index in kf:
            #print("TRAIN:", train_index, "TEST:", test_index)
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            #print(X_train, len(X_test), len(y_train), len(y_test))
            train_data = []
            test_data = []
            train_label = []
            test_label = []
            X1 = X_train  # train_data
            Y1 = y_train  # train_label
            clf = PassiveAggressiveClassifier()
            #clf = svm.SVC(kernel='linear')
            clf.fit(X1, Y1)
            Z = X_test  # test_data
            predicted = clf.predict(Z)
            accuracy = getAccuracy(predicted, y_test)  # test_label
            averager.append(accuracy)
        answer = np.mean(averager)
        print("The mean for", k, "fold is:")
        print(answer)
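The example above relies on the long-removed sklearn.cross_validation module and a hard-coded sample count. A roughly equivalent sketch using the current sklearn.model_selection API is shown below; the synthetic dataset is an illustrative stand-in for the file-based data in the example.

# Hedged sketch of k-fold evaluation with the modern API (illustrative data).
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.model_selection import KFold, cross_val_score

X, y = make_classification(n_samples=1000, n_features=30, random_state=0)
for k in (2, 3, 4, 5):
    # KFold now splits based on the data itself, not a hard-coded length.
    scores = cross_val_score(PassiveAggressiveClassifier(random_state=0), X, y,
                             cv=KFold(n_splits=k, shuffle=True, random_state=0))
    print("Mean accuracy for %d folds: %.3f" % (k, scores.mean()))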
Example 9: TrainSVM
def TrainSVM(data, labels):
    usealgo = 1
    if usealgo == 0:
        from sklearn.linear_model import PassiveAggressiveClassifier
        clf = PassiveAggressiveClassifier(class_weight='balanced', n_jobs=-1, n_iter=15, fit_intercept=True)
    elif usealgo == 1:
        clf = SVC(probability=True, decision_function_shape='ovr', random_state=np.random.randint(1000), kernel="linear")
    elif usealgo == 2:
        from sklearn.svm import LinearSVC
        clf = LinearSVC()
    clf.fit(data, labels)
    return clf
Example 10: __init__
class DeployedClassifierFactory:
    def __init__(self, term_doc_matrix, term_doc_matrix_factory, category, nlp=None):
        '''This is a class that enables one to train and save a classification model.

        Parameters
        ----------
        term_doc_matrix : TermDocMatrix
        term_doc_matrix_factory : TermDocMatrixFactory
        category : str
            Category name
        nlp : spacy parser
        '''
        self._term_doc_matrix = term_doc_matrix
        self._term_doc_matrix_factory = term_doc_matrix_factory
        assert term_doc_matrix_factory._nlp is None
        assert term_doc_matrix_factory.category_text_iter is None
        self._category = category
        self._clf = None
        self._proba = None

    def passive_aggressive_train(self):
        '''Trains a passive-aggressive classifier.
        '''
        self._clf = PassiveAggressiveClassifier(n_iter=50, C=0.2, n_jobs=-1, random_state=0)
        self._clf.fit(self._term_doc_matrix._X, self._term_doc_matrix._y)
        y_dist = self._clf.decision_function(self._term_doc_matrix._X)
        pos_ecdf = ECDF(y_dist[y_dist >= 0])
        neg_ecdf = ECDF(y_dist[y_dist <= 0])

        def proba_function(distance_from_hyperplane):
            if distance_from_hyperplane > 0:
                return pos_ecdf(distance_from_hyperplane) / 2. + 0.5
            elif distance_from_hyperplane < 0:
                return pos_ecdf(distance_from_hyperplane) / 2.
            return 0.5

        self._proba = proba_function
        return self

    def build(self):
        '''Builds a DeployedClassifier.
        '''
        if self._clf is None:
            raise NeedToTrainExceptionBeforeDeployingException()
        return DeployedClassifier(self._category,
                                  self._term_doc_matrix._category_idx_store,
                                  self._term_doc_matrix._term_idx_store,
                                  self._term_doc_matrix_factory)
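The interesting part of passive_aggressive_train is the mapping from decision-function distances to pseudo-probabilities via an empirical CDF, since PassiveAggressiveClassifier has no predict_proba. Below is a self-contained sketch of the same idea on synthetic data; it assumes the ECDF above comes from statsmodels, and the negative branch here uses neg_ecdf, which differs slightly from the snippet above.

# Illustrative ECDF-based pseudo-probability calibration for a margin classifier.
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import PassiveAggressiveClassifier
from statsmodels.distributions.empirical_distribution import ECDF

X, y = make_classification(n_samples=500, n_features=20, random_state=0)
clf = PassiveAggressiveClassifier(random_state=0).fit(X, y)

dist = clf.decision_function(X)
pos_ecdf = ECDF(dist[dist >= 0])
neg_ecdf = ECDF(dist[dist <= 0])

def pseudo_proba(d):
    # Map a signed distance from the hyperplane into [0, 1]; 0 maps to 0.5.
    if d > 0:
        return 0.5 + pos_ecdf(d) / 2.0
    if d < 0:
        return neg_ecdf(d) / 2.0
    return 0.5

print(pseudo_proba(clf.decision_function(X[:1])[0]))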
Example 11: test_classifier_accuracy
def test_classifier_accuracy():
    for data in (X, X_csr):
        for fit_intercept in (True, False):
            for average in (False, True):
                clf = PassiveAggressiveClassifier(
                    C=1.0, max_iter=30, fit_intercept=fit_intercept,
                    random_state=1, average=average, tol=None)
                clf.fit(data, y)
                score = clf.score(data, y)
                assert_greater(score, 0.79)
                if average:
                    assert hasattr(clf, 'average_coef_')
                    assert hasattr(clf, 'average_intercept_')
                    assert hasattr(clf, 'standard_intercept_')
                    assert hasattr(clf, 'standard_coef_')
Example 12: test_classifier_correctness
def test_classifier_correctness(loss):
    y_bin = y.copy()
    y_bin[y != 1] = -1

    clf1 = MyPassiveAggressive(
        C=1.0, loss=loss, fit_intercept=True, n_iter=2)
    clf1.fit(X, y_bin)

    for data in (X, X_csr):
        clf2 = PassiveAggressiveClassifier(
            C=1.0, loss=loss, fit_intercept=True, max_iter=2,
            shuffle=False, tol=None)
        clf2.fit(data, y_bin)

        assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
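The MyPassiveAggressive reference used in this test (and in Example 15 below) is not shown in this article. Below is a minimal sketch of what such a reference implementation could look like, following the PA-I (hinge) and PA-II (squared hinge) update rules; the class name and exact structure are assumptions, not the actual test helper.

# Hedged sketch of a reference passive-aggressive binary classifier (labels in {-1, +1}).
import numpy as np

class ReferencePassiveAggressive:
    def __init__(self, C=1.0, loss="hinge", fit_intercept=True, n_iter=1):
        self.C, self.loss, self.fit_intercept, self.n_iter = C, loss, fit_intercept, n_iter

    def fit(self, X, y):
        n_samples, n_features = X.shape
        self.w = np.zeros(n_features)
        self.b = 0.0
        for _ in range(self.n_iter):
            for i in range(n_samples):
                margin = y[i] * (np.dot(X[i], self.w) + self.b)
                loss = max(1.0 - margin, 0.0)
                sqnorm = np.dot(X[i], X[i])
                if self.loss == "hinge":          # PA-I: clip the step size at C
                    step = min(self.C, loss / sqnorm)
                else:                             # PA-II (squared_hinge): soft penalty via C
                    step = loss / (sqnorm + 1.0 / (2.0 * self.C))
                self.w += step * y[i] * X[i]
                if self.fit_intercept:
                    self.b += step * y[i]
        return self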
Example 13: test_classifier_partial_fit
def test_classifier_partial_fit():
    classes = np.unique(y)
    for data in (X, X_csr):
        for average in (False, True):
            clf = PassiveAggressiveClassifier(
                C=1.0, fit_intercept=True, random_state=0,
                average=average, max_iter=5)
            for t in range(30):
                clf.partial_fit(data, y, classes)
            score = clf.score(data, y)
            assert_greater(score, 0.79)
            if average:
                assert hasattr(clf, 'average_coef_')
                assert hasattr(clf, 'average_intercept_')
                assert hasattr(clf, 'standard_intercept_')
                assert hasattr(clf, 'standard_coef_')
Example 14: PassiveAggressiveModel
class PassiveAggressiveModel(BaseModel):
    def __init__(self, cached_features):
        BaseModel.__init__(self, cached_features)
        self.model = PassiveAggressiveClassifier(loss='squared_hinge', C=1.0, random_state=1)

    def _predict_internal(self, X_test):
        return self.model.predict(X_test)
Example 15: test_classifier_correctness
def test_classifier_correctness():
    y_bin = y.copy()
    y_bin[y != 1] = -1

    for loss in ("hinge", "squared_hinge"):
        clf1 = MyPassiveAggressive(C=1.0,
                                   loss=loss,
                                   fit_intercept=True,
                                   n_iter=2)
        clf1.fit(X, y_bin)

        clf2 = PassiveAggressiveClassifier(C=1.0,
                                           loss=loss,
                                           fit_intercept=True,
                                           n_iter=2)
        clf2.fit(X, y_bin)

        assert_array_almost_equal(clf1.w, clf2.coef_.ravel())