本文整理汇总了Python中sklearn.svm.SVC.predict_log_proba方法的典型用法代码示例。如果您正苦于以下问题:Python SVC.predict_log_proba方法的具体用法?Python SVC.predict_log_proba怎么用?Python SVC.predict_log_proba使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.svm.SVC
的用法示例。
在下文中一共展示了SVC.predict_log_proba方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: buildModel3
# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict_log_proba [as 别名]
def buildModel3(self):
    """Two-stage stacked model over separate feature groups.

    For each feature group, fit an SVC with probability estimates and
    transform train/test matrices into class log-probabilities. The
    per-group log-probability matrices are summed element-wise and used
    as input features for a logistic-regression meta-classifier.

    Prints the test accuracy and the per-class F-scores.
    """
    feats = [['words2vec'], ['pos'], ['clusteredLexicons']]
    y_attribute = 'stance'
    train_transforms = []
    test_transforms = []
    for f in feats:
        X, y = self.fe.getFeaturesMatrix('train', f, y_attribute)
        Xt, yt = self.fe.getFeaturesMatrix('test', f, y_attribute)
        clf = SVC(C=1, probability=True)
        clf = clf.fit(X, y)
        train_transforms.append(clf.predict_log_proba(X))
        test_transforms.append(clf.predict_log_proba(Xt))
    # Element-wise sum of the per-group log-probability matrices
    # (labels y / yt are identical across groups, so the ones left
    # over from the last iteration are reused below).
    y_pred_h = sum(train_transforms)
    y_t_h = sum(test_transforms)
    regr = linear_model.LogisticRegression()
    regr.fit(y_pred_h, y)
    final_pred = regr.predict(y_t_h)
    print(accuracy_score(final_pred, yt))
    pprint(self.eval.computeFscores(
        self.data.testTweets,
        self.fe.labelenc.inverse_transform(final_pred)))
示例2: SVM_predict_rank
# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict_log_proba [as 别名]
def SVM_predict_rank(features, classes, unknown, actual_classes):
    """Rank authors by likelihood of having authored each unknown text.

    Fits an RBF-kernel SVC on the (normalized) training features, then
    for every unknown text orders all known authors from most to least
    likely and records the 0-based rank of the true author.

    Returns a list of integer ranks, one per unknown text (0 means the
    true author was the SVM's top prediction).
    """
    FP = Feature_Preprocessor(features, True, False, 30)
    features = FP.batch_normalize(features)
    unknown = FP.batch_normalize(unknown)
    clf = SVC(probability=True, kernel='rbf', C=2.4, degree=1,
              gamma=0.7 / len(features[0]))
    clf.fit(features, classes)
    rankings = []
    # Original used chained map/zip, which on Python 3 yields a lazy
    # map object; build an explicit list instead.
    for log_probs, actual in zip(clf.predict_log_proba(unknown),
                                 actual_classes):
        # Authors ordered from most to least likely for this text.
        ordered = [author for author, _ in
                   sorted(zip(clf.classes_, log_probs),
                          key=lambda pair: pair[1], reverse=True)]
        rankings.append(ordered.index(actual))
    return rankings
示例3: Scorer
# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict_log_proba [as 别名]
class Scorer(BaseEstimator, TransformerMixin, Evaluable):
    """ Scorer

    Scores vectors after normalization, either by cosine similarity or
    with an RBF-kernel SVM fitted on the normalized training vectors.

    Parameters
    ----------
    centering : bool (default: True)
        mean normalized the vectors
    wccn : bool (default: True)
        within class covariance normalization
    lda : bool (default: True)
        Linear Discriminant Analysis
    concat : bool (default: False)
        concatenate original vector to the normalized vector
    method : {'cosine', 'svm'}
        method for scoring
    labels : optional
        class labels; inferred from `y` during `fit` when None
    """
    def __init__(self, centering=True, wccn=True, lda=True, concat=False,
                 method='cosine', labels=None):
        super(Scorer, self).__init__()
        self._normalizer = VectorNormalizer(
            centering=centering, wccn=wccn, lda=lda, concat=concat)
        self._labels = labels
        # Normalize and validate the scoring method early so bad
        # configuration fails at construction time, not at fit time.
        method = str(method).lower()
        if method not in ('cosine', 'svm'):
            raise ValueError('`method` must be one of the following: cosine, svm; '
                             'but given: "%s"' % method)
        self._method = method

    # ==================== properties ==================== #
    @property
    def method(self):
        # Scoring method: 'cosine' or 'svm'.
        return self._method

    @property
    def feat_dim(self):
        # Feature dimension, delegated to the normalizer.
        return self._normalizer.feat_dim

    @property
    def labels(self):
        return self._labels

    @property
    def nb_classes(self):
        return len(self._labels)

    @property
    def is_initialized(self):
        return self._normalizer.is_initialized

    @property
    def is_fitted(self):
        return self._normalizer.is_fitted

    @property
    def normalizer(self):
        return self._normalizer

    @property
    def lda(self):
        return self._normalizer.lda

    # ==================== sklearn ==================== #
    def fit(self, X, y):
        """Fit the normalizer (and, for method='svm', an SVC) on X, y."""
        # ====== preprocessing ====== #
        if isinstance(X, (tuple, list)):
            X = np.asarray(X)
        if isinstance(y, (tuple, list)):
            y = np.asarray(y)
        # ====== vector normalizer ====== #
        self._normalizer.fit(X, y)
        if self._labels is None:
            # Collapse one-hot / probability targets to class indices
            # before extracting the label set.
            if y.ndim >= 2:
                y = np.argmax(y, axis=-1)
            self._labels = np.unique(y)
        # ====== for SVM method ====== #
        if self.method == 'svm':
            X = self._normalizer.transform(X)
            # normalize to [0, 1]
            # NOTE(review): despite the comment, this maps to [-1, 1]
            # (min-max scaling then *2 - 1) — confirm intended range.
            X = 2 * (X - self._normalizer.vmin) /\
                (self._normalizer.vmax - self._normalizer.vmin) - 1
            self._svm = SVC(C=1, kernel='rbf', gamma='auto', coef0=1,
                            shrinking=True, random_state=0,
                            probability=True, tol=1e-3,
                            cache_size=1e4, class_weight='balanced')
            self._svm.fit(X, y)
            # Expose predict_proba only once an SVM exists (instance
            # attribute shadows nothing; _predict_proba guards method).
            self.predict_proba = self._predict_proba
        return self

    def _predict_proba(self, X):
        # Only meaningful when an SVM was fitted; cosine scoring has no
        # probabilistic interpretation here.
        if self.method != 'svm':
            raise RuntimeError("`predict_proba` only for 'svm' method")
        return self._svm.predict_proba(self._normalizer.transform(X))

    def predict_log_proba(self, X):
        # Alias: log-probability scores are produced by `transform`.
        return self.transform(X)

    def transform(self, X):
        # [nb_samples, nb_classes - 1] (if LDA applied)
        # ......... remainder of this method omitted in this excerpt .........
示例4: SVC
# 需要导入模块: from sklearn.svm import SVC [as 别名]
# 或者: from sklearn.svm.SVC import predict_log_proba [as 别名]
# Build the training set: dummy tanka vectors and Takuboku's tanka vectors
# combined, with labels 0 = dummy, 1 = Takuboku.
X = np.concatenate((dummy_vectors, takuboku_vectors))
y = np.concatenate((np.zeros(len(dummy_vectors)), np.ones(len(takuboku_vectors))))
# Throwaway classifier used only to estimate generalization accuracy.
pre_classifier = SVC(kernel='linear')
# Measure accuracy with 5-fold cross-validation.
scores = cross_val_score(pre_classifier, X, y, cv=5)
# Report the measured score.
print('SVM Score:', scores.mean())
# Retrain with the same parameters on ALL data; probability=True is
# required for predict_log_proba below.
classifier = SVC(kernel='linear', probability=True)
classifier.fit(X, y)
# Split the candidate "last tanka" rows into label and vector parts
# (first column is the label, the rest is the feature vector).
tanka_vectors_labels = np.array([list(row)[0] for row in tanka_vectors])
tanka_vectors_data = np.array([list(row)[1:] for row in tanka_vectors])
# Log-probability of each candidate being a Takuboku tanka (class 1).
proba = classifier.predict_log_proba(tanka_vectors_data)
# Sort by "Takuboku-likeness", most likely FIRST.
# BUG FIX: the original sorted ascending, so the [:30] slice below
# reported the 30 LEAST likely tanka; reverse=True matches the stated
# intent of descending order of likeness.
sorted_proba = np.array(sorted(zip(proba, tanka_vectors_labels),
                               key=lambda r: r[0][1], reverse=True))
# Report the top 30 candidates.
print('\n'.join(['{0:>2}. {1}'.format(i + 1, p[1].decode("utf-8")) for i, p in enumerate(sorted_proba[:30])]))