This page collects typical usage examples of the Python method classifier.Classifier.load_model. If you are unsure what Classifier.load_model does, how to call it, or want to see it used in context, the curated code examples below may help. You can also read more about the containing class, classifier.Classifier.
Two code examples of Classifier.load_model are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
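Both examples share the same basic pattern: construct a Classifier, then restore a previously trained model from disk before making any predictions. A minimal sketch of that pattern (the path 'model.bin' is a hypothetical placeholder, not taken from the examples):
# Minimal sketch of the shared pattern; 'model.bin' is a hypothetical path.
from classifier import Classifier

clf = Classifier()
clf.load_model('model.bin')  # load a previously trained model before predicting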
Example 1: EmojiRecommender
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import load_model [as alias]
class EmojiRecommender():
    def __init__(self, fname_model, fname_embed, fname_dataset):
        print >> sys.stderr, 'EmojiRecommender: [info] loading word index...'
        self.windexer = WordIndexer.load(fname_embed)

        print >> sys.stderr, 'EmojiRecommender: [info] loading model...'
        self.clf = Classifier()
        self.clf.load_model(fname_model)  # restore the trained classifier from disk

        print >> sys.stderr, 'EmojiRecommender: [info] loading emojis...'
        ecode_split = cPickle.load(open(fname_dataset, 'r'))
        self.emojis = [emo for emo, split in ecode_split]
        self.ydim = len(self.emojis)
        print >> sys.stderr, 'EmojiRecommender: [info] initialization done'

    def preprocess(self, text):
        # Tokenize the input text and map tokens to word indices.
        text = text.decode('utf8')
        seq = zhtokenizer.tokenize(text)
        idxs = self.windexer.seq2idx(seq)
        return idxs

    def predict_proba(self, text):
        idxs = self.preprocess(text)
        if len(idxs) == 0:
            return None
        else:
            return self.clf.predict_proba(idxs)

    def recommend(self, text, n=5):
        # Return the top-n emojis together with their probability scores.
        proba = self.predict_proba(text)
        if proba is None:
            eids = [i for i in range(n)]
            scores = [0. for i in range(n)]
        else:
            ranks = [(i, proba[i]) for i in range(self.ydim)]
            ranks = sorted(ranks, key=lambda k: -k[1])
            eids = [ranks[i][0] for i in range(n)]
            scores = [ranks[i][1] for i in range(n)]
        res = [{'emoji': self.emojis[eid], 'score': '%.2f' % score} for eid, score in zip(eids, scores)]
        return res
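A possible way to use the class above (not part of the original example; the three file names are hypothetical placeholders, and WordIndexer, zhtokenizer, and Classifier must be provided by the surrounding project):
# Hypothetical usage; the file paths are placeholders, not from the original project.
rec = EmojiRecommender('emoji_model.bin', 'word_embedding.txt', 'emoji_dataset.pkl')
for item in rec.recommend('今天天气真好', n=3):
    print item['emoji'], item['score']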
Example 2: confused_docs_to_file
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import load_model [as alias]
def confused_docs_to_file(confused, file_name):
    # Write each confused example (a triple of strings) on its own line.
    with codecs.open(file_name, 'w', 'utf-8') as f:
        for c in confused:
            line = c[0] + ' ' + c[1] + ' ' + c[2] + '\n'
            f.write(line)


CONFUSED_MATRIX_POSTFIX = '_confusion_matrix.csv'
CONFUSED_POSTFIX = '_confused.txt'
CORRECT_POSTFIX = '_correct.txt'
CLASSIFIER_DATA_DIRECTORY = 'classifiers_data'

if __name__ == "__main__":
    c = Classifier()
    c.load_model(TRAINING_FILE_OUTPUT)  # restore the trained model from disk
    test_file = TEST_FILE

    # Grab all test data from the file.
    test_data = []
    actual_class = []
    with codecs.open(test_file, 'r', 'utf-8') as f:
        for line in f:
            class_name, text = line.split('\t', 1)
            text = text.strip()
            token_list = []
            for token in token_iterator(text, TOKEN_PATTERN):
                token_list.append(token)
            test_data.append(token_list)
            actual_class.append(class_name)
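    # A hedged sketch of how the evaluation might continue from here; 'classify'
    # is an assumed method name, not confirmed by the original snippet.
    confused = []
    for token_list, expected in zip(test_data, actual_class):
        predicted = c.classify(token_list)
        if predicted != expected:
            confused.append((expected, predicted, ' '.join(token_list)))
    confused_docs_to_file(confused, test_file + CONFUSED_POSTFIX)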