This page collects typical usage examples of the Python method textblob.classifiers.NaiveBayesClassifier.labels. If you are wondering what NaiveBayesClassifier.labels does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also read more about the class this method belongs to, textblob.classifiers.NaiveBayesClassifier.
Four code examples of NaiveBayesClassifier.labels are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
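Before the examples, here is a minimal, self-contained sketch of what NaiveBayesClassifier.labels() does: it returns the list of class labels the classifier was trained on. The tiny training set and the 'pos'/'neg' label names below are illustrative assumptions, not taken from any of the examples on this page.

from textblob.classifiers import NaiveBayesClassifier

# A tiny, made-up training set, just to illustrate labels().
train = [
    ("I love this sandwich.", "pos"),
    ("This is an amazing place!", "pos"),
    ("I do not like this restaurant.", "neg"),
    ("I am tired of this stuff.", "neg"),
]

cl = NaiveBayesClassifier(train)
print(cl.labels())                           # e.g. ['neg', 'pos'] -- the distinct labels seen in training
print(cl.classify("What an amazing place"))  # classify() always returns one of those labels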
Example 1: run_test
# Required module: from textblob.classifiers import NaiveBayesClassifier [as alias]
# Or: from textblob.classifiers.NaiveBayesClassifier import labels [as alias]
from textblob.classifiers import NaiveBayesClassifier
from sklearn.metrics import confusion_matrix  # assumed: confusion_matrix comes from scikit-learn in this excerpt


def run_test(train, test, name):
    print("Training...")
    cll = NaiveBayesClassifier(train)
    print("Done training\n")
    accuracy = cll.accuracy(test)
    print("Accuracy: " + str(accuracy))

    # get matching lists of predicted and true labels
    pred_labels = list()
    true_labels = list()
    for obj in test:
        prob_label = cll.prob_classify(obj[0]).max()
        true_label = obj[1]
        true_labels.append(true_label)
        pred_labels.append(prob_label)

    # transform our labels to numbers
    labels = cll.labels()
    i = 0
    label_num = dict()
    for label in labels:
        label_num[label] = i
        i = i + 1

    # match our predicted and true labels with the number representations
    true_label_nums = list()
    pred_label_nums = list()
    for true_l, pred_l in zip(true_labels, pred_labels):
        true_label_nums.append(label_num[true_l])
        pred_label_nums.append(label_num[pred_l])

    cm = confusion_matrix(true_label_nums, pred_label_nums)
    print(cm)
    print("\n")

    with open("test_results.txt", "a") as tr:
        tr.write(str(name) + "\n")
        tr.write(str(accuracy) + "\n")
        tr.write(str(cm))
        tr.write("\n\n")

    # plot the confusion matrix and save it under plots/<name>.pdf
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(cm)
    plt.title("Confusion Matrix For " + name)
    fig.colorbar(cax)
    ax.set_xticklabels([''] + labels)
    ax.set_yticklabels([''] + labels)
    plt.xlabel("Predicted")
    plt.ylabel("True")
    plt.savefig('plots/' + name + '.pdf', bbox_inches='tight')
Example 2: TestNaiveBayesClassifier
# Required module: from textblob.classifiers import NaiveBayesClassifier [as alias]
# Or: from textblob.classifiers.NaiveBayesClassifier import labels [as alias]
class TestNaiveBayesClassifier(unittest.TestCase):
    def setUp(self):
        self.classifier = NaiveBayesClassifier(train_set)

    def test_default_extractor(self):
        text = "I feel happy this morning."
        assert_equal(self.classifier.extract_features(text), basic_extractor(text, train_set))

    def test_classify(self):
        res = self.classifier.classify("I feel happy this morning")
        assert_equal(res, 'positive')
        assert_equal(len(self.classifier.train_set), len(train_set))

    def test_classify_a_list_of_words(self):
        res = self.classifier.classify(["I", "feel", "happy", "this", "morning"])
        assert_equal(res, "positive")

    def test_train_from_lists_of_words(self):
        # classifier can be trained on lists of words instead of strings
        train = [(doc.split(), label) for doc, label in train_set]
        classifier = NaiveBayesClassifier(train)
        assert_equal(classifier.accuracy(test_set),
                     self.classifier.accuracy(test_set))

    def test_prob_classify(self):
        res = self.classifier.prob_classify("I feel happy this morning")
        assert_equal(res.max(), "positive")
        assert_true(res.prob("positive") > res.prob("negative"))

    def test_accuracy(self):
        acc = self.classifier.accuracy(test_set)
        assert_true(isinstance(acc, float))

    def test_update(self):
        res1 = self.classifier.prob_classify("lorem ipsum")
        original_length = len(self.classifier.train_set)
        self.classifier.update([("lorem ipsum", "positive")])
        new_length = len(self.classifier.train_set)
        res2 = self.classifier.prob_classify("lorem ipsum")
        assert_true(res2.prob("positive") > res1.prob("positive"))
        assert_equal(original_length + 1, new_length)

    def test_labels(self):
        labels = self.classifier.labels()
        assert_true("positive" in labels)
        assert_true("negative" in labels)

    def test_show_informative_features(self):
        feats = self.classifier.show_informative_features()

    def test_informative_features(self):
        feats = self.classifier.informative_features(3)
        assert_true(isinstance(feats, list))
        assert_true(isinstance(feats[0], tuple))

    def test_custom_feature_extractor(self):
        cl = NaiveBayesClassifier(train_set, custom_extractor)
        cl.classify("Yay! I'm so happy it works.")
        assert_equal(cl.train_features[0][1], 'positive')

    def test_init_with_csv_file(self):
        with open(CSV_FILE) as fp:
            cl = NaiveBayesClassifier(fp, format="csv")
        assert_equal(cl.classify("I feel happy this morning"), 'pos')
        training_sentence = cl.train_set[0][0]
        assert_true(isinstance(training_sentence, unicode))

    def test_init_with_csv_file_without_format_specifier(self):
        with open(CSV_FILE) as fp:
            cl = NaiveBayesClassifier(fp)
        assert_equal(cl.classify("I feel happy this morning"), 'pos')
        training_sentence = cl.train_set[0][0]
        assert_true(isinstance(training_sentence, unicode))

    def test_init_with_json_file(self):
        with open(JSON_FILE) as fp:
            cl = NaiveBayesClassifier(fp, format="json")
        assert_equal(cl.classify("I feel happy this morning"), 'pos')
        training_sentence = cl.train_set[0][0]
        assert_true(isinstance(training_sentence, unicode))

    def test_init_with_json_file_without_format_specifier(self):
        with open(JSON_FILE) as fp:
            cl = NaiveBayesClassifier(fp)
        assert_equal(cl.classify("I feel happy this morning"), 'pos')
        training_sentence = cl.train_set[0][0]
        assert_true(isinstance(training_sentence, unicode))

    def test_init_with_custom_format(self):
        redis_train = [('I like turtles', 'pos'), ('I hate turtles', 'neg')]

        class MockRedisFormat(formats.BaseFormat):
            def __init__(self, client, port):
                self.client = client
                self.port = port

            @classmethod
            def detect(cls, stream):
                return True
#......... the rest of the code for this example is omitted .........
Example 3: NaiveBayesClassifier
# Required module: from textblob.classifiers import NaiveBayesClassifier [as alias]
# Or: from textblob.classifiers.NaiveBayesClassifier import labels [as alias]
# (Note: the beginning of this example is omitted; experience_utterances,
#  environment_utterances and working_on_utterances are tuples of sample sentences defined above this excerpt.)
    'what are you working on',
    'what you making')

experience_utterances = [(x, 'experience') for x in experience_utterances]
environment_utterances = [(x, 'enivornment') for x in environment_utterances]
working_on_utterances = [(x, 'working') for x in working_on_utterances]

# FIXME: find better way to flatten lists together
training_set = []
training_set.extend(experience_utterances)
training_set.extend(environment_utterances)
training_set.extend(working_on_utterances)

classifier = NaiveBayesClassifier(training_set)
print(classifier.show_informative_features(), classifier.labels())

bogus_utterances = (
    'if you going to use nltk u may want to check this out spacy .io',
    'sup people? I see the weather\'s getting better over there, Ben.',
    'i had the same problem your having so thats my i made my own.',
    'try http, instead of https'
)

# TODO: Figure out how to make this stronger
dual_utterance = ('how long have you been coding and what IDE do you use',)

test_utterances = ('what are you making',
                   'hey that nyancat is cool, how do you get that?')

for t in test_utterances:
    # ... the body of this loop is omitted in the excerpt ...
Example 4: TestNaiveBayesClassifier
# Required module: from textblob.classifiers import NaiveBayesClassifier [as alias]
# Or: from textblob.classifiers.NaiveBayesClassifier import labels [as alias]
class TestNaiveBayesClassifier(unittest.TestCase):
    def setUp(self):
        self.classifier = NaiveBayesClassifier(train_set)

    def test_default_extractor(self):
        text = "I feel happy this morning."
        assert_equal(self.classifier.extract_features(text), basic_extractor(text, train_set))

    def test_classify(self):
        res = self.classifier.classify("I feel happy this morning")
        assert_equal(res, 'positive')
        assert_equal(len(self.classifier.train_set), len(train_set))

    def test_classify_a_list_of_words(self):
        res = self.classifier.classify(["I", "feel", "happy", "this", "morning"])
        assert_equal(res, "positive")

    def test_train_from_lists_of_words(self):
        # classifier can be trained on lists of words instead of strings
        train = [(doc.split(), label) for doc, label in train_set]
        classifier = NaiveBayesClassifier(train)
        assert_equal(classifier.accuracy(test_set),
                     self.classifier.accuracy(test_set))

    def test_prob_classify(self):
        res = self.classifier.prob_classify("I feel happy this morning")
        assert_equal(res.max(), "positive")
        assert_true(res.prob("positive") > res.prob("negative"))

    def test_accuracy(self):
        acc = self.classifier.accuracy(test_set)
        assert_true(isinstance(acc, float))

    def test_update(self):
        res1 = self.classifier.prob_classify("lorem ipsum")
        original_length = len(self.classifier.train_set)
        self.classifier.update([("lorem ipsum", "positive")])
        new_length = len(self.classifier.train_set)
        res2 = self.classifier.prob_classify("lorem ipsum")
        assert_true(res2.prob("positive") > res1.prob("positive"))
        assert_equal(original_length + 1, new_length)

    def test_labels(self):
        labels = self.classifier.labels()
        assert_true("positive" in labels)
        assert_true("negative" in labels)

    def test_show_informative_features(self):
        feats = self.classifier.show_informative_features()

    def test_informative_features(self):
        feats = self.classifier.informative_features(3)
        assert_true(isinstance(feats, list))
        assert_true(isinstance(feats[0], tuple))

    def test_custom_feature_extractor(self):
        cl = NaiveBayesClassifier(train_set, custom_extractor)
        cl.classify("Yay! I'm so happy it works.")
        assert_equal(cl.train_features[0][1], 'positive')

    def test_init_with_csv_file(self):
        cl = NaiveBayesClassifier(CSV_FILE, format="csv")
        assert_equal(cl.classify("I feel happy this morning"), 'pos')
        training_sentence = cl.train_set[0][0]
        assert_true(isinstance(training_sentence, unicode))

    def test_init_with_csv_file_without_format_specifier(self):
        cl = NaiveBayesClassifier(CSV_FILE)
        assert_equal(cl.classify("I feel happy this morning"), 'pos')
        training_sentence = cl.train_set[0][0]
        assert_true(isinstance(training_sentence, unicode))

    def test_init_with_json_file(self):
        cl = NaiveBayesClassifier(JSON_FILE, format="json")
        assert_equal(cl.classify("I feel happy this morning"), 'pos')
        training_sentence = cl.train_set[0][0]
        assert_true(isinstance(training_sentence, unicode))

    def test_init_with_json_file_without_format_specifier(self):
        cl = NaiveBayesClassifier(JSON_FILE)
        assert_equal(cl.classify("I feel happy this morning"), 'pos')
        training_sentence = cl.train_set[0][0]
        assert_true(isinstance(training_sentence, unicode))

    def test_accuracy_on_a_csv_file(self):
        a = self.classifier.accuracy(CSV_FILE)
        assert_true(isinstance(a, float))

    def test_accuracy_on_json_file(self):
        a = self.classifier.accuracy(JSON_FILE)
        assert_true(isinstance(a, float))

    def test_init_with_tsv_file(self):
        cl = NaiveBayesClassifier(TSV_FILE)
        assert_equal(cl.classify("I feel happy this morning"), 'pos')
        training_sentence = cl.train_set[0][0]
        assert_true(isinstance(training_sentence, unicode))

    def test_init_with_bad_format_specifier(self):
#......... the rest of the code for this example is omitted .........