This page collects typical usage examples of the Python method text.classifiers.NaiveBayesClassifier.show_informative_features. If you are unsure what NaiveBayesClassifier.show_informative_features does or how to use it, the code examples selected below should help. You can also read further about the enclosing class, text.classifiers.NaiveBayesClassifier.
Three code examples of NaiveBayesClassifier.show_informative_features are shown below, ordered by popularity by default.
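Note that the text package used in these imports is the original distribution name of what is now TextBlob; in current releases the same classifier is exposed as textblob.classifiers.NaiveBayesClassifier. As a minimal sketch of the call pattern, assuming the modern package name and an invented toy training set:

from textblob.classifiers import NaiveBayesClassifier

# Toy training set, invented purely for illustration.
train = [
    ('I love this sandwich.', 'pos'),
    ('This is an amazing place!', 'pos'),
    ('I do not like this restaurant.', 'neg'),
    ('I am tired of this stuff.', 'neg'),
]

cl = NaiveBayesClassifier(train)
print(cl.classify("This is an amazing library!"))  # expected: 'pos'
cl.show_informative_features(3)  # prints the 3 most informative features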
Example 1: NaiveBayesClassifier
# Required import: from text.classifiers import NaiveBayesClassifier
# Or: from text.classifiers.NaiveBayesClassifier import show_informative_features
# `train` is assumed to be a list of (text, label) tuples defined earlier,
# in the same shape as the `test` list below.
test = [
    ('The beer was good.', 'pos'),
    ('I do not enjoy my job', 'neg'),
    ("I ain't feeling dandy today.", 'neg'),
    ("I feel amazing!", 'pos'),
    ('Gary is a friend of mine.', 'pos'),
    ("I can't believe I'm doing this.", 'neg')
]
cl = NaiveBayesClassifier(train)
# Classify some text
print(cl.classify("Their burgers are amazing."))  # "pos"
print(cl.classify("I don't like their pizza."))   # "neg"
# Classify a TextBlob (TextBlob is assumed to be imported from the same `text` package)
blob = TextBlob("The beer was amazing. But the hangover was horrible. "
                "My boss was not pleased.", classifier=cl)
print(blob)
print(blob.classify())
for sentence in blob.sentences:
    print(sentence)
    print(sentence.classify())
# Compute accuracy
print("Accuracy: {0}".format(cl.accuracy(test)))
# Show 5 most informative features
cl.show_informative_features(5)
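A detail worth keeping in mind, based on how these classifiers wrap NLTK: show_informative_features prints its table to stdout and returns nothing, whereas informative_features (used in the tests below) returns the underlying data. A small sketch continuing with the cl object from Example 1:

# show_informative_features(n) prints the n most informative features
# (it delegates to NLTK's show_most_informative_features) and returns None.
cl.show_informative_features(5)

# informative_features(n) returns the same information as a list of
# (feature_name, feature_value) tuples that can be inspected in code.
top = cl.informative_features(5)
print(top[0])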
Example 2: TestNaiveBayesClassifier
# Required import: from text.classifiers import NaiveBayesClassifier
# Or: from text.classifiers.NaiveBayesClassifier import show_informative_features
class TestNaiveBayesClassifier(unittest.TestCase):

    def setUp(self):
        self.train_set = [
            ('I love this car', 'positive'),
            ('This view is amazing', 'positive'),
            ('I feel great this morning', 'positive'),
            ('I am so excited about the concert', 'positive'),
            ('He is my best friend', 'positive'),
            ('I do not like this car', 'negative'),
            ('This view is horrible', 'negative'),
            ('I feel tired this morning', 'negative'),
            ('I am not looking forward to the concert', 'negative'),
            ('He is my enemy', 'negative')
        ]
        self.classifier = NaiveBayesClassifier(self.train_set)
        self.test_set = [('I feel happy this morning', 'positive'),
                         ('Larry is my friend.', 'positive'),
                         ('I do not like that man.', 'negative'),
                         ('My house is not great.', 'negative'),
                         ('Your song is annoying.', 'negative')]

    def test_basic_extractor(self):
        text = "I feel happy this morning."
        feats = basic_extractor(text, self.train_set)
        assert_true(feats["contains(feel)"])
        assert_true(feats['contains(morning)'])
        assert_false(feats["contains(amazing)"])

    def test_default_extractor(self):
        text = "I feel happy this morning."
        assert_equal(self.classifier.extract_features(text), basic_extractor(text, self.train_set))

    def test_classify(self):
        res = self.classifier.classify("I feel happy this morning")
        assert_equal(res, 'positive')
        assert_equal(len(self.classifier.train_set), len(self.train_set))

    def test_prob_classify(self):
        res = self.classifier.prob_classify("I feel happy this morning")
        assert_equal(res.max(), "positive")
        assert_true(res.prob("positive") > res.prob("negative"))

    def test_accuracy(self):
        acc = self.classifier.accuracy(self.test_set)
        assert_true(isinstance(acc, float))

    def test_update(self):
        res1 = self.classifier.prob_classify("lorem ipsum")
        original_length = len(self.classifier.train_set)
        self.classifier.update([("lorem ipsum", "positive")])
        new_length = len(self.classifier.train_set)
        res2 = self.classifier.prob_classify("lorem ipsum")
        assert_true(res2.prob("positive") > res1.prob("positive"))
        assert_equal(original_length + 1, new_length)

    def test_show_informative_features(self):
        feats = self.classifier.show_informative_features()

    def test_informative_features(self):
        feats = self.classifier.informative_features(3)
        assert_true(isinstance(feats, list))
        assert_true(isinstance(feats[0], tuple))

    def test_custom_feature_extractor(self):
        cl = NaiveBayesClassifier(self.train_set, custom_extractor)
        cl.classify("Yay! I'm so happy it works.")
        assert_equal(cl.train_features[0][1], 'positive')
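The test_custom_feature_extractor case above relies on a custom_extractor that is defined elsewhere in the test module and not shown on this page. Purely as an assumed illustration (not the module's actual definition), a custom extractor is a callable that maps a document to a dict of features:

def custom_extractor(document):
    # Hypothetical extractor, for illustration only: one boolean feature
    # per lowercased token in the document.
    feats = {}
    for token in document.split():
        feats["contains({0})".format(token.lower())] = True
    return feats

# It is then passed as the second constructor argument, exactly as in
# test_custom_feature_extractor above:
#     cl = NaiveBayesClassifier(self.train_set, custom_extractor)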
Example 3: TestNaiveBayesClassifier
# Required import: from text.classifiers import NaiveBayesClassifier
# Or: from text.classifiers.NaiveBayesClassifier import show_informative_features
class TestNaiveBayesClassifier(unittest.TestCase):

    def setUp(self):
        self.classifier = NaiveBayesClassifier(train_set)

    def test_basic_extractor(self):
        text = "I feel happy this morning."
        feats = basic_extractor(text, train_set)
        assert_true(feats["contains(feel)"])
        assert_true(feats['contains(morning)'])
        assert_false(feats["contains(amazing)"])

    def test_default_extractor(self):
        text = "I feel happy this morning."
        assert_equal(self.classifier.extract_features(text), basic_extractor(text, train_set))

    def test_classify(self):
        res = self.classifier.classify("I feel happy this morning")
        assert_equal(res, 'positive')
        assert_equal(len(self.classifier.train_set), len(train_set))

    def test_classify_a_list_of_words(self):
        res = self.classifier.classify(["I", "feel", "happy", "this", "morning"])
        assert_equal(res, "positive")

    def test_train_from_lists_of_words(self):
        # classifier can be trained on lists of words instead of strings
        train = [(doc.split(), label) for doc, label in train_set]
        classifier = NaiveBayesClassifier(train)
        assert_equal(classifier.accuracy(test_set),
                     self.classifier.accuracy(test_set))

    def test_prob_classify(self):
        res = self.classifier.prob_classify("I feel happy this morning")
        assert_equal(res.max(), "positive")
        assert_true(res.prob("positive") > res.prob("negative"))

    def test_accuracy(self):
        acc = self.classifier.accuracy(test_set)
        assert_true(isinstance(acc, float))

    def test_update(self):
        res1 = self.classifier.prob_classify("lorem ipsum")
        original_length = len(self.classifier.train_set)
        self.classifier.update([("lorem ipsum", "positive")])
        new_length = len(self.classifier.train_set)
        res2 = self.classifier.prob_classify("lorem ipsum")
        assert_true(res2.prob("positive") > res1.prob("positive"))
        assert_equal(original_length + 1, new_length)

    def test_labels(self):
        labels = self.classifier.labels()
        assert_true("positive" in labels)
        assert_true("negative" in labels)

    def test_show_informative_features(self):
        feats = self.classifier.show_informative_features()

    def test_informative_features(self):
        feats = self.classifier.informative_features(3)
        assert_true(isinstance(feats, list))
        assert_true(isinstance(feats[0], tuple))

    def test_custom_feature_extractor(self):
        cl = NaiveBayesClassifier(train_set, custom_extractor)
        cl.classify("Yay! I'm so happy it works.")
        assert_equal(cl.train_features[0][1], 'positive')

    def test_init_with_csv_file(self):
        cl = NaiveBayesClassifier(CSV_FILE, format="csv")
        assert_equal(cl.classify("I feel happy this morning"), 'pos')
        training_sentence = cl.train_set[0][0]
        assert_true(isinstance(training_sentence, unicode))

    def test_init_with_csv_file_without_format_specifier(self):
        cl = NaiveBayesClassifier(CSV_FILE)
        assert_equal(cl.classify("I feel happy this morning"), 'pos')
        training_sentence = cl.train_set[0][0]
        assert_true(isinstance(training_sentence, unicode))

    def test_init_with_json_file(self):
        cl = NaiveBayesClassifier(JSON_FILE, format="json")
        assert_equal(cl.classify("I feel happy this morning"), 'pos')
        training_sentence = cl.train_set[0][0]
        assert_true(isinstance(training_sentence, unicode))

    def test_init_with_json_file_without_format_specifier(self):
        cl = NaiveBayesClassifier(JSON_FILE)
        assert_equal(cl.classify("I feel happy this morning"), 'pos')
        training_sentence = cl.train_set[0][0]
        assert_true(isinstance(training_sentence, unicode))

    def test_accuracy_on_a_csv_file(self):
        a = self.classifier.accuracy(CSV_FILE)
        assert_true(isinstance(a, float))

    def test_accuracy_on_json_file(self):
        a = self.classifier.accuracy(JSON_FILE)
        assert_true(isinstance(a, float))
#......... the rest of the code is omitted here .........
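Example 3 builds classifiers from CSV_FILE and JSON_FILE fixtures that are defined outside the excerpt and omitted here. Purely as an assumed illustration of the documented training-data formats (the file names and rows below are invented, not the actual fixtures), CSV input is one text,label row per example and JSON input is a list of objects with "text" and "label" keys:

# train.csv (assumed name)
#     I love this sandwich.,pos
#     I do not like this restaurant.,neg
#
# train.json (assumed name)
#     [
#         {"text": "I love this sandwich.", "label": "pos"},
#         {"text": "I do not like this restaurant.", "label": "neg"}
#     ]

# In current textblob releases a file object (rather than a path) is passed:
with open('train.csv') as fp:
    cl = NaiveBayesClassifier(fp, format="csv")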