This page collects typical code examples showing how the classifier.Classifier.classify method is used in Python. If you are wondering what Classifier.classify does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also browse further usage examples of the containing class, classifier.Classifier.
The following section presents 15 code examples of Classifier.classify, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
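Before the examples, here is a minimal sketch of the call pattern they all share. The constructor arguments and the value returned by classify() differ from project to project (a pickled model path, a product list, a trained learner, and so on), so the file name and the return handling below are illustrative assumptions only, not part of any project excerpted here.
# Minimal, hypothetical usage sketch; "model.pkl" and the sample text are placeholders.
from classifier import Classifier

clf = Classifier("model.pkl")   # constructor arguments depend on the concrete project
result = clf.classify("The room was clean and the staff were friendly.")
print(result)                   # the return type also depends on the concrete Classifier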
Example 1: test_classify
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
def test_classify():
proxy = ReviewsMongoProxy("tripadvisor_train")
review = proxy.find_review_by_id(proxy.next_random_review_id())
classifier = Classifier("../tripadvisor/aspect_nltk_nb.pkl")
classifier.classify(review)
print_review(review)
Example 2: cl_button_clicked_cb
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
def cl_button_clicked_cb(self, button):
"""Classify button callback
:param button: signal came from this button
"""
if not len(self.sel_files):
return
self.counter = -1
for row in self.sel_files:
Classifier.classify(self.all_files[row], MainWindow.SR, row, self.update_classify_progress_cb)
Example 3: classify
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
def classify(info_hash):
conn = Database.get_conn()
c = conn.cursor()
try:
c.execute('''
SELECT name, info_hash, perm_category FROM torrents WHERE info_hash = {0}
'''.format(Database._placeholder), (info_hash,))
torrent = c.fetchone()
c.execute('''
SELECT path, size FROM files WHERE info_hash = {0}
'''.format(Database._placeholder), (info_hash,))
files = c.fetchall()
category, tags = Classifier.classify(torrent[0], files, torrent[2])
c.execute('''
UPDATE torrents SET category = {0}, tags = {0}, classifier_version = {0} WHERE info_hash = {0}
'''.format(Database._placeholder), (category, json.dumps(tags), Classifier.version, info_hash,))
Database.logger.debug("Classified: (%s)(%s)" % (info_hash,torrent[0]))
try:
conn.commit()
except:
Database.logger.critical("Failed to commit to db")
except:
raise
conn.close()
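Example 3 first substitutes Database._placeholder into the SQL string and only then passes the actual values to execute(); because every slot in the string is the same {0}, a single positional argument to format() fills them all. The value of Database._placeholder is not shown in the excerpt, so the sketch below simply assumes a sqlite3-style ? marker:
# Hypothetical illustration of the placeholder-substitution pattern used above.
placeholder = '?'   # sqlite3 uses '?'; a MySQLdb-backed Database would use '%s'
query = 'SELECT name, info_hash, perm_category FROM torrents WHERE info_hash = {0}'.format(placeholder)
# The query is now parameterised for the active driver; the value is still passed separately:
# c.execute(query, (info_hash,))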
Example 4: main
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
def main():
dbinfo = recover()
conn = MySQLdb.connect(**dbinfo)
cur = conn.cursor()
#Learn
sql = "SELECT id,article_text,trainpos,trainneg,trainneutral FROM articles WHERE trainset=1 AND (trainpos>0 OR trainneg>0 OR trainneutral>0)"
cur.execute(sql)
a = Learner()
for aid,article_text,trainpos,trainneg,trainneutral in cur.fetchall():
aid = int(aid)
items = [ (1, int(trainpos)),(0, int(trainneutral)),(-1, int(trainneg)) ]
classification = max(items, key=lambda x : x[1])[0]
a.add_string(article_text, classification)
a.train()
#Predict
sql = "SELECT id,article_text FROM articles"
cur.execute(sql)
b = Classifier(a)
for aid,article_text in cur.fetchall():
aid = int(aid)
classification = b.classify(article_text)
sql = "UPDATE articles SET score=%s WHERE id=%s"
args = [classification,aid]
cur.execute(sql,args)
print aid,classification
conn.commit()
Example 5: test_classify_by_randomforest
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
def test_classify_by_randomforest():
stock_d = testdata()
ti = TechnicalIndicators(stock_d)
filename = 'test_N225_randomforest.pickle'
clffile = os.path.join(os.path.dirname(
os.path.abspath(__file__)),
'..', 'clf',
filename)
if os.path.exists(clffile):
os.remove(clffile)
clf = Classifier(filename)
ti.calc_ret_index()
ret = ti.stock['ret_index']
train_X, train_y = clf.train(ret, classifier="Random Forest")
eq_(filename, os.path.basename(clf.filename))
r = round(train_X[-1][-1], 5)
expected = 1.35486
eq_(r, expected)
r = round(train_X[0][0], 5)
expected = 1.08871
eq_(r, expected)
expected = 14
r = len(train_X[0])
eq_(r, expected)
expected = 120
r = len(train_X)
eq_(r, expected)
expected = [1, 0, 0, 0, 1, 1, 0, 0, 0, 0,
0, 0, 1, 0, 0, 1, 0, 1, 0, 1,
1, 0, 1, 1, 1, 1, 1, 0, 1, 0,
1, 1, 1, 1, 0, 1, 0, 1, 1, 0,
1, 0, 0, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 1, 0, 0, 1, 1, 1, 1,
1, 0, 1, 0, 0, 0, 0, 0, 0, 1,
1, 1, 0, 0, 1, 0, 1, 1, 0, 1,
1, 0, 1, 1, 0, 1, 0, 0, 1, 0,
1, 1, 0, 0, 1, 0, 1, 0, 1, 1,
1, 1, 1, 0, 1, 1, 1, 0, 0, 1,
1, 0, 0, 1, 1, 1, 0, 1, 1, 0]
for r, e in zip(train_y, expected):
eq_(r, e)
expected = 1
test_y = clf.classify(ret)
assert(test_y[0] == 0 or test_y[0] == 1)
if os.path.exists(clffile):
os.remove(clffile)
Example 6: classifier
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
def classifier(search_query) :
cls = Classifier(' '.join(search_query.split('_')))
classified_output = cls.classify()
if classified_output != None and len(classified_output) > 0 :
with open("output/" + search_query+".json","w") as out :
out.write(json.dumps(classified_output))
return json.dumps({"query" : search_query, "status": "Success"})
else :
return json.dumps({"query" : search_query, "status": "Failed"})
Example 7: TextAnalyzerModule
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
class TextAnalyzerModule(ALModule):
def __init__(self, name):
ALModule.__init__(self, name)
self.classifier = Classifier()
self.responder = Responder()
self.tts = ALProxy("ALTextToSpeech")
def say(self, text):
sentiment = self.classifier.classify(text)
response = self.responder.get_response(sentiment)
print("Responding with '%s'" % response)
self.tts.say(response)
Example 8: run
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
def run(self):
global worksqueue, spectImg
classifier = Classifier()
while True:
sample = worksqueue.get()
worksqueue.task_done()
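# Note: task_done() is usually called after an item has been processed; calling it
# right after get() means worksqueue.join() may return before classification finishes.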
result = classifier.classify(sample)
print "\nPreciction: %s\n" % result
spectImg = writeMFCC(sample, RATE)
Example 9: main
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
def main():
args = parser.parse_args()
full_data_json = read_dataset(args.data)
# for n in xrange(30, len(full_data_json), 30):
for n in [len(full_data_json)]:
corrects = 0
total = 0
for _ in xrange(SAMPLES):
random.shuffle(full_data_json)
data_json = full_data_json[:n]
training_set_ratio = 0.7
training_set_size = int(training_set_ratio * len(data_json) + 0.5)
training_set = data_json[:training_set_size]
test_set = data_json[training_set_size:]
processor = TextProcessor()
classifier = Classifier(processor)
classifier.train(training_set)
for example in test_set:
text = example["content"]
predicted_tag = classifier.classify(text)
expected_tag = classifier.normalize_tag_label(example["tag"])
if expected_tag in Classifier.IGNORE_TAGS:
continue
if predicted_tag == expected_tag:
corrects += 1
else:
# print 'expected = {}, predicted = {}'.format(expected_tag, predicted_tag)
pass
total += 1
print "{} {}".format(len(data_json), float(corrects) / total)
Example 10: Commonwords
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
self.dump_information()
if __name__ == "__main__":
common = Commonwords(commonwords_path)
event_tagger = tagger.Tagger(classifier="event.ser.gz", port=1111)
todo_tagger = tagger.Tagger(classifier="todo.ser.gz", port=2222)
all_tagger = tagger.Tagger(classifier="all.ser.gz", port=3333)
filenames = [f for f in os.listdir(test_dir) if os.path.isfile(os.path.join(test_dir, f))]
classifier = Classifier(all_tagger, common)
for filename in filenames:
if filename == "freq":
continue
msg = open(os.path.join(test_dir, filename), "r").read()
print "++++++++++++++++++++++++++++++++++++++++"
print msg
print "++++++++++++++++++++++++++++++++++++++++"
cls = classifier.classify(msg)
# print cls
if cls == "EVENT":
t = tokenizer.Tokenizer(msg, event_tagger)
e = EventExtractor(t.get_toks())
e.extract_event()
elif cls == "TODO":
t = tokenizer.Tokenizer(msg, todo_tagger)
e = TodoExtractor(t.get_toks())
e.extract_todo()
print "++++++++++++++++++++++++++++++++++++++++"
Example 11: print
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
print("")
sys.exit()
# initialize Retriever
retriever = Retriever(sys.argv[1])
collections = retriever.get_collections()
# rank collections by sentiment
all_docs = reduce((lambda x, y: x + y), [v for k,v in collections.iteritems()], [])
all_sentiments = [float(doc.get("sentiment")) for doc in all_docs]
classifier = Classifier(all_sentiments)
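# classify() is assumed here to return an indexable pair whose second element ([1])
# is the numeric score summed below; that is inferred from this excerpt only.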
reduce_lambda = lambda sum, val: sum + (classifier.classify(float(val.get("sentiment")))[1])
sentiments_t = { k: reduce(reduce_lambda, v, 0.0) for k, v in collections.iteritems() }
sentiments = {k: v/len(collections[k]) for k, v in sentiments_t.iteritems() }
# intro printer
print("")
print("COMP479 Final Project")
print(" by ")
print("Connor Bode, Greg Houle, Michael bla")
print("")
# question 1
print("Q1. Which is the most positive Department in ENCS at Concordia?")
most_positive_tuple = lambda current_highest, current_tuple: current_highest if current_highest[1] > current_tuple[1] else current_tuple
positive = reduce(most_positive_tuple, [(k, v) for k, v in sentiments.iteritems()])
Example 12: int
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
if __name__ == "__main__":
process_at_a_time = int(sys.argv[1])
# Load data from files.
listings = get_data("listings.txt")
random.shuffle(listings)
products = get_data("products.txt")
classifier = Classifier(products)
classified_listings = get_data("classified_listings.txt")
old_listings = map(lambda x: UnenforcedFrozenDict(x['listing']), classified_listings)
added = 0
for listing in listings:
if not UnenforcedFrozenDict(listing) in old_listings:
print str(listing)
suggested_product = classifier.classify(listing)
if suggested_product is not None:
print "Suggested product: %s" % str(suggested_product)
yn = ''
while not yn.lower() in ['y', 'n']:
yn = raw_input('Is the suggested product correct? (y/n) ')
if yn == 'y':
product_name = suggested_product["product_name"]
else:
product_name = raw_input('Enter product name: ')
else:
product_name = raw_input('Enter product name: ')
if product_name == '':
classified_listings.append({'listing' : listing, 'product_name' : None })
else:
Example 13: main
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(description="""
Shopping listing classifier tool. Run in a directory containing listings.txt and products.txt. Produces results.txt.""")
parser.add_argument('-v','--verbose', help='display extra information for debugging',
action='store_true')
parser.add_argument('-d','--diagnostic', help='use classified_listings.txt, which contains listings with correct products.',
action='store_true')
args = parser.parse_args()
# Load data from files.
products = get_data("products.txt")
if args.diagnostic:
classified_listings = get_data("classified_listings.txt")
listings = map(lambda x: x["listing"], classified_listings)
listing_to_product = {}
for classified_listing in classified_listings:
product_name = classified_listing["product_name"]
listing = UnenforcedFrozenDict(classified_listing["listing"])
listing_to_product[listing] = product_name
else:
listings = get_data("listings.txt")
# Create a classifier for the provided products.
classifier = Classifier(products)
# Classify all listings.
results = defaultdict(list)
classified = 0
correct = 0
positive_error = 0
negative_error = 0
for listing in listings:
product = classifier.classify(listing, verbose=args.verbose)
if args.diagnostic:
correct_product_name = listing_to_product[UnenforcedFrozenDict(listing)]
if product is None:
if args.diagnostic and correct_product_name is not None:
print "NEGATIVE ERROR: None instead of %s\nListing: %s\n" % (correct_product_name, str(listing))
negative_error += 1
elif args.diagnostic:
correct += 1
else:
classified += 1
product_name = product["product_name"]
results[product_name].append(listing)
if args.diagnostic and product_name != correct_product_name:
positive_error += 1
print "POSITIVE ERROR: %s instead of %s\nListing: %s\n" % (product_name, correct_product_name, str(listing))
elif args.diagnostic:
correct += 1
if args.verbose:
print "Classification rate: %.02f" % (float(classified) / len(listings))
if args.diagnostic:
print "Total listings: %d" % len(listings)
print "Total classified: %d" % classified
print "Correct: %d (%.02f)" % (correct, (float(correct) / len(listings)))
print "Positive errors: %d (%.02f)" % (positive_error, (float(positive_error) / len(listings)))
print "Negative errors: %d (%.02f)" % (negative_error, (float(negative_error) / len(listings)))
# Process results dictionary into array of Result objects.
processed_results = []
for (product_name, classified_listings) in results.items():
processed_results.append({
'product_name' : product_name,
'listings' : classified_listings
})
# Print results to "results.txt".
print_results("results.txt", processed_results)
Example 14: main
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
def main():
clf = Classifier("weights.txt", "etas.txt")
with open(K_TRAINING_FILE, 'r') as training_file:
for record in training_file.readlines():
(person, label) = parse_record(record)
clf.classify(person, label=label)
Example 15: Classifier
# Required import: from classifier import Classifier [as alias]
# Or: from classifier.Classifier import classify [as alias]
## - December 10, 2015
## - An interactive classifier
import sys
from classifier import Classifier
print "\nPlease wait while the training data is loaded.."
myClassifier = Classifier()
myClassifier.load()
print "Ready for input"
filename = raw_input("Enter a file name or a directory (type \"quit\" to quit) > ")
while filename != "quit":
if ".txt" in filename:
with open(filename, 'r') as infile:
clsfy = myClassifier.classify(infile)
if clsfy > 0:
print "Positive! Weight = {0}".format(clsfy)
elif clsfy < 0:
print "Negative! Weight = {0}".format(clsfy)
elif clsfy == 0:
print "Undertermined"
else:
myClassifier.classify_reviews(filename + "*.txt")
filename = raw_input("Enter a file name or a directory > ")