This article collects typical usage examples of the Python method sklearn.feature_extraction.text.TfidfVectorizer.tokenizer. If you are wondering what TfidfVectorizer.tokenizer does, how to call it, or what real-world usages look like, the curated code examples below may help. You can also read more about the class it belongs to, sklearn.feature_extraction.text.TfidfVectorizer.
The following shows 2 code examples of the TfidfVectorizer.tokenizer method, sorted by popularity by default.
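Before the examples, here is a minimal sketch (not taken from either example) of the basic pattern: tokenizer can be passed to the TfidfVectorizer constructor as any callable that maps a document string to a list of tokens, and the same attribute can also be reassigned on an existing vectorizer. The simple_tokenizer function and the toy documents below are purely illustrative.

from sklearn.feature_extraction.text import TfidfVectorizer

def simple_tokenizer(text):
    # toy tokenizer: lowercase and split on whitespace
    return text.lower().split()

docs = ['The quick brown fox', 'jumps over the lazy dog']

# pass the callable at construction time ...
vect = TfidfVectorizer(tokenizer=simple_tokenizer)
X = vect.fit_transform(docs)

# ... or reassign it afterwards through the tokenizer attribute
vect.tokenizer = str.split
print(X.shape)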
Example 1: get_most_similar_prop
# Required import: from sklearn.feature_extraction.text import TfidfVectorizer [as alias]
# Or: from sklearn.feature_extraction.text.TfidfVectorizer import tokenizer [as alias]
def get_most_similar_prop(self, text_en, subject_tokens, tokens, print_top_n=5):
    # do not include <main_word> tokens in the bag-of-words vocabulary
    vect = TfidfVectorizer(ngram_range=(1, 3), sublinear_tf=True,
                           tokenizer=txt.QATokenizer('property', debug_info=True),
                           stop_words=subject_tokens)
    prop_descrs = self.get_prop_descrs()
    if prop_descrs:
        props_matrix = vect.fit_transform(prop_descrs)
        # change the tokenizer to handle questions
        vect.tokenizer = txt.QATokenizer('question', debug_info=True)
        q_vector = vect.transform([text_en])
        print('Bag of words vocabulary:', vect.get_feature_names())
        sims = cosine_similarity(q_vector, props_matrix).flatten()
        top_sims = sims.argsort()[:-print_top_n - 1:-1]
        top_n_properties = itemgetter(*top_sims)(self.get_properties())
        print('Top {0} properties by Bag of Words similarity:'.format(print_top_n),
              *zip(top_n_properties, sims[top_sims]), sep='\n')
        # return the best-matching Property and its confidence level
        return top_n_properties[0], sims[top_sims][0]
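Example 1 works because TfidfVectorizer builds its analyzer from the current attributes each time fit or transform is called: reassigning vect.tokenizer after fit_transform changes how later documents are tokenized, while the fitted vocabulary and IDF weights stay fixed. A stripped-down sketch of that swap pattern, with illustrative lambdas standing in for txt.QATokenizer:

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

prop_descrs = ['date of birth', 'place of birth', 'country of citizenship']

vect = TfidfVectorizer(tokenizer=lambda s: s.split())
props_matrix = vect.fit_transform(prop_descrs)

# swap the tokenizer: the vocabulary and IDF weights learned above are kept,
# only the tokenization of documents passed to transform() changes
vect.tokenizer = lambda s: s.replace('?', '').split()
q_vector = vect.transform(['what is her place of birth?'])
print(cosine_similarity(q_vector, props_matrix))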
Example 2: train_tfidf
# Required import: from sklearn.feature_extraction.text import TfidfVectorizer [as alias]
# Or: from sklearn.feature_extraction.text.TfidfVectorizer import tokenizer [as alias]
def train_tfidf(self, tokenizer='custom', corpus='news'):
    if tokenizer == 'custom':
        tokenizer = self.tokenize
    nltk_corpus = []
    if corpus == 'all':
        nltk_corpus += [nltk.corpus.gutenberg.raw(f_id) for f_id in nltk.corpus.gutenberg.fileids()]
        nltk_corpus += [nltk.corpus.webtext.raw(f_id) for f_id in nltk.corpus.webtext.fileids()]
        nltk_corpus += [nltk.corpus.brown.raw(f_id) for f_id in nltk.corpus.brown.fileids()]
        nltk_corpus += [nltk.corpus.reuters.raw(f_id) for f_id in nltk.corpus.reuters.fileids()]
    elif corpus == 'news':
        nltk_corpus += self.get_bbc_news_corpus()
    if self.verbose:
        print "LENGTH of nltk corpus: {}".format(sum([len(d) for d in nltk_corpus]))
    vectorizer = TfidfVectorizer(
        max_df=1.0,
        min_df=2,
        encoding='utf-8',
        decode_error='strict',
        max_features=None,
        stop_words='english',
        ngram_range=(1, 3),
        norm='l2',
        tokenizer=tokenizer,
        use_idf=True,
        sublinear_tf=False)
    #vectorizer.fit_transform(nltk_corpus)
    vectorizer.fit(nltk_corpus)
    # Avoid having to pickle instance methods; the tokenizer is set again on load
    vectorizer.tokenizer = None
    keys = np.array(vectorizer.vocabulary_.keys(), dtype=str)
    values = np.array(vectorizer.vocabulary_.values(), dtype=int)
    stop_words = np.array(list(vectorizer.stop_words_), dtype=str)
    with tables.openFile(self.data_path + 'tfidf_keys.hdf', 'w') as f:
        atom = tables.Atom.from_dtype(keys.dtype)
        ds = f.createCArray(f.root, 'keys', atom, keys.shape)
        ds[:] = keys
    with tables.openFile(self.data_path + 'tfidf_values.hdf', 'w') as f:
        atom = tables.Atom.from_dtype(values.dtype)
        ds = f.createCArray(f.root, 'values', atom, values.shape)
        ds[:] = values
    with tables.openFile(self.data_path + 'tfidf_stop_words.hdf', 'w') as f:
        atom = tables.Atom.from_dtype(stop_words.dtype)
        ds = f.createCArray(f.root, 'stop_words', atom, stop_words.shape)
        ds[:] = stop_words
    # drop the large fitted attributes before pickling; they are stored in HDF5 above
    vectorizer.vocabulary_ = None
    vectorizer.stop_words_ = None
    with open(self.data_path + 'tfidf.pkl', 'wb') as fout:
        cPickle.dump(vectorizer, fout)
    # restore the in-memory attributes so the returned vectorizer is usable
    vectorizer.vocabulary_ = dict(zip(keys, values))
    vectorizer.stop_words_ = stop_words
    return vectorizer
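train_tfidf deliberately clears tokenizer, vocabulary_ and stop_words_ before pickling, so loading has to put them back. A possible load counterpart, sketched here under the assumption that the same data_path and file names are used, mirroring the older PyTables/Python 2 naming (openFile, getNode, cPickle) of the example above; load_tfidf itself is not part of the original code:

import cPickle
import tables

def load_tfidf(data_path, tokenizer):
    # unpickle the stripped vectorizer and re-attach the pieces removed before saving
    with open(data_path + 'tfidf.pkl', 'rb') as f:
        vectorizer = cPickle.load(f)
    with tables.openFile(data_path + 'tfidf_keys.hdf', 'r') as f:
        keys = f.getNode('/keys')[:]
    with tables.openFile(data_path + 'tfidf_values.hdf', 'r') as f:
        values = f.getNode('/values')[:]
    with tables.openFile(data_path + 'tfidf_stop_words.hdf', 'r') as f:
        stop_words = f.getNode('/stop_words')[:]
    vectorizer.vocabulary_ = dict(zip(keys, values))
    vectorizer.stop_words_ = stop_words
    vectorizer.tokenizer = tokenizer  # e.g. the instance's tokenize method, as in train_tfidf
    return vectorizer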