

Python TfidfVectorizer.stop_words_ code examples

This article collects typical usage examples of sklearn.feature_extraction.text.TfidfVectorizer.stop_words_ in Python. If you are wondering what TfidfVectorizer.stop_words_ is, how to use it, or want to see it in real code, the curated samples below should help. You can also explore further usage examples of sklearn.feature_extraction.text.TfidfVectorizer itself.


Below are 2 code examples that use TfidfVectorizer.stop_words_, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
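Background: stop_words_ is a fitted attribute (inherited from CountVectorizer), not the stop_words parameter passed to the constructor. After fit() it holds the terms that were cut from the vocabulary by max_df, min_df or max_features. A minimal sketch of what it contains (the toy corpus and threshold are illustrative, not taken from the examples below):

from sklearn.feature_extraction.text import TfidfVectorizer

docs = [
	'the cat sat on the mat',
	'the dog sat on the log',
	'the cat chased the dog',
]
# max_df=0.9 drops any term appearing in more than 90% of the documents
vectorizer = TfidfVectorizer(max_df=0.9)
vectorizer.fit(docs)
print(sorted(vectorizer.vocabulary_))  # terms kept
print(vectorizer.stop_words_)          # terms dropped, here {'the'}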

Example 1: train_model

# Required import: from sklearn.feature_extraction.text import TfidfVectorizer [as alias]
# Or: from sklearn.feature_extraction.text.TfidfVectorizer import stop_words_ [as alias]
# This snippet also assumes: import os, sys, logging, numpy; from scipy.sparse import csr_matrix,
# plus the project-internal helpers retrieve_location_from_coordinates() and dump_model().
def train_model(texts, points, num_classes, model_dir, text_encoding='utf-8'):
	""" Given an iterable of (text, lat, lon) items, cluster the points into #num_classes and use
	them as labels, then extract unigram features, train a classifier and save it in models/model_name
	for future use. 

	Args:
	texts -- an iterable (e.g. a list) of texts e.g. ['this is the first text', 'this is the second text'].
	points -- an iterable (e.g. a list) of tuples in the form of (lat, lon) where coordinates are of type float e.g. [(1.2343, -10.239834r),(5.634534, -12.47563)]
	num_classes -- the number of desired clusters/labels/classes of the model.
	model_name -- the name of the directory within models/ that the model will be saved.
	"""
	
	if os.path.exists(model_dir):
		logging.error("Model directory " + model_dir + " already exists, please try another address.")
		sys.exit(-1)
	else:
		os.mkdir(model_dir)
	
	from sklearn.cluster import KMeans
	from sklearn.feature_extraction.text import TfidfVectorizer
	from sklearn.linear_model import SGDClassifier  # older releases exposed this via sklearn.linear_model.stochastic_gradient
	
	kmeans = KMeans(n_clusters=num_classes, random_state=0)
	points_arr = numpy.array(points)
	kmeans.fit_transform(points_arr)
	cluster_centers = kmeans.cluster_centers_
	sample_clusters = kmeans.labels_
	label_coordinate = {}
	for i in range(cluster_centers.shape[0]):
		lat, lon = cluster_centers[i, 0], cluster_centers[i, 1]
		label_coordinate[i] = (lat, lon)
	
	logging.info('extracting features from text...')
	vectorizer = TfidfVectorizer(encoding=text_encoding, stop_words='english', ngram_range=(1,1), max_df=0.5, min_df=0, binary=True, norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=True)
	X_train = vectorizer.fit_transform(texts)
	Y_train = sample_clusters
	# stop_words_ holds every term cut by max_df/min_df; it is only for introspection,
	# so drop it to keep the pickled model small.
	vectorizer.stop_words_ = None
	logging.info('the number of samples is %d and the number of features is %d' % (X_train.shape[0], X_train.shape[1]))
	
	logging.info('training the classifier...')
	logging.warning('Note that alpha (regularisation strength) should be tuned based on the performance on validation data.')
	clf = SGDClassifier(loss='log_loss', penalty='elasticnet', alpha=5e-5, l1_ratio=0.9, fit_intercept=True, max_iter=5, n_jobs=2, random_state=0, learning_rate="optimal")  # older scikit-learn: loss='log', n_iter=5
	clf.fit(X_train, Y_train)
	clf.coef_ = csr_matrix(clf.coef_)  # elastic-net zeroes most weights; store them sparsely
	
	logging.info('retrieving address of the given points using geopy (requires internet access).')
	coordinate_address = retrieve_location_from_coordinates(label_coordinate.values())

	logging.info('dumping the vectorizer, clf (trained model), label_coordinates and coordinate_locations into pickle files in ' + model_dir)
	dump_model(clf, vectorizer, coordinate_address, label_coordinate, model_dir)
Developer: afshinrahimi | Project: pigeo | Lines: 52 | Source: pigeo.py
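A note on why Example 1 sets vectorizer.stop_words_ = None before dumping: the attribute records every term excluded by max_df/min_df/max_features, which can be far larger than the vocabulary that was kept, and transform() never reads it. The scikit-learn documentation notes it exists only for introspection and can safely be removed before pickling. A small sketch of the size difference (the corpus and parameters are illustrative):

import pickle
from sklearn.feature_extraction.text import TfidfVectorizer

# A corpus with many rare terms, so the fitted stop_words_ set grows large.
texts = ['doc %d has term%d and term%d' % (i, i, i + 1) for i in range(1000)]

vec = TfidfVectorizer(max_features=50).fit(texts)
print(len(vec.vocabulary_), len(vec.stop_words_))  # 50 kept, thousands cut

before = len(pickle.dumps(vec))
vec.stop_words_ = None  # safe: transform() only needs vocabulary_ and the idf weights
after = len(pickle.dumps(vec))
print(before, after)    # the second pickle is noticeably smaller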

Example 2: train_tfidf

# Required import: from sklearn.feature_extraction.text import TfidfVectorizer [as alias]
# Or: from sklearn.feature_extraction.text.TfidfVectorizer import stop_words_ [as alias]
# This snippet also assumes: import nltk, pickle, numpy as np, tables (PyTables), and that the
# method lives in a class providing self.tokenize, self.verbose, self.data_path and
# self.get_bbc_news_corpus().
	def train_tfidf(self, tokenizer='custom', corpus='news'):

		if tokenizer == 'custom':
			tokenizer = self.tokenize

		nltk_corpus = []
		if corpus == 'all':
			nltk_corpus += [nltk.corpus.gutenberg.raw(f_id) for f_id in nltk.corpus.gutenberg.fileids()]
			nltk_corpus += [nltk.corpus.webtext.raw(f_id) for f_id in nltk.corpus.webtext.fileids()]
			nltk_corpus += [nltk.corpus.brown.raw(f_id) for f_id in nltk.corpus.brown.fileids()]
			nltk_corpus += [nltk.corpus.reuters.raw(f_id) for f_id in nltk.corpus.reuters.fileids()]
		elif corpus == 'news':
			nltk_corpus += self.get_bbc_news_corpus()

		if self.verbose:
			print "LENGTH nltk corpus corpus: {}".format(sum([len(d) for d in nltk_corpus]))


		vectorizer = TfidfVectorizer(
			max_df=1.0,
			min_df=2,
			encoding='utf-8',
			decode_error='strict',
			max_features=None,
			stop_words='english',
			ngram_range=(1, 3),
			norm='l2',
			tokenizer=tokenizer,
			use_idf=True,
			sublinear_tf=False)

		#vectorizer.fit_transform(nltk_corpus)
		vectorizer.fit(nltk_corpus)
		# Avoid having to pickle instance methods; we will set this method back on load
		vectorizer.tokenizer = None
		# Python 3: wrap the dict views in list(); encode strings to UTF-8 bytes, since
		# PyTables CArrays store fixed-width byte strings rather than unicode.
		keys = np.array([k.encode('utf-8') for k in vectorizer.vocabulary_.keys()])
		values = np.array(list(vectorizer.vocabulary_.values()), dtype=int)
		stop_words = np.array([w.encode('utf-8') for w in vectorizer.stop_words_])

		# openFile/createCArray are the pre-3.0 PyTables names; use the PEP 8 API.
		with tables.open_file(self.data_path + 'tfidf_keys.hdf', 'w') as f:
			atom = tables.Atom.from_dtype(keys.dtype)
			ds = f.create_carray(f.root, 'keys', atom, keys.shape)
			ds[:] = keys

		with tables.open_file(self.data_path + 'tfidf_values.hdf', 'w') as f:
			atom = tables.Atom.from_dtype(values.dtype)
			ds = f.create_carray(f.root, 'values', atom, values.shape)
			ds[:] = values

		with tables.open_file(self.data_path + 'tfidf_stop_words.hdf', 'w') as f:
			atom = tables.Atom.from_dtype(stop_words.dtype)
			ds = f.create_carray(f.root, 'stop_words', atom, stop_words.shape)
			ds[:] = stop_words

		vectorizer.vocabulary_ = None
		vectorizer.stop_words_ = None

		with open(self.data_path + 'tfidf.pkl', 'wb') as fout:  # cPickle is just pickle in Python 3
			pickle.dump(vectorizer, fout)

		# Restore the attributes (decoding the bytes written to HDF5) so the
		# returned vectorizer is immediately usable.
		vectorizer.vocabulary_ = {k.decode('utf-8'): int(v) for k, v in zip(keys, values)}
		vectorizer.stop_words_ = set(w.decode('utf-8') for w in stop_words)

		return vectorizer
Developer: webeng | Project: feature_engineering | Lines: 66 | Source: keywords.py
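Example 2 stores the vocabulary and stop_words_ in HDF5 side files and nulls them on the vectorizer before pickling, but the matching load step is not shown in the snippet. A hedged sketch of what it could look like, reusing the file names above and the PyTables 3 API (the original project may implement this differently):

import pickle
import tables

def load_tfidf(data_path):
	# Rebuild the vectorizer from the pickle plus the HDF5 side files.
	with open(data_path + 'tfidf.pkl', 'rb') as f:
		vectorizer = pickle.load(f)

	with tables.open_file(data_path + 'tfidf_keys.hdf', 'r') as f:
		keys = f.get_node('/keys').read()
	with tables.open_file(data_path + 'tfidf_values.hdf', 'r') as f:
		values = f.get_node('/values').read()
	with tables.open_file(data_path + 'tfidf_stop_words.hdf', 'r') as f:
		stop_words = f.get_node('/stop_words').read()

	# Restore the attributes train_tfidf() cleared, decoding the UTF-8 bytes.
	vectorizer.vocabulary_ = {k.decode('utf-8'): int(v) for k, v in zip(keys, values)}
	vectorizer.stop_words_ = set(w.decode('utf-8') for w in stop_words)
	# train_tfidf() also cleared the tokenizer; reattach one here if the
	# vectorizer was trained with a custom tokenizer.
	return vectorizer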


Note: the sklearn.feature_extraction.text.TfidfVectorizer.stop_words_ examples in this article were collected by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects contributed by various developers; copyright remains with the original authors, and any use or distribution must follow the corresponding project's license. Do not reproduce without permission.