This page collects typical usage examples of the Python method sklearn.datasets.base.Bunch.label. If you are unsure what Bunch.label does or how to use it, the curated code examples below may help. You can also read more about its containing class, sklearn.datasets.base.Bunch.
The following shows 4 code examples of the Bunch.label method, sorted by popularity by default.
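Before looking at the examples, it helps to know how Bunch behaves: it is a dictionary subclass whose keys are also exposed as attributes, so wordbag.label and wordbag["label"] refer to the same object. The minimal sketch below illustrates this; the field names (label, target_name) simply follow the examples on this page and are not required by Bunch itself.

from sklearn.datasets.base import Bunch  # in recent scikit-learn releases: from sklearn.utils import Bunch

wordbag = Bunch(target_name=[], label=[])  # a dict whose keys double as attributes
wordbag.label = [0, 1, 1, 0]               # attribute-style assignment...
print(wordbag["label"])                    # ...is visible through dict-style access: [0, 1, 1, 0]
print(wordbag.target_name)                 # []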
Example 1: calc_tfidf
# Required import: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import label [as alias]
import joblib  # in older scikit-learn projects this may be: from sklearn.externals import joblib
from sklearn.datasets.base import Bunch
from sklearn.feature_extraction.text import TfidfVectorizer

def calc_tfidf(trainsetfile, stopwordfile, dstdir):
    # Load the persisted training set (a Bunch with target_name, label, contents, ...)
    data_set = joblib.load(trainsetfile)
    wordbag = Bunch(target_name=[], label=[], filenames=[], tdm=[], vocabulary={})
    wordbag.target_name = data_set.target_name
    wordbag.label = data_set.label
    corpus = data_set.contents
    # read_stopword() is a helper from the original project (not shown) that returns a list of stop words
    stopwordlist = read_stopword(stopwordfile)
    vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.8, min_df=3,
                                 max_features=50000, stop_words=stopwordlist)
    feature_train = vectorizer.fit_transform(corpus)
    wordbag.tdm = feature_train
    wordbag.vocabulary = vectorizer.vocabulary_
    joblib.dump(wordbag, dstdir + "/" + "word_bag.data", compress=3)
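A hypothetical call to calc_tfidf might look like the following; the training-set and stop-word paths are borrowed from the other examples on this page, while the output directory is only illustrative.

calc_tfidf("text_corpus1_wordbag/train_set.data",  # persisted training Bunch
           "extra_dict/hlt_stop_words.txt",        # stop-word file
           "train_wordbag")                        # directory that will receive word_bag.data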
Example 2: testset_tfidf
# Required import: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import label [as alias]
import joblib  # in older scikit-learn projects this may be: from sklearn.externals import joblib
from sklearn.datasets.base import Bunch
from sklearn.feature_extraction.text import TfidfVectorizer

def testset_tfidf(testsetfile, stopwordfile, myvocabulary):
    # Load the persisted test set and vectorize it with the vocabulary learned on the training set
    data_set = joblib.load(testsetfile)
    wordbag = Bunch(target_name=[], label=[], filenames=[], tdm=[], vocabulary={})
    wordbag.target_name = data_set.target_name
    wordbag.label = data_set.label
    corpus = data_set.contents
    stopwordlist = read_stopword(stopwordfile)  # project-local helper (not shown) returning a stop-word list
    # Fixing vocabulary=myvocabulary keeps the test matrix aligned with the training feature space
    vectorizer = TfidfVectorizer(sublinear_tf=True, stop_words=stopwordlist, vocabulary=myvocabulary)
    feature_train = vectorizer.fit_transform(corpus)
    wordbag.tdm = feature_train
    joblib.dump(wordbag, "test_wordbag/test_word_bag.data", compress=3)
    return wordbag
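A sketch of how Examples 1 and 2 fit together, assuming the illustrative paths above: the word bag produced by calc_tfidf supplies the vocabulary that testset_tfidf needs so that the test matrix shares the training feature space.

import joblib

train_bag = joblib.load("train_wordbag/word_bag.data")          # output of calc_tfidf
test_bag = testset_tfidf("text_corpus1_wordbag/test_set.data",  # illustrative test-set path
                         "extra_dict/hlt_stop_words.txt",
                         train_bag.vocabulary)
print(test_bag.tdm.shape)  # rows = test documents, columns = training vocabulary size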
Example 3: reload
# Required import: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import label [as alias]
import sys
import pickle
from sklearn.datasets.base import Bunch

reload(sys)  # Python 2 idiom, usually followed by sys.setdefaultencoding('utf-8')
# Load the training corpus
# Path to the persisted training set
train_path = 'text_corpus1_wordbag/train_set.data'
file_obj = open(train_path, 'rb')
# Read back the persisted object
data_set = pickle.load(file_obj)
file_obj.close()
# Define the word-bag data structure
wordbag = Bunch(target_name=[], label=[], filenames=[], tdm=[], vocabulary={})
wordbag.target_name = data_set.target_name
wordbag.label = data_set.label
wordbag.filenames = data_set.filenames
# Build the corpus
corpus = data_set.contents
# Load the stop-word list from a file
stpwrdpath = 'extra_dict/hlt_stop_words.txt'
stpwrd_dic = open(stpwrdpath, 'rb')
stpwrd_content = stpwrd_dic.read()
# Convert the stop words to a list
stpwrdlst = stpwrd_content.splitlines()
stpwrd_dic.close()
# Time the word-bag construction: record the start time
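The excerpt stops at the timing comment. A plausible continuation, modeled on Examples 1 and 4 and not part of the original snippet, would record the start time, vectorize the corpus with the stop-word list loaded above, and fill the remaining Bunch fields:

import time
from sklearn.feature_extraction.text import TfidfVectorizer

start = time.time()  # start of the timed section (continuation sketch, not from the original)
vectorizer = TfidfVectorizer(sublinear_tf=True, stop_words=stpwrdlst)
wordbag.tdm = vectorizer.fit_transform(corpus)
wordbag.vocabulary = vectorizer.vocabulary_
print("word bag built in %.2f seconds" % (time.time() - start))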
Example 4: fetch_20newsgroups
# Required import: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import label [as alias]
import pickle  # used below to persist the word bag
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.base import Bunch
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
##################################################################
## Load the data
categories = ["alt.atheism", "soc.religion.christian", "comp.graphics", "sci.med"]  # newsgroup categories to fetch
data_set = fetch_20newsgroups(subset="train", categories=categories, shuffle=True, random_state=42)  # downloads the full archive once, then extracts the requested categories
print(data_set.target_names)  # ['alt.atheism', 'comp.graphics', 'sci.med', 'soc.religion.christian']
##################################################################
## Define the word-bag data structure
# tdm: the tf-idf weighted term-document matrix
stpwrdlst = []  # empty stop-word list
wordbag = Bunch(target_name=[], label=[], filenames=[], tdm=[], vocabulary={}, stpwrdlst=[])
wordbag.target_name = data_set.target_names
wordbag.label = data_set.target
wordbag.filenames = data_set.filenames
wordbag.stpwrdlst = stpwrdlst
vectorizer = CountVectorizer(stop_words=stpwrdlst)  # CountVectorizer builds the vector-space model (the bag of words)
transformer = TfidfTransformer()  # computes a tf-idf weight for every term
fea_train = vectorizer.fit_transform(data_set.data)  # turn the documents into a term-frequency matrix
print(fea_train.shape)  # (2257, 35788): 2257 documents, 35788 terms
wordbag.tdm = transformer.fit_transform(fea_train)  # store the tf-idf weighted matrix as tdm
wordbag.vocabulary = vectorizer.vocabulary_
##################################################################
## Persist the word bag
file_obj = open("tmp.data", "wb")
pickle.dump(wordbag, file_obj)
file_obj.close()
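To check the persisted word bag, it can be loaded back with pickle; this round-trip is only a sketch and is not shown in the original example.

with open("tmp.data", "rb") as f:
    restored = pickle.load(f)
print(restored.target_name)  # the four newsgroup names
print(restored.label[:10])   # the first ten class labels
print(restored.tdm.shape)    # (2257, 35788)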