

Python Bunch.label Method Code Examples

This article collects typical usage examples of the Python method sklearn.datasets.base.Bunch.label, drawn from open-source projects. If you are unsure what Bunch.label does, how to use it, or want to see concrete examples, the curated code samples below should help. You can also explore the other usage examples of sklearn.datasets.base.Bunch.


The following presents 4 code examples of the Bunch.label method, sorted by popularity by default.
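Before the examples, a quick sketch of what Bunch provides may be useful: it is a dict subclass whose keys are also exposed as attributes, so wordbag.label and wordbag["label"] name the same object. Note that the sklearn.datasets.base module used throughout these examples was removed in later scikit-learn releases; the sketch below therefore imports Bunch from sklearn.utils, its current location.

from sklearn.utils import Bunch  # modern import path; the old code below uses sklearn.datasets.base

# Bunch is a dict whose keys double as attributes.
wordbag = Bunch(target_name=[], label=[], filenames=[])
wordbag.label = [0, 1, 1, 0]   # attribute-style assignment...
print(wordbag["label"])        # ...is mirrored by dict-style access: [0, 1, 1, 0]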

Example 1: calc_tfidf

# Required import: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import label [as alias]
import joblib
from sklearn.datasets.base import Bunch
from sklearn.feature_extraction.text import TfidfVectorizer

def calc_tfidf(trainsetfile, stopwordfile, dstdir):
    # Load the persisted training set and copy its metadata into a fresh Bunch.
    data_set = joblib.load(trainsetfile)
    wordbag = Bunch(target_name=[], label=[], filenames=[], tdm=[], vocabulary={})
    wordbag.target_name = data_set.target_name  # the original read "tatget_name", a typo
    wordbag.label = data_set.label

    # Vectorize the corpus with tf-idf; read_stopword is defined elsewhere in process_tool.py.
    corpus = data_set.contents
    stopwordlist = read_stopword(stopwordfile)
    vectorize = TfidfVectorizer(sublinear_tf=True, max_df=0.8, min_df=3,
                                max_features=50000, stop_words=stopwordlist)
    feature_train = vectorize.fit_transform(corpus)
    wordbag.tdm = feature_train
    wordbag.vocabulary = vectorize.vocabulary_

    # Persist the bag of words for reuse at prediction time.
    joblib.dump(wordbag, dstdir + "/" + "word_bag.data", compress=3)
Author: wadeallstar, Project: python-fraud-detect, Lines: 15, Source: process_tool.py
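A hypothetical invocation of calc_tfidf; the paths below are illustrative placeholders, not taken from the project:

# Hypothetical usage; the paths are placeholders.
calc_tfidf(trainsetfile="train_set.data",
           stopwordfile="stop_words.txt",
           dstdir="train_wordbag")
# train_wordbag/word_bag.data now holds the tf-idf matrix (tdm), the labels,
# and the fitted vocabulary, ready to feed a classifier.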

Example 2: testset_tfidf

# Required import: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import label [as alias]
import joblib
from sklearn.datasets.base import Bunch
from sklearn.feature_extraction.text import TfidfVectorizer

def testset_tfidf(testsetfile, stopwordfile, myvocabulary):
    # Load the persisted test set and copy its metadata into a fresh Bunch.
    data_set = joblib.load(testsetfile)
    wordbag = Bunch(target_name=[], label=[], filenames=[], tdm=[], vocabulary={})
    wordbag.target_name = data_set.target_name  # the original read "tatget_name", a typo
    wordbag.label = data_set.label

    # Pin the vectorizer to the training vocabulary so the test features
    # align column-for-column with the training tf-idf matrix.
    corpus = data_set.contents
    stopwordlist = read_stopword(stopwordfile)
    vectorize = TfidfVectorizer(sublinear_tf=True, stop_words=stopwordlist,
                                vocabulary=myvocabulary)
    feature_train = vectorize.fit_transform(corpus)
    wordbag.tdm = feature_train
    joblib.dump(wordbag, "test_wordbag/test_word_bag.data", compress=3)
    return wordbag
Author: wadeallstar, Project: python-fraud-detect, Lines: 15, Source: process_tool.py
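A hypothetical end-to-end handoff between the two functions above, showing how the training vocabulary flows into the test-set vectorizer (the paths are placeholders):

# Hypothetical pairing of calc_tfidf and testset_tfidf; paths are placeholders.
calc_tfidf("train_set.data", "stop_words.txt", "train_wordbag")
train_bag = joblib.load("train_wordbag/word_bag.data")

# Vectorize the test set against the training vocabulary so that
# feature columns line up between the two matrices.
test_bag = testset_tfidf("test_set.data", "stop_words.txt", train_bag.vocabulary)
print(test_bag.tdm.shape)  # (n_test_docs, len(train_bag.vocabulary))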

Example 3: reload

# Required import: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import label [as alias]
import sys
import pickle
from sklearn.datasets.base import Bunch

reload(sys)  # Python 2 idiom, usually followed by sys.setdefaultencoding(...)

# Load the training corpus.
data_set = {}
# Path to the persisted training set.
train_path = 'text_corpus1_wordbag/train_set.data'
file_obj = open(train_path, 'rb')

# Read the persisted object back.
data_set = pickle.load(file_obj)
file_obj.close()

# Define the bag-of-words data structure.
wordbag = Bunch(target_name=[], label=[], filenames=[], tdm=[], vocabulary={})
wordbag.target_name = data_set.target_name
wordbag.label = data_set.label
wordbag.filenames = data_set.filenames

# Build the corpus.
corpus = data_set.contents

# Load the stop-word list from file.
stpwrdpath = 'extra_dict/hlt_stop_words.txt'
stpwrd_dic = open(stpwrdpath, 'rb')
stpwrd_content = stpwrd_dic.read()

# Convert the stop words to a list.
stpwrdlst = stpwrd_content.splitlines()
stpwrd_dic.close()

# Time the bag-of-words build: record the start time (the snippet is truncated here).
Author: Pengfei-Zhu, Project: DataMining, Lines: 33, Source: tf-idffinal.py
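The snippet above breaks off just before the tf-idf computation. A plausible continuation, consistent with the variables it has already prepared; the TfidfVectorizer parameters here are illustrative assumptions, not recovered from the original project:

# Hypothetical continuation of the truncated snippet; parameters are assumptions.
import time
from sklearn.feature_extraction.text import TfidfVectorizer

start = time.time()
vectorizer = TfidfVectorizer(stop_words=stpwrdlst, sublinear_tf=True)
wordbag.tdm = vectorizer.fit_transform(corpus)
wordbag.vocabulary = vectorizer.vocabulary_
print("bag of words built in %.2fs" % (time.time() - start))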

Example 4: fetch_20newsgroups

# Required import: from sklearn.datasets.base import Bunch [as alias]
# Or: from sklearn.datasets.base.Bunch import label [as alias]
import pickle  # persistence module
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.base import Bunch
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
##################################################################
## Load the data
categories = ["alt.atheism", "soc.religion.christian", "comp.graphics", "sci.med"]  # news categories to download
data_set = fetch_20newsgroups(subset="train", categories=categories, shuffle=True, random_state=42)  # downloads the full corpus first, then extracts the requested subset
print(data_set.target_names)  # ['alt.atheism', 'comp.graphics', 'sci.med', 'soc.religion.christian']
##################################################################
## Define the bag-of-words data structure
# tdm: the bag of words after tf-idf weighting
stpwrdlst = []  # empty stop-word list
wordbag = Bunch(target_name=[], label=[], filenames=[], tdm=[], vocabulary={}, stpwrdlst=[])
wordbag.target_name = data_set.target_names
wordbag.label = data_set.target
wordbag.filenames = data_set.filenames
wordbag.stpwrdlst = stpwrdlst

vectorizer = CountVectorizer(stop_words=stpwrdlst)  # CountVectorizer builds the vector space model (the original comment misnamed it TfidfVectorizer)
transformer = TfidfTransformer()  # computes each term's tf-idf weight
fea_train = vectorizer.fit_transform(data_set.data)  # documents -> term-frequency matrix
print(fea_train.shape)  # (2257, 35788); 2257 documents, 35788 terms

wordbag.tdm = transformer.fit_transform(fea_train)  # apply tf-idf weighting; the original stored the raw counts here, contradicting the tdm comment above
wordbag.vocabulary = vectorizer.vocabulary_
##################################################################
## Persist the bag of words
file_obj = open("tmp.data", "wb")
pickle.dump(wordbag, file_obj)
file_obj.close()
Author: coder352, Project: shellscript, Lines: 33, Source: l20_Bunch-封裝_pickle-保存.py
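To complete the round trip, a short sketch of reading the persisted bag back; the filename tmp.data comes from the snippet above, everything else is standard pickle usage:

# Reload the word bag persisted by the snippet above.
import pickle

with open("tmp.data", "rb") as file_obj:
    wordbag = pickle.load(file_obj)

print(wordbag.target_name)  # category names
print(wordbag.label[:10])   # first ten integer labels
print(wordbag.tdm.shape)    # tf-idf matrix, e.g. (2257, 35788)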


Note: the sklearn.datasets.base.Bunch.label examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.