当前位置: 首页>>代码示例>>Python>>正文


Python feature.IDF属性代码示例

本文整理汇总了Python中pyspark.mllib.feature.IDF属性的典型用法代码示例。如果您正苦于以下问题:Python feature.IDF属性的具体用法?Python feature.IDF怎么用?Python feature.IDF使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在pyspark.mllib.feature的用法示例。


在下文中一共展示了feature.IDF属性的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: parseTextRDDToIndex

# 需要导入模块: from pyspark.mllib import feature [as 别名]
# 或者: from pyspark.mllib.feature import IDF [as 别名]
def parseTextRDDToIndex(self, data, label=True):
        """Turn an RDD of whitespace-separated text lines into TF-IDF vectors.

        When ``label`` is True, each line is expected to begin with a numeric
        label followed by the document tokens, and the result is an RDD of
        ``LabeledPoint``; otherwise every token on the line is part of the
        document and the raw TF-IDF vector RDD is returned.
        """
        if label:
            labels = data.map(lambda line: float(line.split(" ", 1)[0]))
            documents = data.map(lambda line: line.split(" ", 1)[1].split(" "))
        else:
            documents = data.map(lambda line: line.split(" "))

        # Hash tokens into term-frequency vectors; cache because the IDF
        # stage traverses the RDD twice (fit, then transform).
        term_freq = HashingTF().transform(documents)
        term_freq.cache()

        # minDocFreq=2: terms appearing in fewer than two documents get weight 0.
        idf_model = IDF(minDocFreq=2).fit(term_freq)
        tfidf = idf_model.transform(term_freq)

        if not label:
            return tfidf
        return labels.zip(tfidf).map(lambda pair: LabeledPoint(pair[0], pair[1]))
开发者ID:openstack,项目名称:meteos,代码行数:20,代码来源:meteos-script-1.6.0.py

示例2: produce_tfidf

# 需要导入模块: from pyspark.mllib import feature [as 别名]
# 或者: from pyspark.mllib.feature import IDF [as 别名]
def produce_tfidf(x):
    """Compute TF-IDF feature vectors for an RDD of tokenized documents.

    Terms occurring in fewer than 5 documents receive an IDF weight of 0.
    """
    term_frequencies = HashingTF().transform(x)
    idf_model = IDF(minDocFreq=5).fit(term_frequencies)
    return idf_model.transform(term_frequencies)

# Load in reviews 
开发者ID:lcdm-uiuc,项目名称:cs199-sp17,代码行数:9,代码来源:bayes_tfidf.py

示例3: test_idf_model

# 需要导入模块: from pyspark.mllib import feature [as 别名]
# 或者: from pyspark.mllib.feature import IDF [as 别名]
def test_idf_model(self):
        """An IDF model fitted on 11-dimensional vectors exposes 11 idf weights."""
        rows = [
            Vectors.dense([1, 2, 6, 0, 2, 3, 1, 1, 0, 0, 3]),
            Vectors.dense([1, 3, 0, 1, 3, 0, 0, 2, 0, 0, 1]),
            Vectors.dense([1, 4, 1, 0, 0, 4, 9, 0, 1, 2, 0]),
            Vectors.dense([2, 1, 0, 3, 0, 0, 5, 0, 2, 3, 9]),
        ]
        fitted = IDF().fit(self.sc.parallelize(rows, 2))
        self.assertEqual(len(fitted.idf()), 11)
开发者ID:alec-heif,项目名称:MIT-Thesis,代码行数:12,代码来源:tests.py

示例4: get_tfidf_features

# 需要导入模块: from pyspark.mllib import feature [as 别名]
# 或者: from pyspark.mllib.feature import IDF [as 别名]
def get_tfidf_features(txt_rdd):
    """Map an RDD of token lists to TF-IDF feature vectors."""
    term_freq = HashingTF().transform(txt_rdd)
    # Cached because IDF().fit and the subsequent transform both scan the RDD.
    term_freq.cache()
    idf_model = IDF().fit(term_freq)
    return idf_model.transform(term_freq)
开发者ID:hanhanwu,项目名称:Hanhan_Play_With_Social_Media,代码行数:10,代码来源:reddit_tfidf_LDA.py

示例5: classify_tweet

# 需要导入模块: from pyspark.mllib import feature [as 别名]
# 或者: from pyspark.mllib.feature import IDF [as 别名]
def classify_tweet(tf):
    """Fit an IDF model on the term-frequency RDD and apply it in one step."""
    idf_model = IDF().fit(tf)
    return idf_model.transform(tf)
开发者ID:xuwenyihust,项目名称:Twitter-Hashtag-Tracking,代码行数:4,代码来源:analysis.py

示例6: extractKeywords_Train

# 需要导入模块: from pyspark.mllib import feature [as 别名]
# 或者: from pyspark.mllib.feature import IDF [as 别名]
def extractKeywords_Train(self):
        """Compute TF-IDF vectors for the training file and persist them as text.

        Each line of ``self.trainingfile`` is split on spaces and its first
        field is dropped; the remaining tokens form one document.
        """
        documents = self.sc.textFile(self.trainingfile).map(
            lambda line: line.split(" ")[1:])

        hasher = HashingTF()
        term_freq = hasher.transform(documents)
        # Cached: both the IDF fit and the transform below traverse this RDD.
        term_freq.cache()

        # minDocFreq=2: terms seen in only one document are weighted to zero.
        idf_model = IDF(minDocFreq=2).fit(term_freq)
        tfidf = idf_model.transform(term_freq)

        # NOTE(review): "AAA" looks like a placeholder output path — confirm.
        tfidf.saveAsTextFile("AAA")
开发者ID:Labyrinth108,项目名称:Content-Based-News-Recommendation-System-in-Spark,代码行数:13,代码来源:engine.py

示例7: getRecommendation

# 需要导入模块: from pyspark.mllib import feature [as 别名]
# 或者: from pyspark.mllib.feature import IDF [as 别名]
def getRecommendation(self, user_id):
        """Recommend up to 10 candidate news ids for ``user_id``.

        The user's read articles and the candidate articles are vectorized
        together with a bag-of-words CountVectorizer, re-weighted to TF-IDF,
        and each read article contributes its most cosine-similar candidates
        to the result.

        Returns a list of news ids drawn from ``candidates_newsid``
        (may contain duplicates if a candidate is close to several read
        articles); returns ``[]`` when there is nothing to recommend from.
        """
        user_news, candidates_news, candidates_newsid = self.getUserReadNews(user_id)
        # Guard: avoid ZeroDivisionError below and vectorizing an empty corpus.
        if not user_news or not candidates_news:
            return []
        all_news = user_news + candidates_news

        # Bag-of-words term counts over every article (read + candidate).
        vectorizer = CountVectorizer()
        X = vectorizer.fit_transform(all_news)

        # Re-weight counts to TF-IDF; tfidf[i] is the dense vector of document i.
        transformer = TfidfTransformer()
        tfidf = transformer.fit_transform(X).toarray()

        recommend_num = 10
        # BUG FIX: must be integer division — on Python 3, "/" yields a float,
        # and a float slice bound ([:recommend_per_news]) raises TypeError.
        recommend_per_news = recommend_num // len(user_news)
        recommend_list = []
        user_news_len = len(user_news)
        candidates_news_len = len(candidates_news)

        # For each article the user read, collect its most similar candidates.
        for i in range(user_news_len):
            news_candidate_sim = []
            for j in range(candidates_news_len):
                # Cosine similarity between read article i and candidate j
                # (candidates start at offset user_news_len in tfidf).
                sim = 1 - spatial.distance.cosine(tfidf[i], tfidf[j + user_news_len])
                news_candidate_sim.append(sim)
            # Indices of the top-k most similar candidates (descending).
            k_max_index = (-np.array(news_candidate_sim)).argsort()[:recommend_per_news]
            recommend_list.extend(k_max_index)

        recommend_news_id = [candidates_newsid[i] for i in recommend_list]
        return recommend_news_id

    # def getKeywords(self):
    #
    #     news = sc.parallelize(self.getUserReadNews())
    #     x = news.collect()
    #     hashing = HashingTF()
    #
    #     news_tf = hashing.transform(news)
    #     idfIgnore = IDF(minDocFreq=2).fit(news_tf)
    #     result = idfIgnore.transform(news_tf) 
开发者ID:Labyrinth108,项目名称:Content-Based-News-Recommendation-System-in-Spark,代码行数:46,代码来源:engine.py


注:本文中的pyspark.mllib.feature.IDF属性示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。