

Python movie_reviews.fileids Method Code Examples

This article collects typical usage examples of the Python method nltk.corpus.movie_reviews.fileids. If you are wondering what exactly movie_reviews.fileids does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples from its parent module, nltk.corpus.movie_reviews.


The following shows 4 code examples of the movie_reviews.fileids method, sorted by popularity by default.
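Before the full examples, here is a minimal sketch of what movie_reviews.fileids returns (this assumes the corpus has already been downloaded via nltk.download('movie_reviews')):

from nltk.corpus import movie_reviews

# all file ids across both categories, e.g. 'neg/cv000_29416.txt'
all_ids = movie_reviews.fileids()
print(len(all_ids))  # 2000 in the standard corpus

# restrict to a single category: 'pos' or 'neg' (1000 ids each)
pos_ids = movie_reviews.fileids('pos')
print(pos_ids[0])  # e.g. 'pos/cv000_29590.txt'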

Example 1: load_movie_reviews

# Required import: from nltk.corpus import movie_reviews [as alias]
# Or: from nltk.corpus.movie_reviews import fileids [as alias]
def load_movie_reviews():

    # movie_reviews is a sizeable corpus to import, so only load it if we have to
    from nltk.corpus import movie_reviews
    try:
        movie_reviews.categories()
    except LookupError:
        import nltk
        print('This appears to be your first time using the NLTK Movie Reviews corpus. We will first download the necessary corpus (this is a one-time download that might take a little while).')
        nltk.download('movie_reviews')
        from nltk.corpus import movie_reviews

    raw_data = []

    # NLTK's corpus is structured in an interesting way:
    # first iterate through the two categories (pos and neg)
    for category in movie_reviews.categories():

        if category == 'pos':
            pretty_category_name = 'positive'
        elif category == 'neg':
            pretty_category_name = 'negative'

        # each category is just a collection of fileids, so grab those
        for fileid in movie_reviews.fileids(category):

            # each review is an NLTK word sequence; join its items into one string
            review_words = movie_reviews.words(fileid)
            review_text = ' '.join(review_words)

            review_dictionary = {
                'text': review_text,
                'sentiment': pretty_category_name
            }

            raw_data.append(review_dictionary)

    return raw_data
Developer: ClimbsRocks, Project: empythy, Lines: 43, Source: utils.py
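A quick usage sketch for the function above (the counts assume the standard corpus of 1000 positive and 1000 negative reviews):

reviews = load_movie_reviews()
print(len(reviews))            # 2000
print(reviews[0]['sentiment']) # 'negative' -- categories() yields 'neg' before 'pos'
print(reviews[0]['text'][:60])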

Example 2: demo_movie_reviews

# Required import: from nltk.corpus import movie_reviews [as alias]
# Or: from nltk.corpus.movie_reviews import fileids [as alias]
def demo_movie_reviews(trainer, n_instances=None, output=None):
    """
    Train classifier on all instances of the Movie Reviews dataset.
    The corpus has been preprocessed using the default sentence tokenizer and
    WordPunctTokenizer.
    Features are composed of:
        - most frequent unigrams

    :param trainer: `train` method of a classifier.
    :param n_instances: the number of total reviews that have to be used for
        training and testing. Reviews will be equally split between positive and
        negative.
    :param output: the output file where results have to be reported.
    """
    from nltk.corpus import movie_reviews
    from nltk.sentiment import SentimentAnalyzer
    # helper functions used below are defined in nltk.sentiment.util,
    # the module this snippet was taken from
    from nltk.sentiment.util import (split_train_test, extract_unigram_feats,
                                     output_markdown)

    if n_instances is not None:
        n_instances = n_instances // 2

    pos_docs = [(list(movie_reviews.words(pos_id)), 'pos') for pos_id in movie_reviews.fileids('pos')[:n_instances]]
    neg_docs = [(list(movie_reviews.words(neg_id)), 'neg') for neg_id in movie_reviews.fileids('neg')[:n_instances]]
    # We separately split positive and negative instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_pos_docs, test_pos_docs = split_train_test(pos_docs)
    train_neg_docs, test_neg_docs = split_train_test(neg_docs)

    training_docs = train_pos_docs+train_neg_docs
    testing_docs = test_pos_docs+test_neg_docs

    sentim_analyzer = SentimentAnalyzer()
    all_words = sentim_analyzer.all_words(training_docs)

    # Add simple unigram word features
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    # Apply features to obtain a feature-value representation of our datasets
    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)

    classifier = sentim_analyzer.train(trainer, training_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print('Your classifier does not provide a show_most_informative_features() method.')
    results = sentim_analyzer.evaluate(test_set)

    if output:
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(output, Dataset='Movie_reviews', Classifier=type(classifier).__name__,
                        Tokenizer='WordPunctTokenizer', Feats=extr, Results=results,
                        Instances=n_instances) 
Developer: SignalMedia, Project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, Lines: 54, Source: util.py
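A minimal usage sketch: the trainer argument is a classifier's train method, for example NLTK's built-in Naive Bayes (the n_instances value is illustrative):

from nltk.classify import NaiveBayesClassifier

# train and evaluate on a balanced subset of 100 reviews (50 pos / 50 neg)
demo_movie_reviews(NaiveBayesClassifier.train, n_instances=100)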

Example 3: demo_movie_reviews

# Required import: from nltk.corpus import movie_reviews [as alias]
# Or: from nltk.corpus.movie_reviews import fileids [as alias]
def demo_movie_reviews(trainer, n_instances=None, output=None):
    """
    Train classifier on all instances of the Movie Reviews dataset.
    The corpus has been preprocessed using the default sentence tokenizer and
    WordPunctTokenizer.
    Features are composed of:
        - most frequent unigrams

    :param trainer: `train` method of a classifier.
    :param n_instances: the number of total reviews that have to be used for
        training and testing. Reviews will be equally split between positive and
        negative.
    :param output: the output file where results have to be reported.
    """
    from nltk.corpus import movie_reviews
    from sentiment_analyzer import SentimentAnalyzer
    # split_train_test, extract_unigram_feats and output_markdown are
    # module-level helpers in the source project's util.py

    if n_instances is not None:
        n_instances = n_instances // 2

    pos_docs = [(list(movie_reviews.words(pos_id)), 'pos') for pos_id in movie_reviews.fileids('pos')[:n_instances]]
    neg_docs = [(list(movie_reviews.words(neg_id)), 'neg') for neg_id in movie_reviews.fileids('neg')[:n_instances]]
    # We separately split positive and negative instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_pos_docs, test_pos_docs = split_train_test(pos_docs)
    train_neg_docs, test_neg_docs = split_train_test(neg_docs)

    training_docs = train_pos_docs+train_neg_docs
    testing_docs = test_pos_docs+test_neg_docs

    sentim_analyzer = SentimentAnalyzer()
    all_words = sentim_analyzer.all_words(training_docs)

    # Add simple unigram word features
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    # Apply features to obtain a feature-value representation of our datasets
    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)

    classifier = sentim_analyzer.train(trainer, training_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print('Your classifier does not provide a show_most_informative_features() method.')
    results = sentim_analyzer.evaluate(test_set)

    if output:
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(output, Dataset='Movie_reviews', Classifier=type(classifier).__name__,
                        Tokenizer='WordPunctTokenizer', Feats=extr, Results=results,
                        Instances=n_instances) 
Developer: jarrellmark, Project: neighborhood_mood_aws, Lines: 54, Source: util.py

Example 4: getFeatures

# Required import: from nltk.corpus import movie_reviews [as alias]
# Or: from nltk.corpus.movie_reviews import fileids [as alias]
# module-level dependencies in the source project:
#   from nltk.corpus import movie_reviews, stopwords
#   utils is a project module; dv is presumably a sklearn DictVectorizer
def getFeatures(numWordsToUse):
    # stopwords are common words that occur so frequently as to be useless for NLP
    stopWords = set(stopwords.words('english'))

    # read in all the words of each movie review, and its associated sentiment
    reviewDocuments = []
    sentiment = []

    for category in movie_reviews.categories():
        for fileid in movie_reviews.fileids(category):
            reviewWords = movie_reviews.words(fileid)

            # drop stopwords from the review
            cleanedReview = [word for word in reviewWords if word not in stopWords]

            reviewDocuments.append(cleanedReview)
            if category == 'pos':
                sentiment.append(1)
            elif category == 'neg':
                sentiment.append(0)
            else:
                print('We are not sure what this category is: ' + category)

    global popularWords
    formattedReviews, sentiment, popularWords = utils.nlpFeatureEngineering(
            reviewDocuments, sentiment, 50, numWordsToUse, 'counts'
        )

    # transform the list of dictionaries into a sparse matrix
    sparseFeatures = dv.fit_transform(formattedReviews)

    return sparseFeatures, sentiment
Developer: ClimbsRocks, Project: nlpSentiment, Lines: 38, Source: nltkMovieReviews.py
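A hedged end-to-end sketch of feeding these features into a classifier; the train/test split and LogisticRegression below are illustrative additions, not part of the source project:

from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

sparseFeatures, sentiment = getFeatures(numWordsToUse=3000)
X_train, X_test, y_train, y_test = train_test_split(
    sparseFeatures, sentiment, test_size=0.25, random_state=42)

clf = LogisticRegression()
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))  # accuracy on the held-out reviews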


Note: the nltk.corpus.movie_reviews.fileids examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and distribution and use should follow each project's License. Do not republish without permission.