

Python nltk.PorterStemmer code examples

This article collects typical usage examples of nltk.PorterStemmer in Python. If you are wondering how to use nltk.PorterStemmer in practice, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples from the nltk package.


The following presents 10 code examples of nltk.PorterStemmer, ordered by popularity.
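Before the project excerpts, here is a minimal, self-contained sketch of what the stemmer does on its own; it is illustrative only and not drawn from any of the projects below.

import nltk

# PorterStemmer reduces inflected English words to a common stem.
stemmer = nltk.PorterStemmer()

print(stemmer.stem("running"))  # -> "run"
print(stemmer.stem("cats"))     # -> "cat"
print(stemmer.stem("ponies"))   # -> "poni"  (stems are not always dictionary words)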

Example 1: __init__

# Required module: import nltk [as alias]
# Or: from nltk import PorterStemmer [as alias]
def __init__(
        self,
        anonymize=True,
        trim_window=5,
        lowercase=True,
        drop_stopwords=True,
        stem=True,
        ngram_range=(1, 3),
        **vectorizer_kwargs,
    ):
        self.anonymize = anonymize
        self.lowercase = lowercase
        self.drop_stopwords = drop_stopwords
        if drop_stopwords:
            nltk.download("stopwords")
            self.stopwords = set(nltk.corpus.stopwords.words("english"))
        self.trim_window = trim_window
        self.stem = stem
        if stem:
            self.porter = nltk.PorterStemmer()

        self.vectorizer = CountVectorizer(
            ngram_range=ngram_range, binary=True, **vectorizer_kwargs
        ) 
Developer: HazyResearch, Project: metal, Lines of code: 26, Source file: ngram_featurizer.py

Example 2: _create_frequency_table

# Required module: import nltk [as alias]
# Or: from nltk import PorterStemmer [as alias]
def _create_frequency_table(text_string) -> dict:
    """
    We create a dictionary for the word frequency table.
    For this, we only use words that are not in the stopWords set.

    Removes stop words and builds the frequency table.
    Stemmer - an algorithm that reduces words to their root form.
    :rtype: dict
    """
    stopWords = set(stopwords.words("english"))
    words = word_tokenize(text_string)
    ps = PorterStemmer()

    freqTable = dict()
    for word in words:
        word = ps.stem(word)
        if word in stopWords:
            continue
        if word in freqTable:
            freqTable[word] += 1
        else:
            freqTable[word] = 1

    return freqTable 
Developer: akashp1712, Project: nlp-akash, Lines of code: 26, Source file: TF_IDF_Summarization.py
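A minimal sketch of how _create_frequency_table might be called end to end. The imports and nltk.download calls below are assumptions based on the names the snippet uses (stopwords, word_tokenize, PorterStemmer); they are not part of the original file.

import nltk
from nltk import PorterStemmer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

# Tokenizer and stopword data are needed once per environment (assumption: not yet downloaded).
nltk.download("punkt")
nltk.download("stopwords")

text = "The cats were running. Running cats run faster than sleeping cats."
freq_table = _create_frequency_table(text)
print(freq_table)  # e.g. {'cat': 3, 'run': 3, 'faster': 1, 'sleep': 1, ...}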

Example 3: _create_frequency_matrix

# Required module: import nltk [as alias]
# Or: from nltk import PorterStemmer [as alias]
def _create_frequency_matrix(sentences):
    frequency_matrix = {}
    stopWords = set(stopwords.words("english"))
    ps = PorterStemmer()

    for sent in sentences:
        freq_table = {}
        words = word_tokenize(sent)
        for word in words:
            word = word.lower()
            word = ps.stem(word)
            if word in stopWords:
                continue

            if word in freq_table:
                freq_table[word] += 1
            else:
                freq_table[word] = 1

        frequency_matrix[sent[:15]] = freq_table

    return frequency_matrix 
Developer: akashp1712, Project: nlp-akash, Lines of code: 24, Source file: TF_IDF_Summarization.py
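The matrix variant keys each inner table by the first 15 characters of the sentence. A brief sketch of feeding it sentences from nltk.sent_tokenize (an assumption; the original file may obtain its sentences differently), reusing the imports and downloads from the sketch above:

text = "Cats run quickly. Dogs run faster than cats."
sentences = nltk.sent_tokenize(text)          # requires the punkt data downloaded above
matrix = _create_frequency_matrix(sentences)
for sent_key, table in matrix.items():
    print(sent_key, table)                    # e.g. "Cats run quickl" {'cat': 1, 'run': 1, ...}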

Example 4: stem_text

# Required module: import nltk [as alias]
# Or: from nltk import PorterStemmer [as alias]
def stem_text(sent, context=None):
    processed_tokens = []
    tokens = nltk.word_tokenize(sent)
    porter = nltk.PorterStemmer()
    for t in tokens:
        t = porter.stem(t)
        processed_tokens.append(t)

    return " ".join(processed_tokens)

# Split to train and test sample sets: 
Developer: CatalystCode, Project: corpus-to-graph-ml, Lines of code: 13, Source file: data_preparation_tools.py
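A quick check of stem_text, assuming nltk is imported and the punkt tokenizer data has been downloaded:

import nltk
nltk.download("punkt")

print(stem_text("The striped bats were hanging on their feet"))
# roughly: "the stripe bat were hang on their feet" (PorterStemmer.stem lowercases by default)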

Example 5: stemmed_words

# Required module: import nltk [as alias]
# Or: from nltk import PorterStemmer [as alias]
def stemmed_words(self) -> List:
        """
        Compute the stems of words.

        Uses nltk.PorterStemmer.

        Returns:
            List

        """
        words = self.words()
        porter = nltk.PorterStemmer()
        return [porter.stem(w) for w in words] 
Developer: gender-bias, Project: gender-bias, Lines of code: 15, Source file: document.py

Example 6: stem

# Required module: import nltk [as alias]
# Or: from nltk import PorterStemmer [as alias]
def stem(word):
    ps = PorterStemmer()
    return ps.stem(word) 
Developer: MTG, Project: freesound-datasets, Lines of code: 5, Source file: utils.py

Example 7: stem

# Required module: import nltk [as alias]
# Or: from nltk import PorterStemmer [as alias]
def stem(cls, w: str):
        if not w or len(w.strip()) == 0:
            return ""
        w_lower = w.lower()
        # Remove leading articles from the phrase (e.g., the rays => rays).
        # FIXME: change this logic to accept a list of leading articles.
        if w_lower.startswith("a "):
            w_lower = w_lower[2:]
        elif w_lower.startswith("an "):
            w_lower = w_lower[3:]
        elif w_lower.startswith("the "):
            w_lower = w_lower[4:]
        elif w_lower.startswith("your "):
            w_lower = w_lower[5:]
        elif w_lower.startswith("his "):
            w_lower = w_lower[4:]
        elif w_lower.startswith("their "):
            w_lower = w_lower[6:]
        elif w_lower.startswith("my "):
            w_lower = w_lower[3:]
        elif w_lower.startswith("another "):
            w_lower = w_lower[8:]
        elif w_lower.startswith("other "):
            w_lower = w_lower[6:]
        elif w_lower.startswith("this "):
            w_lower = w_lower[5:]
        elif w_lower.startswith("that "):
            w_lower = w_lower[5:]
        # Porter stemmer: rays => ray
        return PorterStemmer().stem(w_lower).strip() 
Developer: allenai, Project: propara, Lines of code: 32, Source file: eval.py

Example 8: stemming

# Required module: import nltk [as alias]
# Or: from nltk import PorterStemmer [as alias]
def stemming(tokens):
    """
    stem tokens
    """
    porter = nltk.PorterStemmer()
    return [porter.stem(t) for t in tokens] 
Developer: KevinLiao159, Project: Quora, Lines of code: 8, Source file: nlp.py

Example 9: get_ngrams

# Required module: import nltk [as alias]
# Or: from nltk import PorterStemmer [as alias]
def get_ngrams(tokens, n, use_just_words=False, stem=False, for_semantics=False):
    if len(n) < 1:
        return {}
    if not for_semantics:
        if stem:
            porter = PorterStemmer()
            tokens = [porter.stem(t.lower()) for t in tokens]
        if use_just_words:
            tokens = [t.lower() for t in tokens if not t.startswith('@') and not t.startswith('#')
                      and t not in string.punctuation]
    ngram_tokens = []
    for i in n:
        for gram in ngrams(tokens, i):
            string_token = 'gram '
            for j in range(i):
                string_token += gram[j] + ' '
            ngram_tokens.append(string_token)
    ngram_features = {i: ngram_tokens.count(i) for i in set(ngram_tokens)}
    return ngram_features


# Get sentiment features -- a total of 18 features derived
# Emoji features: a count of the positive, negative and neutral emojis
# along with the ratio of positive to negative emojis and negative to neutral
# Using the MPQA subjectivity lexicon, we have to check words for their part of speech
# and obtain features: a count of positive, negative and neutral words, as well as
# a count of the strong and weak subjectives, along with their ratios and a total count of sentiment words.
# Also using VADER sentiment analyser to obtain a score of sentiments held in a tweet (4 features) 
Developer: MirunaPislar, Project: Sarcasm-Detection, Lines of code: 30, Source file: extract_statistical_features.py
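A sketch of calling get_ngrams on a short token list. The excerpt relies on ngrams (presumably from nltk.util) and the standard string module, which it does not show importing; the imports below are assumptions.

import string
from nltk import PorterStemmer
from nltk.util import ngrams

tokens = ["Cats", "chase", "mice"]
features = get_ngrams(tokens, n=[1, 2], stem=True)
print(features)
# e.g. {'gram cat ': 1, 'gram chase ': 1, 'gram mice ': 1, 'gram cat chase ': 1, 'gram chase mice ': 1}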

Example 10: __init__

# Required module: import nltk [as alias]
# Or: from nltk import PorterStemmer [as alias]
def __init__(self, lower: bool = True, stemmer="port"):
        self.lower = lower
        self.stemmer = stemmer
        if stemmer == "port":
            self._stemmer = PorterStemmer()
            self._stem = self._stemmer.stem
        elif stemmer == "wordnet":
            self._stemmer = WordNetLemmatizer()
            self._stem = self._stemmer.lemmatize
        else:
            raise ValueError(stemmer)
        # stemming is slow, so we cache words as we go
        self.normalize_cache = {} 
Developer: allenai, Project: document-qa, Lines of code: 15, Source file: text_utils.py
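The enclosing class is not shown in this excerpt. Assuming it were exposed as, say, WordNormalizer (a hypothetical name used purely for illustration), switching between the two normalizers would look like the sketch below; the WordNet lemmatizer additionally needs the wordnet data package.

import nltk

nltk.download("wordnet")  # data required by WordNetLemmatizer (assumption: not yet present)

porter_norm = WordNormalizer(stemmer="port")      # WordNormalizer is a hypothetical stand-in name
print(porter_norm._stem("geese"))                 # Porter stemmer: "gees"

wordnet_norm = WordNormalizer(stemmer="wordnet")
print(wordnet_norm._stem("geese"))                # WordNet lemmatizer: "goose"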


Note: The nltk.PorterStemmer examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to the corresponding project's License before distributing or using the code. Do not reproduce without permission.