本文整理汇总了Python中nltk.corpus.stopwords方法的典型用法代码示例。如果您正苦于以下问题:Python corpus.stopwords方法的具体用法?Python corpus.stopwords怎么用?Python corpus.stopwords使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类nltk.corpus
的用法示例。
在下文中一共展示了corpus.stopwords方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: remove_stopwords
# 需要导入模块: from nltk import corpus [as 别名]
# 或者: from nltk.corpus import stopwords [as 别名]
def remove_stopwords(tokens, stopwords_path=None):
    """
    Return the words in *tokens* that are not stopwords.

    Args:
        tokens: iterable of token strings to filter.
        stopwords_path: optional path to a stopwords text file, one word
            per line. Defaults to the module-level PATH_TO_STOPWORDS.

    Returns:
        list of tokens with stopwords removed, original order preserved.
    """
    path = PATH_TO_STOPWORDS if stopwords_path is None else stopwords_path
    with open(path, 'r') as f:
        # use a set: O(1) membership tests instead of O(n) list scans
        stopwords_set = {line.strip() for line in f}
    return [word for word in tokens if word not in stopwords_set]
示例2: preprocess
# 需要导入模块: from nltk import corpus [as 别名]
# 或者: from nltk.corpus import stopwords [as 别名]
def preprocess(tweet, stopwords):
    """
    Clean a raw tweet and return its stemmed content tokens.

    Removes sarcasm label hashtags, @mentions, leading URLs and a fixed
    set of punctuation characters, then keeps only alphabetic tokens not
    in *stopwords* and stems them.

    Args:
        tweet: raw tweet text.
        stopwords: collection of words to drop (membership-tested).

    Returns:
        list of stemmed token strings.
    """
    # strip dataset label hashtags so they cannot leak into features
    tweet = tweet.replace("#sarcasm", "")
    tweet = tweet.replace("#sarcastic", "")
    # drop @mentions and leading URLs
    tweet = re.sub(r"(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)", "", tweet)
    tweet = re.sub(r'^https?:\/\/.*[\r\n]*', '', tweet, flags=re.MULTILINE)
    # Python 3 fix: str.maketrans's third argument is a deletion set.
    # (string.maketrans and two-argument str.translate were Python 2 only.)
    tweet = tweet.translate(
        str.maketrans("", "", "?/:^&*()[email protected]$%:;',<.>-+*\{\}[]\""))
    stemmer = SnowballStemmer("english", ignore_stopwords=True)
    tokens = tweet.split()
    tokens = [w for w in tokens if w not in stopwords]
    tokens = [item for item in tokens if item.isalpha()]
    return [stemmer.stem(w) for w in tokens]
示例3: __init__
# 需要导入模块: from nltk import corpus [as 别名]
# 或者: from nltk.corpus import stopwords [as 别名]
def __init__(self, text, stopwords):
    """Filter the raw text, prime per-document state, and tokenize."""
    # filtered source text must be in place before tokenization
    self.rawtext = self.filter(text)
    # stopword set used later by buildContext
    self.stopwords = stopwords
    # bookkeeping for the lazily-built context
    self.unicodeErrors = 0
    self.isContextBuilt = False
    self.uniqueWords = []
    self.rawTokens = []
    self.tokenizeRawText()
示例4: buildContext
# 需要导入模块: from nltk import corpus [as 别名]
# 或者: from nltk.corpus import stopwords [as 别名]
def buildContext(self):
    """
    Lazily build and cache the stemmed, stopword-free word list.

    On first call, strips punctuation from self.rawtext, lowercases and
    stems each word, and appends every non-stopword stem to
    self.uniqueWords; subsequent calls return the cached list.

    Returns:
        list of stemmed lowercase words (stopwords excluded).
    """
    if not self.isContextBuilt:
        # Python 3 fix: build a punctuation-deletion table; the
        # two-argument form translate(None, chars) was Python 2 only.
        sometext = self.rawtext.translate(
            str.maketrans("", "", string.punctuation))
        st = LancasterStemmer()
        for each in sometext.split():
            try:
                ev = st.stem(each.lower())
                if ev not in self.stopwords:
                    self.uniqueWords.append(ev)
            except UnicodeDecodeError:
                # count undecodable tokens but keep processing
                self.unicodeErrors += 1
        self.isContextBuilt = True
    return self.uniqueWords