This article collects typical usage examples of the Python nltk.RegexpTokenizer class. If you have been wondering what the RegexpTokenizer class is for, how to use it, or want to see it in action, the hand-picked class examples below should help.
The following shows 15 code examples of the RegexpTokenizer class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
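Before diving into the examples, here is a minimal, self-contained sketch of the class itself (the sample sentences and variable names are ours, not taken from any example below):

from nltk.tokenize import RegexpTokenizer

# Match runs of word characters, effectively dropping punctuation.
word_tokenizer = RegexpTokenizer(r'\w+')
print(word_tokenizer.tokenize("Hello, world! RegexpTokenizer splits text on a regex."))
# ['Hello', 'world', 'RegexpTokenizer', 'splits', 'text', 'on', 'a', 'regex']

# With gaps=True the pattern describes the separators instead of the tokens.
whitespace_tokenizer = RegexpTokenizer(r'\s+', gaps=True)
print(whitespace_tokenizer.tokenize("split   on    whitespace"))
# ['split', 'on', 'whitespace']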
Example 1: french_tokenizer
def french_tokenizer(text):
    from nltk import RegexpTokenizer
    tokenizer = RegexpTokenizer(r"(?u)\b\w\w+\b")
    toks = tokenizer.tokenize(text)
    # We also lemmatize!
    # toks = [fr_lexicon.get(t, t) for t in toks]
    return toks
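A quick, hypothetical call (the French sentence is ours) might look like the following; note that the two-character minimum in the pattern drops one-letter words such as "à" and the elided "l'":

print(french_tokenizer(u"Le chat dort à l'ombre du grand arbre."))
# ['Le', 'chat', 'dort', 'ombre', 'du', 'grand', 'arbre']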
Example 2: _get_ngram_features
def _get_ngram_features(infile, ngram_size):
    """
    Returns a dictionary containing ngrams and counts observed in a given file
    :param infile: file to be analysed
    :param ngram_size: ngram size
    :return: dict of ngrams/counts
    """
    # tokenizer which removes punctuation
    tokenizer = RegexpTokenizer(r'\w+')
    # dictionary of ngrams and counts
    d_ngrams = defaultdict(int)
    # stopwords
    stops = set(stopwords.words("english"))
    # lemmatizer for stemming
    lemmatizer = WordNetLemmatizer()
    # load train data
    with open(infile) as tsv:
        file_reader = reader(tsv, dialect="excel-tab")
        # skip title line
        file_reader.next()
        for line in file_reader:
            s_text = line[2]
            # remove punctuation and tokenize
            l_text = tokenizer.tokenize(s_text)
            # remove stopwords and stem
            l_text = [lemmatizer.lemmatize(word) for word in l_text if word not in stops]
            # get the ngrams for the given line
            l_temp = ngrams(l_text, ngram_size)
            for ngram in l_temp:
                d_ngrams[ngram] += 1
    return d_ngrams
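A hedged usage sketch, assuming a tab-separated file whose third column holds the text to analyse (the file name train.tsv is hypothetical; note the example calls file_reader.next() and therefore targets Python 2):

bigram_counts = _get_ngram_features('train.tsv', 2)
for ngram, count in sorted(bigram_counts.items(), key=lambda kv: -kv[1])[:10]:
    print(ngram, count)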
Example 3: create_bag_of_words
def create_bag_of_words(document_list):
    """
    Creates a bag of words representation of the given document list. It removes
    punctuation and stop words.
    :type document_list: list[str]
    :param document_list:
    :rtype: list[list[str]]
    :return:
    """
    tokenizer = RegexpTokenizer(r'\w+')
    cached_stop_words = set(stopwords.words("english"))
    body = []
    processed = []
    # remove common words and tokenize
    # texts = [[word for word in document.lower().split() if word not in stopwords.words('english')]
    #          for document in reviews]
    for i in range(0, len(document_list)):
        body.append(document_list[i].lower())
    for entry in body:
        row = tokenizer.tokenize(entry)
        processed.append([word for word in row if word not in cached_stop_words])
    return processed
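A small usage sketch with made-up documents:

docs = ["The cat sat on the mat.", "Dogs and cats are friendly animals."]
print(create_bag_of_words(docs))
# [['cat', 'sat', 'mat'], ['dogs', 'cats', 'friendly', 'animals']]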
Example 4: test
def test():
    global N, words, network
    print 'In testing.'
    gettysburg = """Four score and seven years ago our fathers brought forth on this continent, a new nation, conceived in Liberty, and dedicated to the proposition that all men are created equal. Now we are engaged in a great civil war, testing whether that nation, or any nation so conceived and so dedicated, can long endure. We are met on a great battle-field of that war. We have come to dedicate a portion of that field, as a final resting place for those who here gave their lives that that nation might live. It is altogether fitting and proper that we should do this. But, in a larger sense, we can not dedicate -- we can not consecrate -- we can not hallow -- this ground. The brave men, living and dead, who struggled here, have consecrated it, far above our poor power to add or detract. The world will little note, nor long remember what we say here, but it can never forget what they did here. It is for us the living, rather, to be dedicated here to the unfinished work which they who fought here have thus far so nobly advanced. It is rather for us to be here dedicated to the great task remaining before us -- that from these honored dead we take increased devotion to that cause for which they gave the last full measure of devotion -- that we here highly resolve that these dead shall not have died in vain -- that this nation, under God, shall have a new birth of freedom -- and that government of the people, by the people, for the people, shall not perish from the earth."""
    tokenizer = RegexpTokenizer('\w+')
    gettysburg_tokens = tokenizer.tokenize(gettysburg)
    samples = []
    for token in gettysburg_tokens:
        word = token.lower()
        if word not in ENGLISH_STOP_WORDS and word not in punctuation:
            samples.append(word)
    dist = FreqDist(samples)
    V = Vol(1, 1, N, 0.0)
    for i, word in enumerate(words):
        V.w[i] = dist.freq(word)
    pred = network.forward(V).w
    topics = []
    while len(topics) != 5:
        max_act = max(pred)
        topic_idx = pred.index(max_act)
        topic = words[topic_idx]
        if topic in gettysburg_tokens:
            topics.append(topic)
        del pred[topic_idx]
    print 'Topics of the Gettysburg Address:'
    print topics
Example 5: get_documents_text
def get_documents_text(act_id, **kwargs):
    """
    Returns the concatenated, tag-stripped text of all documents related to act_id
    """
    db_conn = kwargs['db']
    italian_stops = set(stopwords.words('italian'))
    cursor = db_conn.cursor(MySQLdb.cursors.DictCursor)
    sql = """
        select d.testo
        from opp_documento as d
        where d.atto_id=%s
    """
    cursor.execute(sql, act_id)
    rows = cursor.fetchall()
    cursor.close()
    testo = u''
    for row in rows:
        # strip html tags from texts, if present
        testo += unicode(
            strip_tags(
                row['testo']
            )
        )
    # remove stopwords
    tokenizer = RegexpTokenizer("[\w]+")
    words = tokenizer.tokenize(testo)
    filtered_testo = " ".join([word for word in words if word.lower() not in italian_stops])
    return filtered_testo
Example 6: gen_vocab
def gen_vocab(vocab_fname, path):
    """ reads in a csv file,
    outputs as python list in given path
    as pickled object. unicode.
    Also adds unigrams for every line"""
    print("\ngen_vocab:{}".format(vocab_fname))
    # open file pointer
    f = codecs.open(path+vocab_fname, 'r', "utf-8")
    # output list
    concepts = []
    # read in lines
    for line in f.readlines():
        concepts = concepts + line.lower().strip("\n").split(',')
    # from observation the concept lists all had ''
    while ('' in concepts):
        concepts.remove('')
    # add unigrams to concepts. does not preserve order of list
    unigrams = set()
    set_concepts = set(concepts)
    tokenizer = RegexpTokenizer(ur'\w+')
    for phrase in concepts:
        unigrams.update(tokenizer.tokenize(phrase))
    set_concepts.update(unigrams)
    return list(set_concepts)
Example 7: gen_counts
def gen_counts(path_corpus, list_corpus):
    """ creates np array, for each corpus file how many words
    in that document """
    # create output
    counts_corpus = np.zeros(len(list_corpus))
    fp = None
    txt = u''
    tokens = []
    tokenizer = RegexpTokenizer(ur'\w+')
    count = 0
    every = 500
    for f in list_corpus:
        # read in text
        fp = codecs.open(path_corpus+f, 'r', "utf-8", errors="ignore")
        txt = fp.read()
        txt = txt.lower()
        fp.close()
        # tokenize
        tokens = tokenizer.tokenize(txt)
        counts_corpus[list_corpus.index(f)] = len(tokens)
        # count iterations
        if count % every == 0:
            print(count)
        count += 1
    return counts_corpus
Example 8: get_tokens
def get_tokens(dict_element):
    # Remove stop words from data and perform initial
    # cleanup for feature extraction
    query = dict_element['query']
    desc = dict_element['product_description']
    title = dict_element['product_title']
    stop = stopwords.words('english')
    pattern = r'''(?x)            # set flag to allow verbose regexps
          ([A-Z]\.)+              # abbreviations, e.g. U.S.A.
        | \$?\d+(\.\d+)?%?        # numbers, incl. currency and percentages
        | \w+([-']\w+)*           # words w/ optional internal hyphens/apostrophes
        | @((\w)+([-']\w+))*
        | [+/\[email protected]&*]    # special characters with meanings
        '''
    #pattern = r'[+/\[email protected]&*#](\w+)|(\w+)'
    tokenizer = RegexpTokenizer(pattern)
    #tokenizer = RegexpTokenizer(r'\w+')
    query_tokens = tokenizer.tokenize(query)
    query_tokens = map(lambda x: x.lower(), query_tokens)
    desc_tokens = tokenizer.tokenize(desc)
    desc_tokens = [x.lower() for x in desc_tokens if x.lower() not in stop]
    title_tokens = tokenizer.tokenize(title)
    title_tokens = [x.lower() for x in title_tokens if x.lower() not in stop]
    return query_tokens, title_tokens, desc_tokens
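The verbose pattern above is easier to understand in isolation. Below is a minimal sketch of a similar (not identical) verbose-mode pattern; it uses non-capturing groups, which avoids a known pitfall where capturing groups can change what findall-based tokenization returns in some NLTK versions:

from nltk.tokenize import RegexpTokenizer

pattern = r'''(?x)              # verbose mode: whitespace and comments in the pattern are ignored
      (?:[A-Z]\.)+              # abbreviations, e.g. U.S.A.
    | \$?\d+(?:\.\d+)?%?        # numbers, incl. currency and percentages
    | \w+(?:[-']\w+)*           # words with optional internal hyphens/apostrophes
'''
tokenizer = RegexpTokenizer(pattern)
print(tokenizer.tokenize("The U.S.A. price rose 3.5% to $12.40 for state-of-the-art kits."))
# ['The', 'U.S.A.', 'price', 'rose', '3.5%', 'to', '$12.40', 'for', 'state-of-the-art', 'kits']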
Example 9: analyze_dataset
def analyze_dataset():
    l_sentences = []
    with open('/Users/miljan/PycharmProjects/thesis-shared/data/pang_and_lee_data/rt-negative.txt') as file1:
        r = reader(file1, dialect='excel-tab')
        for row in r:
            l_sentences.append(row[0])
    with open('/Users/miljan/PycharmProjects/thesis-shared/data/pang_and_lee_data/rt-positive.txt') as file2:
        r = reader(file2, dialect='excel-tab')
        for row in r:
            l_sentences.append(row[0])
    # chunk the given text into sentences
    tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
    d_lengths = defaultdict(int)
    tokenizer2 = RegexpTokenizer(r'\w+')
    # clean sentences from punctuation
    l_sentences = [''.join(ch for ch in sent if ch not in set(string.punctuation)) for sent in l_sentences]
    l_sentences = [len(tokenizer2.tokenize(sen)) for sen in l_sentences]
    total_sent = len(l_sentences)
    d_lengths = Counter(l_sentences)
    print total_sent
    lengths = sorted(d_lengths.iteritems(), key=lambda key_value: int(key_value[0]))
    plot(lengths)
Developer: noforcecanstopme, Project: deep-learning-for-sentiment-mining, Lines of code: 25, Source: sent_size_ditribution.py
Example 10: text2sents
def text2sents(text, lemmatize=False, stemmer=None):
    """
    converts a text into a list of sentences consisting of normalized words
    :param text: string to process
    :param lemmatize: if true, words will be lemmatized, otherwise stemmed
    :param stemmer: stemmer to be used; if None, PorterStemmer is used. Only applied if lemmatize==False
    :return: list of lists of words
    """
    sents = sent_tokenize(text)
    tokenizer = RegexpTokenizer(r'\w+')
    if lemmatize:
        normalizer = WordNetLemmatizer()
        tagger = PerceptronTagger()
    elif stemmer is None:
        normalizer = PorterStemmer()
    else:
        normalizer = stemmer
    sents_normalized = []
    for sent in sents:
        sent_tokenized = tokenizer.tokenize(sent)
        if lemmatize:
            sent_tagged = tagger.tag(sent_tokenized)
            sent_normalized = [normalizer.lemmatize(w[0], get_wordnet_pos(w[1])) for w in sent_tagged]
        else:
            sent_normalized = [normalizer.stem(w) for w in sent_tokenized]
        sents_normalized.append(sent_normalized)
    return sents_normalized
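A usage sketch (the sample text is ours; the lemmatize=True branch additionally relies on a get_wordnet_pos helper defined elsewhere in that project):

sample = "The striped bats were hanging on their feet. They ate fruit."
print(text2sents(sample))   # stemmed with PorterStemmer by default
# e.g. [['the', 'stripe', 'bat', 'were', 'hang', 'on', 'their', 'feet'], ['they', 'ate', 'fruit']]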
Example 11: tokenize
def tokenize(text):
    """
    Input: "Body of text..."
    Output: [word, ...] list of tokenized words matching regex '\w+'
    """
    tokenizer = RegexpTokenizer(r'\w+')
    tokens = tokenizer.tokenize(text)
    return tokens
Example 12: tokenize
def tokenize(self, text):
    """
    tokenise text using nltk RegexpTokenizer
    :param text:
    :return: list of tokens
    """
    tokenizer = RegexpTokenizer(self.pattern)
    tokens = tokenizer.tokenize(text)
    return tokens
Example 13: tokenize
def tokenize(self, text):
    """
    :param text: text to tokenize
    :type text: str
    :return: tokens
    This tokenizer uses the nltk RegexpTokenizer.
    """
    tokenizer = RegexpTokenizer(self.pattern)
    tokens = tokenizer.tokenize(text)
    return tokens
Example 14: __call__
def __call__(self, doc):
    from nltk.tokenize import RegexpTokenizer
    from nltk.corpus import stopwords
    #tokenizer = RegexpTokenizer(r'\w+')
    tokenizer = RegexpTokenizer(r'[a-zA-Z]+')
    #words = [self.wnl.lemmatize(t) for t in word_tokenize(doc)]
    words = [self.wnl.lemmatize(t) for t in tokenizer.tokenize(doc)]
    mystops = (u'youtube', u'mine', u'this', u'that', 'facebook', 'com', 'google', 'www', 'http', 'https')
    stop_words = set(stopwords.words('english'))
    stop_words.update(mystops)
    stop_words = list(stop_words)
    return [i.lower() for i in words if i not in stop_words]
Example 15: tokenize_and_stem
def tokenize_and_stem(doc):
    tokenizer = RegexpTokenizer(r'\w+')
    # create English stop words list
    en_stop = get_stop_words('en')
    # create p_stemmer of class PorterStemmer
    p_stemmer = PorterStemmer()
    tokens = tokenizer.tokenize(doc)
    clean = [token.lower() for token in tokens if token.lower() not in en_stop and len(token) > 2]
    final = [p_stemmer.stem(word) for word in clean]
    return final
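A quick sketch with a made-up sentence; get_stop_words comes from the stop_words package that the example assumes is installed:

print(tokenize_and_stem("Natural language processing with NLTK is really enjoyable"))
# e.g. ['natur', 'languag', 'process', 'nltk', 'realli', 'enjoy']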