This article collects typical usage examples of nltk.tokenize.regexp.WhitespaceTokenizer in Python. If you have been wondering what exactly regexp.WhitespaceTokenizer does, how to use it, or where to find examples, the curated code samples below may help. You can also explore the module nltk.tokenize.regexp, in which this class is defined, for further usage examples.
The following presents 5 code examples of regexp.WhitespaceTokenizer, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
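Before the full examples, here is a minimal standalone sketch of what WhitespaceTokenizer itself does: it splits the input on runs of whitespace and leaves punctuation attached to the tokens (the sample sentence is our own, used only for illustration):

from nltk.tokenize.regexp import WhitespaceTokenizer

tokenizer = WhitespaceTokenizer()
# Splits on runs of whitespace only; '$3.88' and 'York.' stay intact.
print(tokenizer.tokenize("Good muffins cost $3.88\nin New York."))
# ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.']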
Example 1: demo_sent_subjectivity
# Required import: from nltk.tokenize import regexp [as alias]
# Or: from nltk.tokenize.regexp import WhitespaceTokenizer [as alias]
def demo_sent_subjectivity(text):
    """
    Classify a single sentence as subjective or objective using a stored
    SentimentAnalyzer.

    :param text: a sentence whose subjectivity has to be classified.
    """
    from nltk.classify import NaiveBayesClassifier
    from nltk.tokenize import regexp
    from nltk.data import load  # in nltk.sentiment.util, `load` comes from nltk.data

    word_tokenizer = regexp.WhitespaceTokenizer()
    try:
        sentim_analyzer = load('sa_subjectivity.pickle')
    except LookupError:
        print('Cannot find the sentiment analyzer you want to load.')
        print('Training a new one using NaiveBayesClassifier.')
        # demo_subjectivity is shown in Example 4 below
        sentim_analyzer = demo_subjectivity(NaiveBayesClassifier.train, True)

    # Tokenize and convert to lower case
    tokenized_text = [word.lower() for word in word_tokenizer.tokenize(text)]
    print(sentim_analyzer.classify(tokenized_text))
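A hypothetical call of the function above (the sentence is made up; the first run may train and pickle a new analyzer, which requires the NLTK subjectivity corpus to be downloaded):

from nltk.sentiment.util import demo_sent_subjectivity

# Prints 'subj' or 'obj' for the given sentence.
demo_sent_subjectivity("the plot was predictable and the acting felt flat")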
Example 2: __init__
# Required import: from nltk.tokenize import regexp [as alias]
# Or: from nltk.tokenize.regexp import WhitespaceTokenizer [as alias]
# The names `re`, `stopwords`, `WhitespaceTokenizer`, `unicode_replace` and
# `ascii_replace` are assumed to be module-level imports/helpers, e.g.:
# import re; from nltk.corpus import stopwords;
# from nltk.tokenize.regexp import WhitespaceTokenizer
def __init__(self, use_unicode):
    self.repeat_regexp = re.compile(r'(\w*)(\w)\2(\w*)')
    self.repl = r'\1\2\3'
    self.tokenizer = WhitespaceTokenizer()
    self.cached_stopwords = stopwords.words('english')
    self.symbols = [u"\"", u"'", u"!", u"?", u".", u",", u";", u">", u"_", u"<", u"-", u"[",
                    u"]", u"{", u"}", u"/", u"\\", u"^", u"~", u"", u"`", u"``", u"\u2026",
                    u":", u"(", u")", u"|", u"#", u"$", u"%", u"&", u"*", u"=", u"+", u"\u2013",
                    u"\u201c", u"\u201d", u"\u300b\u300b", u"\u2019", u"\u2018", u"\u00b0",
                    u"\u00ba", u"\u200b", u"\u00b7", u"\u2014", u"\u00bb", u"\u221a", u"\u00aa",
                    u"\ufe0f", u"\u2794", u"\u2192", u"\u00a8", u"\u2022", u"\u300a", u"\u00bf",
                    u"\u25a0", u"\u00af", u"\u22b3", u"\u2060", u"\u261b", u"\u00ad", u"\u00ab"]
    if use_unicode:
        self.accents = unicode_replace
    else:
        self.accents = ascii_replace
    self.link_patterns = ['http', 'www', 'w3c']
    self.digraph = [(r'hash', '#'), (r'rxr', 'rr'), (r'sxs', 'ss'), (r'aqa', 'aa'), (r'eqe', 'ee'),
                    (r'oqo', 'oo'), (r'fqf', 'ff'), (r'gqg', 'gg'), (r'cqc', 'cc'), (r'dqd', 'dd'),
                    (r'mqm', 'mm'), (r'nqn', 'nn'), (r'pqp', 'pp'), (r'dqd', 'dd'), (r'tqt', 'tt'),
                    (r'fqf', 'ff'), (r'lql', 'll')]
    # Remove consecutively repeated characters so that the model is not hurt
    # by inconsistent spelling.
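The repeat_regexp/repl pair set up above is the kind of pattern that is applied repeatedly until the string stops changing; a minimal standalone sketch (the loop and the sample word are our own, not part of the class):

import re

repeat_regexp = re.compile(r'(\w*)(\w)\2(\w*)')
repl = r'\1\2\3'

word = "cooooolll"
while True:
    collapsed = repeat_regexp.sub(repl, word)
    if collapsed == word:
        break
    word = collapsed
print(word)  # 'col': each run of repeated letters is collapsed to a single one

The digraph pairs above (e.g. 'rxr' -> 'rr', 'sxs' -> 'ss') presumably protect legitimate double letters by encoding them before this collapsing step and restoring them afterwards.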
Example 3: __init__
# Required import: from nltk.tokenize import regexp [as alias]
# Or: from nltk.tokenize.regexp import WhitespaceTokenizer [as alias]
# As in Example 2, `re`, `nltk`, `stopwords`, `WhitespaceTokenizer`,
# `unicode_replace` and `ascii_replace` are assumed to be module-level imports/helpers.
def __init__(self, use_unicode=True):
    self.repeat_regexp = re.compile(r'(\w*)(\w)\2(\w*)')
    self.repl = r'\1\2\3'
    self.pt_stemmer = nltk.stem.RSLPStemmer()
    self.tokenizer = WhitespaceTokenizer()
    self.cached_stopwords = stopwords.words('portuguese')
    self.symbols = [u"\"", u"'", u"!", u"?", u".", u",", u";", u">", u"_", u"<", u"-", u"[",
                    u"]", u"{", u"}", u"/", u"\\", u"^", u"~", u"", u"`", u"``", u"\u2026",
                    u":", u"(", u")", u"|", u"#", u"$", u"%", u"&", u"*", u"=", u"+", u"\u2013",
                    u"\u201c", u"\u201d", u"\u300b", u"\u2019", u"\u2018", u"\u00b0", u"\u30fb",
                    u"\u00ba", u"\u200b", u"\u00b7", u"\u2014", u"\u00bb", u"\u221a", u"\u00aa",
                    u"\ufe0f", u"\u2794", u"\u2192", u"\u00a8", u"\u2022", u"\u300a", u"\u00bf",
                    u"\u25a0", u"\u00af", u"\u22b3", u"\u2060", u"\u261b", u"\u00ad", u"\u00ab"]
    self.more_stopwords = ['ja', 'q', 'd', 'ai', 'desse', 'dessa', 'disso', 'nesse', 'nessa', 'nisso',
                           'esse', 'essa', 'isso', 'so', 'mt', 'vc', 'voce', 'ne', 'ta', 'to', 'pq',
                           'cade', 'kd', 'la', 'e', 'eh', 'dai', 'pra', 'vai', 'olha', 'pois', 'rt',
                           'retweeted', 'fica', 'muito', 'muita', 'muitos', 'muitas', 'onde', 'mim',
                           'oi', 'ola', 'ate']
    if use_unicode:
        self.accents = unicode_replace
    else:
        self.accents = ascii_replace
    self.link_patterns = ['http', 'www', 'w3c', 'https']
    self.normal = [(r'kxkxk', 'kkk'), (r'nao ', ' nao_'), (r' ir ', '_ir '), (r'bom demal', ' bomdemais '),
                   (r'\s*insan\s*', ' insano '), (r'\s*saudad\s*', ' saudade ')]
    self.digraph = [(r'rxr', 'rr'), (r'sxs', 'ss'), (r'aqa', 'aa'), (r'eqe', 'ee'), (r'oqo', 'oo')]
    # Remove consecutively repeated characters so that the model is not hurt
    # by inconsistent spelling.
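A small sketch of how the pieces initialized above would typically be combined on a Portuguese sentence: tokenize on whitespace, lowercase, drop stopwords, then stem with RSLP. The pipeline and sample text are our own, not taken from the class; the stemmer and stopword list require nltk.download('rslp') and nltk.download('stopwords'):

import nltk
from nltk.corpus import stopwords
from nltk.tokenize.regexp import WhitespaceTokenizer

tokenizer = WhitespaceTokenizer()
stemmer = nltk.stem.RSLPStemmer()
stop = set(stopwords.words('portuguese'))

text = "gostei muito desse filme maravilhoso"
tokens = [t.lower() for t in tokenizer.tokenize(text)]
stems = [stemmer.stem(t) for t in tokens if t not in stop]
print(stems)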
Example 4: demo_subjectivity
# Required import: from nltk.tokenize import regexp [as alias]
# Or: from nltk.tokenize.regexp import WhitespaceTokenizer [as alias]
# The helpers split_train_test, mark_negation, extract_unigram_feats, save_file
# and output_markdown are defined alongside this function in nltk.sentiment.util.
def demo_subjectivity(trainer, save_analyzer=False, n_instances=None, output=None):
    """
    Train and test a classifier on instances of the Subjective Dataset by Pang and
    Lee. The dataset is made of 5000 subjective and 5000 objective sentences.
    All tokens (words and punctuation marks) are separated by a whitespace, so
    we use the basic WhitespaceTokenizer to parse the data.

    :param trainer: `train` method of a classifier.
    :param save_analyzer: if `True`, store the SentimentAnalyzer in a pickle file.
    :param n_instances: the number of total sentences that have to be used for
        training and testing. Sentences will be equally split between subjective
        and objective.
    :param output: the output file where results have to be reported.
    """
    from nltk.sentiment import SentimentAnalyzer
    from nltk.corpus import subjectivity

    if n_instances is not None:
        n_instances = int(n_instances / 2)

    subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]]
    obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]]

    # We separately split subjective and objective instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_subj_docs, test_subj_docs = split_train_test(subj_docs)
    train_obj_docs, test_obj_docs = split_train_test(obj_docs)
    training_docs = train_subj_docs + train_obj_docs
    testing_docs = test_subj_docs + test_obj_docs

    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])

    # Add simple unigram word features handling negation
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

    # Apply features to obtain a feature-value representation of our datasets
    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)

    classifier = sentim_analyzer.train(trainer, training_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print('Your classifier does not provide a show_most_informative_features() method.')
    results = sentim_analyzer.evaluate(test_set)

    if save_analyzer:
        save_file(sentim_analyzer, 'sa_subjectivity.pickle')

    if output:
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(output, Dataset='subjectivity', Classifier=type(classifier).__name__,
                        Tokenizer='WhitespaceTokenizer', Feats=extr,
                        Instances=n_instances, Results=results)

    return sentim_analyzer
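A hypothetical invocation of demo_subjectivity as shipped with NLTK, assuming the subjectivity corpus has been downloaded (nltk.download('subjectivity')); the instance count is arbitrary:

from nltk.classify import NaiveBayesClassifier
from nltk.sentiment.util import demo_subjectivity

# Train and evaluate on 200 sentences (100 subjective + 100 objective) and keep the analyzer.
analyzer = demo_subjectivity(NaiveBayesClassifier.train, save_analyzer=False, n_instances=200)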
Example 5: demo_subjectivity
# Required import: from nltk.tokenize import regexp [as alias]
# Or: from nltk.tokenize.regexp import WhitespaceTokenizer [as alias]
# This variant comes from a third-party project: SentimentAnalyzer is imported
# from a local `sentiment_analyzer` module, and split_train_test, mark_negation,
# extract_unigram_feats, save_file and output_markdown are assumed to be
# provided by that same project.
def demo_subjectivity(trainer, save_analyzer=False, n_instances=None, output=None):
    """
    Train and test a classifier on instances of the Subjective Dataset by Pang and
    Lee. The dataset is made of 5000 subjective and 5000 objective sentences.
    All tokens (words and punctuation marks) are separated by a whitespace, so
    we use the basic WhitespaceTokenizer to parse the data.

    :param trainer: `train` method of a classifier.
    :param save_analyzer: if `True`, store the SentimentAnalyzer in a pickle file.
    :param n_instances: the number of total sentences that have to be used for
        training and testing. Sentences will be equally split between subjective
        and objective.
    :param output: the output file where results have to be reported.
    """
    from sentiment_analyzer import SentimentAnalyzer
    from nltk.corpus import subjectivity

    if n_instances is not None:
        n_instances = int(n_instances / 2)

    subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]]
    obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]]

    # We separately split subjective and objective instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_subj_docs, test_subj_docs = split_train_test(subj_docs)
    train_obj_docs, test_obj_docs = split_train_test(obj_docs)
    training_docs = train_subj_docs + train_obj_docs
    testing_docs = test_subj_docs + test_obj_docs

    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])

    # Add simple unigram word features handling negation
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

    # Apply features to obtain a feature-value representation of our datasets
    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)

    classifier = sentim_analyzer.train(trainer, training_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print('Your classifier does not provide a show_most_informative_features() method.')
    results = sentim_analyzer.evaluate(test_set)

    if save_analyzer:
        save_file(sentim_analyzer, 'sa_subjectivity.pickle')

    if output:
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(output, Dataset='subjectivity', Classifier=type(classifier).__name__,
                        Tokenizer='WhitespaceTokenizer', Feats=extr,
                        Instances=n_instances, Results=results)

    return sentim_analyzer