This article collects typical usage examples of the Python class nltk.tokenize.punkt.PunktSentenceTokenizer. If you are unsure what PunktSentenceTokenizer is for, or how to use it, the curated class code examples below may help.
The following presents 15 code examples of the PunktSentenceTokenizer class, sorted by popularity by default.
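Before turning to the collected examples, here is a minimal usage sketch (not taken from any example below; the sample text is invented) showing the two patterns the examples rely on: constructing PunktSentenceTokenizer with its defaults, and passing a PunktParameters object whose abbrev_types prevent false sentence breaks after known abbreviations.

from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters

text = "Dr. Smith visited the lab. He met Mrs. Jones at noon."

# A directly constructed tokenizer starts with empty parameters
# (no learned abbreviations), which is how most examples below use it.
print(PunktSentenceTokenizer().tokenize(text))

# Supplying known abbreviations via PunktParameters avoids spurious
# breaks after tokens such as "Dr." and "Mrs.".
punkt_param = PunktParameters()
punkt_param.abbrev_types = set(['dr', 'mrs'])
print(PunktSentenceTokenizer(punkt_param).tokenize(text))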
Example 1: preprocess
def preprocess(phys):
'''
    :param phys: raw text as a UTF-8 encoded byte string
    :return: a list of cleaned sentences, processed for searchability
'''
phys = phys.decode('utf-8')
phys = re.sub('(\n)+', '. ', phys)
sentence_tokenizer = PunktSentenceTokenizer()
sentences = sentence_tokenizer.tokenize(phys)
for i in xrange(len(sentences)):
sentence = unicode(sentences[i])
sentence = sentence.replace('\n', ' ')
sentence = re.sub(' +',' ',sentence)
sentence = re.sub(r'\d+', '', sentence)
sentence = sentence.replace("-"," ")
exclude = string.punctuation
sentence = ''.join(ch for ch in sentence if ch not in exclude)
sentence = re.sub(' +',' ',sentence)
sentences[i] = sentence
# sentences[i] = sentence.encode('utf-8')
    # Drop empty / whitespace-only sentences; building a new list avoids the
    # index-shifting bug of popping from the list while iterating over it.
    sentences = [s for s in sentences if s.strip()]
# with open(fname.rstrip('txt')+'json', 'w') as outfile:
# json.dump(sentences, outfile)
return sentences
Example 2: _split_sentences
def _split_sentences(self, text):
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters
punkt_param = PunktParameters()
punkt_param.abbrev_types = set(['dr', 'vs', 'mr', 'mrs', 'prof', 'inc'])
sentence_splitter = PunktSentenceTokenizer(punkt_param)
sentences = sentence_splitter.tokenize(text)
return sentences
Example 3: summarize
def summarize(self):
punkt_param = PunktParameters()
punkt_param.abbrev_types = set(['dr', 'vs', 'mr', 'mrs', 'prof', 'inc'])
sentence_splitter = PunktSentenceTokenizer(punkt_param)
sentences = sentence_splitter.tokenize(self.text)
structure = {}
sentence_objects = []
for idx in range(len(sentences)):
obj = {'text' : sentences[idx], 'index' : idx , 'data': {}}
sentence_objects.append(obj)
structure['sentences'] = sentence_objects
self.sentencecount = len(structure['sentences'])
structure['ordered'] = []
structure['weights'] = {'words' : FreqDist(nltk.word_tokenize(preprocess(self.text))), 'total': 0, 'transformed': 0}
structure['weights']['total'] = sum(structure['weights']['words'].values())
self.sentenceIndex = 0
for each_sent in structure['sentences']:
each_sent['data']['tokens'] = nltk.word_tokenize(preprocess(each_sent['text']))
each_sent['data']['sinTransform'] = (1-math.sin(self.sentenceIndex*(math.pi/self.sentencecount)))+1
for each_word in structure['weights']['words']:
if each_word in each_sent['data']['tokens']:
structure['weights']['words'][each_word] *= each_sent['data']['sinTransform']
self.sentenceIndex += 1
structure['weights']['transformed'] = sum(structure['weights']['words'].values())
self.sentenceIndex = 0
for each_sent in structure['sentences']:
each_sent['data']['weights'] = {'words': self.calculate_relative_frequence(each_sent['data']['tokens'], structure['weights']['words']), 'total': 0}
each_sent['data']['weights']['total'] = sum(each_sent['data']['weights']['words'].values())
self.sentenceIndex += 1
structure['ordered'] = sorted(structure['sentences'], key=lambda x:x['data']['weights']['total'], reverse=True)
structure_keep = structure['ordered'][:self.quota]
structure_keep.sort(key=lambda x:x['index'])
for eac_sen in structure_keep:
self.summary.append(eac_sen['text'])
Example 4: textrank
def textrank(document):
pst = PunktSentenceTokenizer()
sentences = pst.tokenize(document)
# Bag of Words
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
bow_matrix = cv.fit_transform(sentences)
from sklearn.feature_extraction.text import TfidfTransformer
normalized_matrix = TfidfTransformer().fit_transform(bow_matrix)
## mirrored matrix where the rows and columns correspond to
## sentences, and the elements describe how similar the
## sentences are. score 1 means sentences are exactly the same.
similarity_graph = normalized_matrix * normalized_matrix.T
similarity_graph.toarray()
# PageRank
import networkx as nx
nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
## mapping of sentence indices to scores. use them to associate
## back to the original sentences and sort them
scores = nx.pagerank(nx_graph)
ranked = sorted(((scores[i], s) for i,s in enumerate(sentences)), reverse=True)
print ranked[0][1]
Example 5: featureize
def featureize(F, observation_files):
    # Only a sentence tokenizer is needed here; token counts below use a plain split().
    sent_tokenizer = PunktSentenceTokenizer()
m = len(observation_files)
# X is Nx2
X = np.zeros((m,2), dtype=np.float)
for (i,filename) in enumerate(observation_files,start=0):
file_text = read_file(filename).decode('string_escape')
try:
num_sents = len(sent_tokenizer.sentences_from_text(file_text))
except UnicodeDecodeError:
num_sents = 2
#num_tokens = len(word_tokenize(file_text))
num_tokens = len(file_text.split())
# Return two features:
# 1 (0) - Number of sentences per file
# 2 (1) - Number of tokens per file
X[i][0] = num_sents
X[i][1] = num_tokens
return X
Example 6: get_key_sentences
def get_key_sentences(self, n=5):
'''
Uses a simple implementation of TextRank to extract the top N sentences
from a document.
Sources:
- Original paper: http://acl.ldc.upenn.edu/acl2004/emnlp/pdf/Mihalcea.pdf
- Super useful blog post: http://joshbohde.com/blog/document-summarization
- Wikipedia: http://en.wikipedia.org/wiki/Automatic_summarization#Unsupervised_keyphrase_extraction:_TextRank
'''
# Tokenize the document into sentences. More NLP preprocesing should also happen here.
sentence_tokenizer = PunktSentenceTokenizer()
sentences = sentence_tokenizer.tokenize(self.doc)
# Calculate word counts and TFIDF vectors
word_counts = CountVectorizer(min_df=0).fit_transform(sentences)
normalized = TfidfTransformer().fit_transform(word_counts)
# Normalized graph * its transpose yields a sentence-level similarity matrix
similarity_graph = normalized * normalized.T
nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
scores = nx.pagerank(nx_graph)
    return sorted(((scores[i], s) for i, s in enumerate(sentences)),
                  reverse=True)[:n]
Example 7: tokenize_sentences
def tokenize_sentences(self, untokenized_string: str):
"""Tokenize sentences by reading trained tokenizer and invoking
``PunktSentenceTokenizer()``.
:type untokenized_string: str
    :param untokenized_string: A string containing one or more sentences.
:rtype : list of strings
"""
# load tokenizer
assert isinstance(untokenized_string, str), \
'Incoming argument must be a string.'
if self.language == 'latin':
tokenizer = super()
elif self.language == 'greek': # Workaround for regex tokenizer
self.sent_end_chars=GreekLanguageVars.sent_end_chars
self.sent_end_chars_regex = '|'.join(self.sent_end_chars)
self.pattern = rf'(?<=[{self.sent_end_chars_regex}])\s'
elif self.language in INDIAN_LANGUAGES:
self.sent_end_chars=SanskritLanguageVars.sent_end_chars
self.sent_end_chars_regex = '|'.join(self.sent_end_chars)
self.pattern = rf'(?<=[{self.sent_end_chars_regex}])\s'
else:
# Warn that NLTK Punkt is being used by default???
tokenizer = PunktSentenceTokenizer()
# mk list of tokenized sentences
if self.language == 'greek' or self.language in INDIAN_LANGUAGES:
return re.split(self.pattern, untokenized_string)
else:
return tokenizer.tokenize(untokenized_string)
Example 8: preprocessin
def preprocessin(self, cell_value):
# to tokenize the tweet into sentences
tweet = PunktSentenceTokenizer().tokenize(cell_value)
    # join the tokenized sentences back into a single string
    tweet = '\n'.join(tweet)
# to remove html tags
tweet = self.remTags(tweet)
    # to lowercase the tweet
tweet = tweet.lower()
##Removing all junk
    tweet = re.sub(u'(RT |\\\\|\u201c)"?@.*?[: ]', ' ', tweet)
tweet = re.sub('@', ' ', tweet)
tweet = re.sub(r'[^\x00-\x7F]', ' ', tweet)
tweet = re.sub('[\s]+', ' ', tweet)
tweet = re.sub('_', ' ', tweet)
    tweet = re.sub('((www\.[^\s]+)|(https?://[^\s]+))', '', tweet)
tweet = re.sub(r'\\([^\s]+)', ' ', tweet)
tweet = re.sub(u'[\u2018\u2019]', '\'', tweet)
tweet = re.sub('(^|)?http?s?:?/?/?.*?( |$)', ' ', tweet)
tweet = re.sub(u'\u2026', ' ', tweet)
tweet = re.sub('---', ' ', tweet)
tweet = re.sub(u'[\u201c\u201d]', '"', tweet)
    tweet = re.sub('\S*@\S*?( |:|$)', ' ', tweet)
tweet = re.sub(r"\.\.+", ' ', tweet)
tweet = re.sub('&', ' ', tweet)
tweet = re.sub('\.\.\.', ' ', tweet)
tweet = tweet.strip('\'"')
tweet = re.sub('(, |\.( |$))', ' ', tweet)
    tweet = re.sub('[][!"$*,/;<=>?@\\\\^_`{|}~]', ' ', tweet)
tweet = re.sub('( - )', ' ', tweet)
return tweet
Example 9: get_todo_items
def get_todo_items(text):
all_items = list()
tokenizer = PunktSentenceTokenizer()
sen_tokens = tokenizer.tokenize(text)
for sen_token in sen_tokens:
todo_items = list()
tokens = nltk.word_tokenize(sen_token)
tags = tagger.tag(tokens)
stop_words = [word for (word, tag) in tags if tag in (tagVB, tagVBP)]
ind = -1
for word in stop_words:
curr_ind = tokens.index(word)
if curr_ind != 0 and tags[curr_ind - 1][1] in (tagCC, tagRB):
to_ind = curr_ind - 1
else: to_ind = curr_ind
if ind != -1 and abs(to_ind - ind) > 1:
todo_items.append(' '.join(tokens[ind:get_punctuation_index(tokens, ind, to_ind)]))
elif ind != -1 and len(todo_items) > 0:
last_ind = len(todo_items)
todo_items[last_ind - 1] = ' '.join([todo_items[last_ind - 1], tokens[to_ind - 1]])
ind = curr_ind
if ind != -1 and abs(len(tokens) - ind) > 1:
todo_items.append(' '.join(tokens[ind:get_punctuation_index(tokens, ind, len(tokens))]))
elif ind != -1 and len(todo_items) > 0:
last_ind = len(todo_items)
todo_items[last_ind - 1] = ' '.join([todo_items[last_ind - 1], tokens[len(tokens) - 1]])
all_items.extend(todo_items)
return all_items
Example 10: fractal_representation
def fractal_representation(self):
punkt_param = PunktParameters()
for each_paragraph in self.paragraphs:
buffer_p = paragraph()
buffer_p.paragraph = each_paragraph
buffer_p.tokens = nltk.word_tokenize(preprocess(each_paragraph))
buffer_p.weights['words'] = FreqDist(buffer_p.tokens)
buffer_p.weights['total'] = {'words':0, 'sentences':0}
punkt_param.abbrev_types = set(['dr', 'vs', 'mr', 'mrs', 'prof', 'inc'])
sentence_splitter = PunktSentenceTokenizer(punkt_param)
sentences = sentence_splitter.tokenize(each_paragraph)
for each_sentence in sentences:
self.stotal += 1
buffer_s = sentence()
buffer_s.sentence = each_sentence
buffer_s.tokens = nltk.word_tokenize(preprocess(each_sentence))
if len(buffer_s.tokens) > 0:
buffer_s.weights['sentence'] = FreqDist(buffer_s.tokens)
buffer_s.weights['paragraph'] = self.calculate_relative_frequence(buffer_s.tokens, buffer_p.weights['words'])
buffer_s.weights['document'] = self.calculate_relative_frequence(buffer_s.tokens, self.fractal.weights)
buffer_s.weights['total'] = {}
buffer_s.weights['total']['sentence'] = 1
buffer_s.weights['total']['paragraph'] = sum(buffer_s.weights['paragraph'].values())
buffer_s.weights['total']['document'] = sum(buffer_s.weights['document'].values())
self.s_weight += buffer_s.weights['total']['document']
buffer_p.weights['total']['sentences'] += buffer_s.weights['total']['document']
buffer_p.sentences.append(buffer_s)
buffer_p.weights['total']['words'] = sum(buffer_p.weights['words'].values())
self.fractal.paragraphs.append(buffer_p)
self.pindex += 1
Example 11: sentence_tokenizer
def sentence_tokenizer(self, untokenized_string, language):
"""Reads language .pickle for right language"""
if language == 'greek':
pickle_path = os.path.expanduser('~/cltk_data/greek/cltk_linguistic_data/tokenizers/sentence/greek.pickle')
language_punkt_vars = PunktLanguageVars
language_punkt_vars.sent_end_chars = ('.', ';')
language_punkt_vars.internal_punctuation = (',', '·')
elif language == 'latin':
pickle_path = os.path.expanduser('~/cltk_data/latin/cltk_linguistic_data/tokenizers/sentence/latin.pickle')
language_punkt_vars = PunktLanguageVars
language_punkt_vars.sent_end_chars = ('.', '?', ':')
language_punkt_vars.internal_punctuation = (',', ';')
    else:
        # No trained model for this language; fail fast rather than hit an
        # undefined pickle_path below.
        raise ValueError("No sentence tokenizer for this language available.")
with open(pickle_path, 'rb') as open_pickle:
tokenizer = pickle.load(open_pickle)
tokenizer.INCLUDE_ALL_COLLOCS = True
tokenizer.INCLUDE_ABBREV_COLLOCS = True
params = tokenizer.get_params()
sbd = PunktSentenceTokenizer(params)
tokenized_sentences = []
for sentence in sbd.sentences_from_text(untokenized_string,
realign_boundaries=True):
tokenized_sentences.append(sentence)
return tokenized_sentences
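Example 11 assumes a trained tokenizer pickle is already present under ~/cltk_data. As a complement, here is a hedged sketch of how such a model could be trained and saved with NLTK's PunktTrainer; the corpus path and output filename are placeholders, not part of the original code.

import pickle
from nltk.tokenize.punkt import PunktSentenceTokenizer, PunktTrainer

# Placeholder corpus; any large plain-text sample of the target language works.
with open('corpus.txt', encoding='utf-8') as f:
    training_text = f.read()

trainer = PunktTrainer()
trainer.INCLUDE_ALL_COLLOCS = True   # learn collocations aggressively, as in example 11
trainer.train(training_text)

tokenizer = PunktSentenceTokenizer(trainer.get_params())

# Persist the trained tokenizer the same way example 11 expects to load it.
with open('my_language.pickle', 'wb') as f:
    pickle.dump(tokenizer, f)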
Example 12: _punkt_sent_tokenize
def _punkt_sent_tokenize(text):
'''
Sentence segmentation using nltk PunktSentenceTokenizer.
'''
punkt_param = PunktParameters()
punkt_param.abbrev_types = set(config.tokenize_abbrev)
sentence_splitter = PunktSentenceTokenizer(punkt_param)
return sentence_splitter.tokenize(text)
Example 13: tokenize
def tokenize(self):
"""
Returns a list of tokenized sentences
"""
sentence_tokenizer = PunktSentenceTokenizer()
sentences = sentence_tokenizer.sentences_from_text(self.text)
sentences = [sentence.split() for sentence in sentences]
sentences = [[word.strip(",.?!") for word in sentence] for sentence in sentences]
return sentences
Example 14: transform
def transform(self,documents):
sentence_splitter = PunktSentenceTokenizer()
for doc in documents:
if not 'sentences' in doc.ext:
doc.ext['sentences'] = [s.strip() for s in sentence_splitter.tokenize(doc.text)]
# for doc in documents:
# if not 'sentences' in doc.ext:
# doc.ext['sentences'] = [s.strip() for s in doc.text.split('.') if s]
return documents
Example 15: parse
def parse(text):
"""Use nltk's PunktSentenceTokenizer to convert the text string into
a list of English-language sentences."""
punkt_param = PunktParameters()
punkt_param.abbrev_types = set(ABBREVIATIONS)
sentence_splitter = PunktSentenceTokenizer(punkt_param)
return sentence_splitter.tokenize(preprocess(text))