This article collects typical usage examples of the Python method nltk.corpus.wordnet.morphy. If you have been wondering what wordnet.morphy does, how to call it, or what real-world uses look like, the curated examples below may help. You can also browse further usage examples from the containing module, nltk.corpus.wordnet.
Below are 12 code examples of wordnet.morphy, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
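Before the examples, here is a minimal sketch of what wn.morphy itself does: it maps an inflected form to its WordNet base form (lemma), optionally restricted to a part of speech, and returns None for unknown forms. The outputs in the comments assume a standard WordNet installation.

    from nltk.corpus import wordnet as wn

    print(wn.morphy('dogs'))             # 'dog'
    print(wn.morphy('churches'))         # 'church'
    print(wn.morphy('denied', wn.VERB))  # 'deny'
    print(wn.morphy('xyzzy'))            # None -- unknown form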
Example 1: tag_semantic_similarity
# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import morphy [as alias]
def tag_semantic_similarity(x, y, ic):
    mx = wn.morphy(x)
    my = wn.morphy(y)
    if mx is None or my is None:
        return 0
    synX = wn.synsets(mx, pos=wn.NOUN)
    synY = wn.synsets(my, pos=wn.NOUN)
    if len(synX) > 0 and len(synY) > 0:
        maxSim = synX[0].lin_similarity(synY[0], ic)
    else:
        maxSim = 0
    return maxSim
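A quick usage sketch, assuming the function above is in scope: the ic argument is an information-content dictionary, such as the Brown IC that ships with NLTK (the same resource loaded in Example 4 below); the tag strings here are illustrative.

    from nltk.corpus import wordnet as wn
    from nltk.corpus import wordnet_ic

    brown_ic = wordnet_ic.ic('ic-brown.dat')
    # Lin similarity between the first noun senses of the two lemmatized tags.
    print(tag_semantic_similarity('dogs', 'cats', brown_ic))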
Example 2: wordnet_similarity_matrix
# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import morphy [as alias]
def wordnet_similarity_matrix(embeddings):
    """
    Makes a similarity matrix from WordNet.
    The embeddings argument is only used to get the set of words to use.
    """
    sim_mat = np.zeros((len(embeddings.iw), len(embeddings.iw)))
    words = {word: wn.morphy(word) for word in embeddings.iw}
    lemmas = {lemma: word for word, lemma in words.items()}
    for i, word in enumerate(words):
        if words[word] is None:
            continue
        # All lemma names that share a synset with this word's base form.
        synonyms = set(chain.from_iterable([o_word.lemma_names()
                                            for o_word in wn.synsets(words[word])]))
        for o_word in synonyms:
            if o_word in lemmas:
                sim_mat[embeddings.wi[word], embeddings.wi[lemmas[o_word]]] = 1.
    print(np.sum(sim_mat))
    np.fill_diagonal(sim_mat, 0)
    return sim_mat
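The snippet assumes an embeddings object exposing iw (an index-to-word list) and wi (a word-to-index dict). A minimal stand-in for experimentation might look like this; the ToyEmbeddings class is hypothetical, with attribute names inferred from the code above.

    import numpy as np
    from itertools import chain
    from nltk.corpus import wordnet as wn

    class ToyEmbeddings:
        # Hypothetical minimal version of the embeddings interface assumed above.
        def __init__(self, vocab):
            self.iw = list(vocab)                          # index -> word
            self.wi = {w: i for i, w in enumerate(vocab)}  # word -> index

    emb = ToyEmbeddings(['car', 'automobile', 'dog'])
    print(wordnet_similarity_matrix(emb))  # car/automobile cells should be 1.0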
Example 3: plural_rule
# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import morphy [as alias]
def plural_rule(ans):
    singular = wordnet.morphy(ans)
    if singular is not None and singular != ans:
        return singular
    else:
        return ans
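A hedged usage sketch; the expected outputs in the comments rely on WordNet's noun exception list, which handles irregular plurals.

    from nltk.corpus import wordnet

    print(plural_rule('geese'))  # 'goose' (irregular plural, via the exception list)
    print(plural_rule('dogs'))   # 'dog'
    print(plural_rule('dog'))    # 'dog' (already singular, returned unchanged)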
Example 4: check_robustpca
# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import morphy [as alias]
def check_robustpca(trainCollection, testCollection, feature):
    ready = True
    # check matlab
    if not check_matlab():
        print_msg('RobustPCA (%s, %s, %s)' % (trainCollection, testCollection, feature), 'Matlab is not available or incorrectly configured.')
        ready = False
    # check if knn is available
    if not check_knn(trainCollection, testCollection, feature):
        print_msg('RobustPCA (%s, %s, %s)' % (trainCollection, testCollection, feature), 'KNN is not available.')
        ready = False
    # check data files
    datafiles = [os.path.join(ROOT_PATH, trainCollection, 'TextData', 'id.userid.lemmtags.txt'),
                 os.path.join(ROOT_PATH, trainCollection, 'FeatureData', feature)]
    res = find_missing_files(datafiles)
    if res:
        print_msg('RobustPCA (%s, %s, %s)' % (trainCollection, testCollection, feature), 'the following files or folders are missing:\n%s' % res)
        return False
    # check external dependencies
    try:
        import h5py
        import numpy
        import scipy.io
        import scipy.sparse
        from nltk.corpus import wordnet as wn
        from nltk.corpus import wordnet_ic
        # Smoke-test that the required NLTK data is installed.
        brown_ic = wordnet_ic.ic('ic-brown.dat')
        wn.morphy('cat')
        wn.synsets('cat', pos=wn.NOUN)
    except Exception:
        # Missing NLTK data: try to fetch it before giving up.
        try:
            import nltk
            nltk.download('brown')
            nltk.download('wordnet')
            nltk.download('wordnet_ic')
        except Exception as e:
            print(e)
            ready = False
    return ready
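The try/download/except pattern above is a common way to bootstrap NLTK corpora. A self-contained sketch of just that part, assuming nothing beyond NLTK itself: accessing the lazily loaded corpus raises LookupError when the data is absent.

    import nltk

    try:
        from nltk.corpus import wordnet as wn
        wn.morphy('cat')  # raises LookupError if the WordNet data is absent
    except LookupError:
        nltk.download('wordnet')
        nltk.download('wordnet_ic')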
Example 5: get
# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import morphy [as alias]
def get(self, k, default=None):
    try:
        return self[k]
    except KeyError:
        if self._lowercase_if_OOV:
            lowercased = k.lower()
            if lowercased in self.vocabulary:
                return self[lowercased]
        if self._lemmatize_if_OOV:
            lemma = wordnet.morphy(k)
            if lemma in self.vocabulary:
                return self[lemma]
        return default
Example 6: get_multi
# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import morphy [as alias]
def get_multi(self, k, default=None):
    try:
        return self.multi_vectors[self.vocabulary[k]]
    except KeyError:
        if self._lowercase_if_OOV:
            lowercased = k.lower()
            if lowercased in self.vocabulary:
                return self.multi_vectors[self.vocabulary[lowercased]]
        if self._lemmatize_if_OOV:
            lemma = wordnet.morphy(k)
            if lemma in self.vocabulary:
                return self.multi_vectors[self.vocabulary[lemma]]
        return default
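Examples 5 and 6 implement the same out-of-vocabulary fallback chain: exact key, then lowercase, then WordNet lemma. A minimal self-contained sketch of the idea; the ToyVectors class is hypothetical, with flag names mirroring the snippets above.

    from nltk.corpus import wordnet

    class ToyVectors(dict):
        # Hypothetical container reproducing the OOV fallbacks from Examples 5-6.
        _lowercase_if_OOV = True
        _lemmatize_if_OOV = True

        def lookup(self, k, default=None):
            if k in self:
                return self[k]
            if self._lowercase_if_OOV and k.lower() in self:
                return self[k.lower()]
            if self._lemmatize_if_OOV:
                lemma = wordnet.morphy(k)
                if lemma in self:
                    return self[lemma]
            return default

    vecs = ToyVectors(dog=[0.1, 0.2])
    print(vecs.lookup('dogs'))  # OOV: morphy('dogs') -> 'dog', which is in the vocabulary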
Example 7: obtain_linguistic_relationships
# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import morphy [as alias]
def obtain_linguistic_relationships(word):
    word = word.strip('"')
    linguistic_relationships = []
    linguistic_relationships.append(('copy', word))
    base_word = wn.morphy(word)
    if base_word is None:
        base_word = word.lower()
    if word != base_word:
        linguistic_relationships.append(('inflection', base_word))
    linguistic_relationships.extend(
        [('synonym', lemma) for lemma in obtain_synonyms(word)])
    linguistic_relationships.extend(
        [('hypernym', lemma) for lemma in obtain_hypernyms(word)])
    linguistic_relationships.extend(
        [('hyponym', lemma) for lemma in obtain_hyponyms(word)])
    linguistic_relationships.extend(
        [('holonym', lemma) for lemma in obtain_holonyms(word)])
    linguistic_relationships.extend(
        [('meronym', lemma) for lemma in obtain_meronyms(word)])
    linguistic_relationships.extend(
        [('antonym', lemma) for lemma in obtain_antonyms(word)])
    linguistic_relationships.extend(
        [('entailed', lemma) for lemma in obtain_entailments(word)])
    linguistic_relationships.extend(
        [('derivation', lemma) for lemma in obtain_derivations(word)])
    return linguistic_relationships
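The obtain_* helpers are not shown in this excerpt. As an illustration only, a plausible obtain_synonyms built on WordNet might look like this; the name and behavior are assumptions inferred from the call site.

    from nltk.corpus import wordnet as wn

    def obtain_synonyms(word):
        # Hypothetical helper: lemma names sharing any synset with the word.
        return {lemma for synset in wn.synsets(word)
                      for lemma in synset.lemma_names()
                      if lemma != word}

    print(obtain_synonyms('car'))  # includes 'automobile', among others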
# Check whether word1 is a synonym of word2 by testing whether the synsets
# of word1 and the synsets of word2 intersect.
# If word1 = 'car' and word2 = 'automobile', this function should return True.
Example 8: get_verbocean_relations
# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import morphy [as alias]
def get_verbocean_relations(verb1, verb2):
    if verb1 in verbocean and verb2 in verbocean[verb1]:
        return set(verbocean[verb1][verb2])
    return set()
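Here verbocean is a module-level lookup loaded elsewhere; judging from this function, it is a nested dict mapping ordered verb pairs to relation labels from the VerbOcean resource. A hypothetical instance, just to show the assumed shape (the labels come from the comment in Example 9 below):

    # Hypothetical shape of the verbocean structure assumed by the function above.
    verbocean = {
        'fight': {'win': ['happens-before']},
        'shout': {'whisper': ['stronger-than']},
    }

    print(get_verbocean_relations('fight', 'win'))  # {'happens-before'}
    print(get_verbocean_relations('win', 'fight'))  # set() -- no entry in this direction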
# Find linguistic relationship between two words.
# Remaining relationships that I would like to implement:
# linguistic_relationship('man', 'men') would return 'plural'.
# linguistic_relationship('go', 'went') would return 'present'.
# BUG: linguistic_relationship('man', 'men') returns
# ['synonym', 'hypernym', 'hyponym'] because 'man' and 'men' have the same
# lemma but wn.morphy cannot recognize it. We should detect this and prevent
# those relationships from triggering. However,
# linguistic_relationship('woman', 'women') returns ['inflection'] as expected,
# until we implement the 'plural' relationship.
Example 9: linguistic_relationship
# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import morphy [as alias]
def linguistic_relationship(word1, word2):
    (word1, word2) = (word1.strip('"'), word2.strip('"'))
    if word1 == word2:
        return ['copy']
    base_word1 = wn.morphy(word1)
    base_word2 = wn.morphy(word2)
    if base_word1 is None:
        base_word1 = word1.lower()
    if base_word2 is None:
        base_word2 = word2.lower()
    ling_relations = []
    if word1 != word2 and base_word1 == base_word2:
        return ['inflection']
    if is_synonym(base_word1, base_word2):
        ling_relations.append('synonym')
    if is_hypernym(base_word1, base_word2):
        ling_relations.append('hyponym')
    if is_hyponym(base_word1, base_word2):
        ling_relations.append('hypernym')
    if is_similar(base_word1, base_word2):
        ling_relations.append('similar')
    if is_holonym(base_word1, base_word2):
        ling_relations.append('holonym')
    if is_meronym(base_word1, base_word2):
        ling_relations.append('meronym')
    if is_antonym(base_word1, base_word2):
        ling_relations.append('antonym')
    if is_entailed(base_word1, base_word2):
        ling_relations.append('entailed')
    if is_derivation(word1, word2):
        ling_relations.append('derivation')
    # Typical types of verbocean relations are "happens-before" or "stronger-than".
    ling_relations.extend(get_verbocean_relations(base_word1, base_word2))
    return ling_relations
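Per the comments before Example 9, the expected behavior looks like this; the outputs are taken from those comments, and the is_* helper predicates (not shown in this excerpt) must be defined for the second call to run.

    print(linguistic_relationship('car', 'car'))      # ['copy']
    print(linguistic_relationship('woman', 'women'))  # ['inflection']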
Example 10: page_from_reference
# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import morphy [as alias]
def page_from_reference(href):
    '''
    Returns a tuple of the HTML page built and the new current word

    :param href: The hypertext reference to be solved
    :type href: str
    :return: A tuple (page, word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str, str)
    '''
    word = href.word
    pos_forms = defaultdict(list)
    words = word.split(',')
    words = [w for w in [w.strip().lower().replace(' ', '_')
                         for w in words]
             if w != ""]
    if len(words) == 0:
        # No words were found.
        return "", "Please specify a word to search for."

    # This looks up multiple words at once.  This is probably not
    # necessary and may lead to problems.
    for w in words:
        for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]:
            form = wn.morphy(w, pos)
            if form and form not in pos_forms[pos]:
                pos_forms[pos].append(form)
    body = ''
    for pos, pos_str, name in _pos_tuples():
        if pos in pos_forms:
            body += _hlev(3, name) + '\n'
            for w in pos_forms[pos]:
                # Not all words of exc files are in the database, skip
                # to the next word if a KeyError is raised.
                try:
                    body += _collect_all_synsets(w, pos, href.synset_relations)
                except KeyError:
                    pass
    if not body:
        body = "The word or words '%s' were not found in the dictionary." % word
    return body, word
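This function appears to come from NLTK's wordnet browser app; _pos_tuples, _hlev, and _collect_all_synsets are module-level helpers from that app, and href is an object carrying the query. A hypothetical minimal stand-in for href, with attribute names taken from the code above:

    from collections import namedtuple

    # Hypothetical stand-in for the browser's reference object; only the two
    # attributes read by page_from_reference are provided.
    Reference = namedtuple('Reference', ['word', 'synset_relations'])

    body, word = page_from_reference(Reference(word='dogs, cats', synset_relations=dict()))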
#####################################################################
# Static pages
#####################################################################
Example 11: page_from_reference
# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import morphy [as alias]
def page_from_reference(href):
    '''
    Returns a tuple of the HTML page built and the new current word

    :param href: The hypertext reference to be solved
    :type href: str
    :return: A tuple (page, word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str, str)
    '''
    word = href.word
    pos_forms = defaultdict(list)
    words = word.split(',')
    words = [w for w in [w.strip().lower().replace(' ', '_')
                         for w in words]
             if w != ""]
    if len(words) == 0:
        # No words were found.
        return "", "Please specify a word to search for."

    # This looks up multiple words at once.  This is probably not
    # necessary and may lead to problems.
    for w in words:
        for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]:
            form = wn.morphy(w, pos)
            if form and form not in pos_forms[pos]:
                pos_forms[pos].append(form)
    body = ''
    for pos, pos_str, name in _pos_tuples():
        if pos in pos_forms:
            body += _hlev(3, name) + '\n'
            for w in pos_forms[pos]:
                # Not all words of exc files are in the database, skip
                # to the next word if a KeyError is raised.
                try:
                    body += _collect_all_synsets(w, pos, href.synset_relations)
                except KeyError:
                    pass
    if not body:
        body = "The word or words '%s' were not found in the dictionary." % word
    return body, word
#####################################################################
# Static pages
#####################################################################
Example 12: page_from_reference
# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import morphy [as alias]
def page_from_reference(href):
    '''
    Returns a tuple of the HTML page built and the new current word

    :param href: The hypertext reference to be solved
    :type href: str
    :return: A tuple (page, word), where page is the new current HTML page
             to be sent to the browser and
             word is the new current word
    :rtype: A tuple (str, str)
    '''
    word = href.word
    pos_forms = defaultdict(list)
    words = word.split(',')
    words = [w for w in [w.strip().lower().replace(' ', '_') for w in words] if w != ""]
    if len(words) == 0:
        # No words were found.
        return "", "Please specify a word to search for."

    # This looks up multiple words at once.  This is probably not
    # necessary and may lead to problems.
    for w in words:
        for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADV]:
            form = wn.morphy(w, pos)
            if form and form not in pos_forms[pos]:
                pos_forms[pos].append(form)
    body = ''
    for pos, pos_str, name in _pos_tuples():
        if pos in pos_forms:
            body += _hlev(3, name) + '\n'
            for w in pos_forms[pos]:
                # Not all words of exc files are in the database, skip
                # to the next word if a KeyError is raised.
                try:
                    body += _collect_all_synsets(w, pos, href.synset_relations)
                except KeyError:
                    pass
    if not body:
        body = "The word or words '%s' were not found in the dictionary." % word
    return body, word
#####################################################################
# Static pages
#####################################################################