

Python wordnet.NOUN Attribute Code Examples

This article collects typical usage examples of the nltk.corpus.wordnet.NOUN attribute in Python. If you are wondering what wordnet.NOUN is for, how to use it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from its parent module, nltk.corpus.wordnet.


The following 15 code examples of the wordnet.NOUN attribute are sorted by popularity by default.

Example 1: tag_semantic_similarity

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def tag_semantic_similarity(x, y, ic):
    mx = wn.morphy(x)
    my = wn.morphy(y)

    if mx is None or my is None:
        return 0

    synX = wn.synsets(mx, pos=wn.NOUN)
    synY = wn.synsets(my, pos=wn.NOUN)

    if len(synX) > 0 and len(synY) > 0:
        maxSim = synX[0].lin_similarity(synY[0], ic)
    else:
        maxSim = 0

    return maxSim 
Author: li-xirong, Project: jingwei, Lines: 18, Source file: laplacian_tags.py
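
A minimal usage sketch for the function above (an assumption of how it would be called; it requires the NLTK wordnet, wordnet_ic, and brown data to be downloaded):

from nltk.corpus import wordnet as wn
from nltk.corpus import wordnet_ic

# Load the Brown information-content file that Lin similarity needs.
brown_ic = wordnet_ic.ic('ic-brown.dat')

print(tag_semantic_similarity('dog', 'cat', brown_ic))   # similarity of the first noun synsets
print(tag_semantic_similarity('dog', 'xyzzy', brown_ic)) # 0, 'xyzzy' is unknown to WordNet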

Example 2: pos_tag_convert_penn_to_wn

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def pos_tag_convert_penn_to_wn(tag):
    """
    Convert POS tag from Penn tagset to WordNet tagset.

    :param tag: a tag from Penn tagset
    :return: a tag from WordNet tagset or None if no corresponding tag could be found
    """
    from nltk.corpus import wordnet as wn

    if tag in ['JJ', 'JJR', 'JJS']:
        return wn.ADJ
    elif tag in ['RB', 'RBR', 'RBS']:
        return wn.ADV
    elif tag in ['NN', 'NNS', 'NNP', 'NNPS']:
        return wn.NOUN
    elif tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ']:
        return wn.VERB
    return None 
Author: WZBSocialScienceCenter, Project: tmtoolkit, Lines: 20, Source file: _common.py
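
A short lemmatization sketch built around the converter above (a hedged example, assuming the NLTK punkt, averaged_perceptron_tagger, and wordnet data are available):

import nltk
from nltk.corpus import wordnet as wn
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
tokens = nltk.word_tokenize("The striped bats were hanging on their feet")

for word, penn_tag in nltk.pos_tag(tokens):
    wn_tag = pos_tag_convert_penn_to_wn(penn_tag)
    # Fall back to NOUN when the Penn tag has no WordNet counterpart.
    print(word, lemmatizer.lemmatize(word, wn_tag or wn.NOUN))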

Example 3: get_wordnet_pos

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def get_wordnet_pos(self,treebank_tag):
        """
        return WORDNET POS compliance to WORDENT lemmatization (a,n,r,v) 
        """

        if treebank_tag.startswith('J'):
            return wordnet.ADJ

        elif treebank_tag.startswith('V'):
            return wordnet.VERB

        elif treebank_tag.startswith('N'):
            return wordnet.NOUN

        elif treebank_tag.startswith('R'):
            return wordnet.ADV

        else:
            return wordnet.NOUN 
Author: zadewg, Project: Election-Meddling, Lines: 21, Source file: deploy.py

Example 4: graph_synsets

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def graph_synsets(terms, pos=wn.NOUN, depth=2):
    """
    Create a networkx graph of the given terms to the given depth.
    """

    G = nx.Graph(
        name="WordNet Synsets Graph for {}".format(", ".join(terms)), depth=depth,
    )

    def add_term_links(G, term, current_depth):
        for syn in wn.synsets(term):
            for name in syn.lemma_names():
                G.add_edge(term, name)
                if current_depth < depth:
                    add_term_links(G, name, current_depth+1)

    for term in terms:
        add_term_links(G, term, 0)

    return G 
Author: foxbook, Project: atap, Lines: 22, Source file: syngraph.py
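
A hedged usage sketch (assumes networkx is imported as nx, as in the snippet above, and the WordNet data is installed):

import networkx as nx
from nltk.corpus import wordnet as wn

# Build a small lemma graph around a single term, one hop deep.
G = graph_synsets(["tree"], pos=wn.NOUN, depth=1)
print(G.number_of_nodes(), G.number_of_edges())
print(list(G.neighbors("tree"))[:5])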

Example 5: pos_tag_text

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def pos_tag_text(text):
    
    def penn_to_wn_tags(pos_tag):
        if pos_tag.startswith('J'):
            return wn.ADJ
        elif pos_tag.startswith('V'):
            return wn.VERB
        elif pos_tag.startswith('N'):
            return wn.NOUN
        elif pos_tag.startswith('R'):
            return wn.ADV
        else:
            return None
    
    tagged_text = tag(text)
    tagged_lower_text = [(word.lower(), penn_to_wn_tags(pos_tag))
                         for word, pos_tag in
                         tagged_text]
    return tagged_lower_text
    
# lemmatize text based on POS tags 
Author: dipanjanS, Project: text-analytics-with-python, Lines: 23, Source file: normalization.py
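
The tag function used above is project-specific and not shown in this excerpt; a hypothetical stand-in based on nltk.pos_tag, so the snippet can be tried in isolation:

import nltk
from nltk.corpus import wordnet as wn

def tag(text):
    # Hypothetical replacement for the original tagger; any Penn-tagset tagger works here.
    return nltk.pos_tag(nltk.word_tokenize(text))

print(pos_tag_text("Dogs were barking loudly"))
# e.g. [('dogs', 'n'), ('were', 'v'), ('barking', 'v'), ('loudly', 'r')] (tagger-dependent)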

Example 6: _pos_tuples

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def _pos_tuples():
    return [
        (wn.NOUN,'N','noun'),
        (wn.VERB,'V','verb'),
        (wn.ADJ,'J','adj'),
        (wn.ADV,'R','adv')] 
Author: rafasashi, Project: razzy-spinner, Lines: 8, Source file: wordnet_app.py

Example 7: get_wordnet_pos

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def get_wordnet_pos(treebank_tag):
    if treebank_tag.startswith('J'):
        return wordnet.ADJ
    elif treebank_tag.startswith('V'):
        return wordnet.VERB
    elif treebank_tag.startswith('N'):
        return wordnet.NOUN
    elif treebank_tag.startswith('R'):
        return wordnet.ADV
    else:
        return None 
Author: ChenglongChen, Project: tensorflow-XNN, Lines: 13, Source file: main.py

Example 8: lemmatize_word

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def lemmatize_word(word, pos=wordnet.NOUN):
    return LEMMATIZER.lemmatize(word, pos) 
Author: ChenglongChen, Project: tensorflow-XNN, Lines: 4, Source file: main.py
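
The snippet relies on a module-level LEMMATIZER instance; a minimal sketch of how it is presumably wired up (only the name is taken from the snippet, the setup is an assumption):

from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer

LEMMATIZER = WordNetLemmatizer()  # assumed module-level instance used by lemmatize_word

print(lemmatize_word("leaves"))                     # defaults to NOUN -> "leaf"
print(lemmatize_word("running", pos=wordnet.VERB))  # -> "run"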

Example 9: lemmatize_sentence

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def lemmatize_sentence(sentence):
    res = []
    sentence_ = get_valid_words(sentence)
    for word, pos in pos_tag(sentence_):
        wordnet_pos = get_wordnet_pos(pos) or wordnet.NOUN
        res.append(lemmatize_word(word, pos=wordnet_pos))
    return res 
Author: ChenglongChen, Project: tensorflow-XNN, Lines: 9, Source file: main.py
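
get_valid_words comes from the same project and is not shown here; a hedged end-to-end sketch with a hypothetical stand-in, assuming get_wordnet_pos and lemmatize_word from Examples 7 and 8 are in scope:

import nltk
from nltk import pos_tag
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer

LEMMATIZER = WordNetLemmatizer()

def get_valid_words(sentence):
    # Hypothetical stand-in for the project-specific tokenizer/filter.
    return [w for w in nltk.word_tokenize(sentence) if w.isalpha()]

print(lemmatize_sentence("The cats were chasing mice"))
# e.g. ['The', 'cat', 'be', 'chase', 'mouse'] (tagger-dependent)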

Example 10: check_robustpca

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def check_robustpca(trainCollection, testCollection, feature):
    ready = True
    
    # check matlab    
    if not check_matlab():
        print_msg('RobustPCA (%s, %s, %s)' % (trainCollection, testCollection, feature), 'Matlab is not available or incorrectly configured.')
        ready = False
    
    # check if knn is available
    if not check_knn(trainCollection, testCollection, feature):
        print_msg('RobustPCA (%s, %s, %s)' % (trainCollection, testCollection, feature), 'KNN is not available.')        
        ready = False

    # check data files
    datafiles = [ os.path.join(ROOT_PATH, trainCollection, 'TextData', 'id.userid.lemmtags.txt'),
                  os.path.join(ROOT_PATH, trainCollection, 'FeatureData', feature)]
    res = find_missing_files(datafiles)
    if res:
        print_msg('RobustPCA (%s, %s, %s)' % (trainCollection, testCollection, feature), 'the following files or folders are missing:\n%s' % res)
        return False    
              
    # check external dependencies  
    try:
        import h5py
        import numpy
        import scipy.io
        import scipy.sparse
        from nltk.corpus import wordnet as wn
        from nltk.corpus import wordnet_ic
        brown_ic = wordnet_ic.ic('ic-brown.dat')
        wn.morphy('cat')
        wn.synsets('cat', pos=wn.NOUN)
    except Exception as e:
        try:
            import nltk
            nltk.download('brown')
            nltk.download('wordnet')
            nltk.download('wordnet_ic')
        except Exception as e:
            print(e)
            ready = False 
Author: li-xirong, Project: jingwei, Lines: 43, Source file: check_availability.py

Example 11: wup_similarity

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def wup_similarity(tagx, tagy):
    scores = []
    for pos in [wn.NOUN, wn.VERB, wn.ADJ, wn.ADJ_SAT, wn.ADV]:
        try:
            synsetx = wn.synset('%s.%s.01' % (tagx,pos))
            synsety = wn.synset('%s.%s.01' % (tagy,pos))
            score = synsetx.wup_similarity(synsety)
            if score is None:
                score = 0
        except Exception as e:
            score = 0
        scores.append(score) 
Author: li-xirong, Project: jingwei, Lines: 14, Source file: wordnet_similarity.py
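
The excerpt above ends after collecting scores (the original presumably aggregates them, e.g. by taking the maximum); a minimal sketch of the underlying Wu-Palmer call it wraps:

from nltk.corpus import wordnet as wn

dog = wn.synset('dog.n.01')
cat = wn.synset('cat.n.01')

# Wu-Palmer similarity between two NOUN synsets, a float in (0, 1].
print(dog.wup_similarity(cat))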

Example 12: test_pos_tag_convert_penn_to_wn

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def test_pos_tag_convert_penn_to_wn():
    assert pos_tag_convert_penn_to_wn('JJ') == wn.ADJ
    assert pos_tag_convert_penn_to_wn('RB') == wn.ADV
    assert pos_tag_convert_penn_to_wn('NN') == wn.NOUN
    assert pos_tag_convert_penn_to_wn('VB') == wn.VERB

    for tag in ('', 'invalid', None):
        assert pos_tag_convert_penn_to_wn(tag) is None 
Author: WZBSocialScienceCenter, Project: tmtoolkit, Lines: 10, Source file: test_preprocess_func.py

Example 13: __get_wordnet_pos

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def __get_wordnet_pos(self, treebank_tag):

        if treebank_tag.startswith("J"):
            return wordnet.ADJ
        elif treebank_tag.startswith("V"):
            return wordnet.VERB
        elif treebank_tag.startswith("N"):
            return wordnet.NOUN
        elif treebank_tag.startswith("R"):
            return wordnet.ADV
        else:
            return "" 
Author: DongjunLee, Project: quantified-self, Lines: 14, Source file: disintegrator.py

Example 14: tagwn

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def tagwn(self, tag):
        """
        Returns the WordNet tag from the Penn Treebank tag.
        """
        return {
            'N': wn.NOUN,
            'V': wn.VERB,
            'R': wn.ADV,
            'J': wn.ADJ
        }.get(tag[0], wn.NOUN) 
Author: DistrictDataLabs, Project: partisan-discourse, Lines: 12, Source file: learn.py

Example 15: get_wordnet_pos

# Required import: from nltk.corpus import wordnet [as alias]
# Or: from nltk.corpus.wordnet import NOUN [as alias]
def get_wordnet_pos(treebank_tag):
        """ Converts a Penn Tree-Bank part of speech tag into a corresponding WordNet-friendly tag. 
        Borrowed from: http://stackoverflow.com/questions/15586721/wordnet-lemmatization-and-pos-tagging-in-python. """
        if treebank_tag.startswith('J') or treebank_tag.startswith('A'):
            return wordnet.ADJ
        elif treebank_tag.startswith('V'):
            return wordnet.VERB
        elif treebank_tag.startswith('N'):
            return wordnet.NOUN
        elif treebank_tag.startswith('R'):
            return wordnet.ADV
        else:
            return 'OTHER' 
Author: demelin, Project: Sentence-similarity-classifier-for-pyTorch, Lines: 15, Source file: sick_extender.py


Note: The nltk.corpus.wordnet.NOUN examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors; please follow each project's license when distributing or using the code. Do not republish without permission.