Python words.words Function Code Examples

This article collects and summarizes typical usage examples of the Python function nltk.corpus.words.words. If you are wondering how the words function is used in practice, or simply looking for concrete examples of calling it, the curated code samples below should help.


The following presents 15 code examples of the words function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
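
Before diving into the individual examples, here is a minimal usage sketch of the function itself (a sketch only, which assumes the NLTK "words" corpus data has already been installed on the machine):

import nltk
from nltk.corpus import words

# Download the corpus data once if it is not already present locally.
nltk.download('words', quiet=True)

# words.words() returns the full English word list as a plain Python list.
word_list = words.words()
print(len(word_list), word_list[:3])

# A much smaller basic-English list is available under the 'en-basic' fileid
# (used in Examples 5 and 9 below).
basic_words = words.words('en-basic')

# The result is a list, so repeated membership tests are slow; build a set
# once for fast lookups, as several of the examples below do.
valid_words = set(w.lower() for w in word_list)
print('corpus' in valid_words)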

Example 1: add_sample

 def add_sample(self, sample):
     if not isinstance(sample, str):
         raise TypeError
     # Calling add_sample should replace existing sample.
     # To avoid appending new values onto existing lists:
     self.sample = sample
     self.misspelled_words = []
     self.tokenized_sample = []
     self.tagged_sample = {}
     sample = sample.replace('\n', " ")
     sample = sample.rstrip(" ")
     for char in punctuation.replace("'", ""):
         sample = sample.replace(char, "")
     tokens = word_tokenize(sample)
     for word in tokens:
         if word.lower() in words.words():
             self.tokenized_sample.append(word)
         elif word.capitalize() in names.words():
             continue
         elif "'" in word:
             self.tokenized_sample.append(word)
         elif LEMMATIZER.lemmatize(word.lower()) not in words.words():
             if STEMMER.stem(word.lower()) not in words.words():
                 self.misspelled_words.append(word)
         else:
             self.tokenized_sample.append(word)
     self.tagged_sample = pos_tag(tokens)
Developer: brythonick, Project: pyesol, Lines of code: 27, Source file: pyesol.py

Example 2: divide

def divide(s):
	first = ''
	for i in range(len(s)):
		first += s[i]
		print(first)
		# Split as soon as both the prefix and the remainder are valid English words.
		if first in words.words() and s[i + 1:] in words.words():
			return ' '.join([first, s[i + 1:]])
	return False
Developer: agatanyc, Project: RC, Lines of code: 8, Source file: divide_string.py

Example 3: raw_files_to_labeled_features

def raw_files_to_labeled_features(raw_files, label_file):
    # Initialize spark
    conf = SparkConf().setAppName("SpamFilter").setMaster("local[*]")
    sc = SparkContext(conf=conf)

    # Get the set of words that we will be accepting as valid features
    valid_words = set(w.lower() for w in words.words())

    # Load training data and convert to our desired format
    raw_files = sc.wholeTextFiles(raw_files)

    # Extract a document of filtered words from each text file
    documents = raw_files.map(lambda x: (x[0], extract_words(x[1], valid_words)))

    # Calculate TF-IDF values for each document
    tfidf = calculate_tfidf(documents)

    # Load labels
    labels = sc.parallelize(load_labels(label_file)).map(lambda x: x[0])

    # Append indexes to features and labels
    indexed_labels = labels.zipWithIndex().map(lambda x: (x[1],x[0]))
    indexed_features = tfidf.zipWithIndex().map(lambda x: (x[1],x[0]))

    # Join labels and features into tuples and return
    return indexed_labels.join(indexed_features).map(lambda x: x[1]).collect()
Developer: agharbin, Project: spam-filter-ml, Lines of code: 26, Source file: feature_extract.py

Example 4: get_vocab

def get_vocab():
    word_list = words.words()
    lowercased = [t.lower() for t in word_list]
    STEMMER = PorterStemmer()
    stemmed = [STEMMER.stem(w) for w in lowercased]
    vocab = list(set(stemmed))
    return vocab
Developer: nhu2000, Project: wiki-search, Lines of code: 7, Source file: kmeans_model.py

Example 5: _english_wordlist

 def _english_wordlist(self):
     try:
         wl = self._en_wordlist
     except AttributeError:
         from nltk.corpus import words
         wl = self._en_wordlist = set(words.words('en-basic'))
     return wl
Developer: digging-into-data-berkeley, Project: cheshire3, Lines of code: 7, Source file: extractor.py

Example 6: get_english_vocab

def get_english_vocab(lemmatize=False):
    vocab = (w.lower() for w in words.words())

    if lemmatize:
        stemmer = PorterStemmer()
        vocab = (stemmer.stem(w) for w in vocab)
    return set(vocab)
Developer: ned2, Project: okdata, Lines of code: 7, Source file: okreader.py

Example 7: __init__

 def __init__(self, dict_path='/etc/dictionaries-common/words'):
      # We use two dictionaries for better coverage
      with open(dict_path) as f:
          d1 = set([w.lower() for w in f.read().split()])
      d2 = set([w.lower() for w in words.words()])

      self.words = set(d1.union(d2))
Developer: okkhoy, Project: gabe-and-joh, Lines of code: 8, Source file: dictionary.py

Example 8: unknown

def unknown(text_string):
    unknown_words = []
    # Keep only lowercase words that follow a space (skips punctuation and capitalized words)
    k = re.findall(r'(?<= )[a-z]+\b', text_string)
    print(text_string)
    for w in k:                                          # Check each candidate word
        if w not in words.words():                       # If the word is not in the NLTK word dictionary:
            unknown_words.append(w)                      # add it to the unknown list
    print(unknown_words)                                 # Print words that are not in the NLTK word dictionary
    return unknown_words
Developer: Plonski, Project: Information-Retrieval, Lines of code: 8, Source file: unknownWords.py

Example 9: __init__

 def __init__(self):
     self.stopwords = stopwords.words('english')
     self.uscities = set([w.lower() for w in gazetteers.words('uscities.txt')])
     self.usstates = set([w.lower() for w in gazetteers.words('usstates.txt')])
     self.countries = set([w.lower() for w in gazetteers.words('countries.txt')])
     self.basicwords = set(words.words('en-basic'))
     self.paragraph_tokens = []
     self.texts = []
Developer: yuedong111, Project: topical-spiders, Lines of code: 8, Source file: topic_dictionary.py

Example 10: extractingFromFolders

def extractingFromFolders():
    folder2 = os.path.expanduser('~\\My Documents\\Tara\\Ongoing\\CharacterCorpus\\Reference')
    fileresult = os.path.expanduser('~\\My Documents\\Tara\\Ongoing\\CharacterCorpus\\results.txt')
    refer = PlaintextCorpusReader(folder2, 'harrygrepster.txt')
    grepster = refer.words()
    results = open(fileresult, 'a')
    completeWords = wordlist.words()
    stoppers = stopwords.words()
    return grepster, results, completeWords, stoppers
Developer: taratemima, Project: Some-Python-Work, Lines of code: 9, Source file: grepsterTest.py

Example 11: __init__

    def __init__(self,
                 corpora_list=['all_plaintext.txt', 'big.txt'],
                 parse_args=(True, True, True, True, True)):

        #Set the parsing arguments
        self.remove_stopwords = parse_args[0]
        self.tag_numeric = parse_args[1]
        self.correct_spelling = parse_args[2]
        self.kill_nonwords = parse_args[3]
        self.stem = parse_args[4]

        #Alphabet
        self.alphabet = 'abcdefghijklmnopqrstuvwxyz'

        #Punctuation
        self.punc_dict = {ord(c): None for c in string.punctuation}

        #Reserved tags
        self.reserved_tags = ['numeric_type_hex',
                              'numeric_type_binary',
                              'numeric_type_octal',
                              'numeric_type_float',
                              'numeric_type_int',
                              'numeric_type_complex',
                              'numeric_type_roman',
                              'math_type']

        #Update the set of nltk words with the additional corpora
        self.all_words = set(words.words())
        self.all_words.update('a')
        self.all_words.update('i')
        self.all_words.update(self.reserved_tags)
        self.max_word_length = 20

        #Set up the stopwords, remove 'a' due to math issues
        self.stops = set(stopwords.words("english"))
        self.stops.remove('a')
        self.stops.remove('no')

        #Set up the stemmer
        self.st = SnowballStemmer('english')

        #Train the spelling corrector using all corpora
        train_text = ''
        for cfile in corpora_list:
            # file() is Python 2 only; open() works everywhere, and each
            # corpus file only needs to be read once.
            with open(cfile) as corpus_file:
                words_in_file = corpus_file.read()
            self.all_words.update(self.get_all_words(words_in_file))
            train_text = train_text + words_in_file

        #Remove single character terms
        wordlist = list(self.all_words)
        wordlist = [i for i in wordlist if len(i) > 1]
        self.all_words = set(wordlist)
        self.all_words.update('a')
        self.all_words.update('i')

        self.NWORDS = self.train(self.get_all_words(train_text))
Developer: openstax, Project: research-eGrader, Lines of code: 57, Source file: WordUtility.py

Example 12: tokenize4

def tokenize4(text):
	wordnet_lemmatizer = WordNetLemmatizer()
	tokens             = word_tokenize(text)
	wordset            = set(words.words())
	tokens             = [wordnet_lemmatizer.lemmatize(token, NOUN) for token in tokens]
	tokens             = [wordnet_lemmatizer.lemmatize(token, VERB) for token in tokens]
	tokens             = [wordnet_lemmatizer.lemmatize(token, ADJ) for token in tokens]
	tokens             = [token for token in tokens if token in wordset]
	return tokens
Developer: SJinping, Project: WordProc, Lines of code: 9, Source file: wordProcBase.py

Example 13: unknown

def unknown(url):
    # get the HTML, as a string
    html = str(bs(urllib.urlopen(url).read()))
    # find all substrings
    substrings = set(re.findall(r'[a-z]+', html))
    # specify the wordlist
    wordlist = words.words()
    # return the words not in the wordlist
    return [word for word in substrings if word not in wordlist]
Developer: jonathanmonreal, Project: nltk-examples, Lines of code: 9, Source file: c3q21.py

Example 14: textParse

def textParse(file):
    processedText = ''
    with open(file, 'r') as f:
        lines = f.read().splitlines()
        for line in lines:
            wordsInLine = line.split(' ')
            for word in wordsInLine:
                # print '*'+word+'*'
                if word.lower() in words.words():
                    processedText += word + ' '
    return processedText
Developer: WodlBodl, Project: visionAssistant, Lines of code: 11, Source file: textProcess.py

Example 15: getReadabilityScore

def getReadabilityScore(tweet):
    w1 = tweet.split(" ")
    ASL1 = len(w1)                     # number of words in the tweet
    AOV1 = 0                           # count of out-of-vocabulary words
    total_chars = 0
    for w in w1:
        total_chars += len(w)
        if w not in words.words():
            AOV1 += 1
    ASW1 = total_chars / float(ASL1)   # average word length in characters
    # Flesch-style reading-ease score with an extra penalty for unknown words
    S1 = 206.835 - (1.015 * ASL1) - (84.6 * ASW1) - (10.5 * AOV1)
    return S1
Developer: hackuser15, Project: 239AS, Lines of code: 12, Source file: Functions.py


Note: The nltk.corpus.words.words function examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers, and the copyright of the source code remains with the original authors; for distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.