

Python nltk.RegexpTokenizer Method Code Examples

This article collects typical usage examples of the Python method nltk.RegexpTokenizer. If you are wondering how exactly nltk.RegexpTokenizer is used, what it is for, or what real-world calls look like, the curated code examples below may help. You can also explore other usage examples from the nltk package.


The sections below present 13 code examples of nltk.RegexpTokenizer, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
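
Before the project examples, here is a minimal, self-contained sketch of the basic API (the sample sentence and variable names are ours, not taken from any of the projects below). RegexpTokenizer splits text with a regular expression you supply, so unlike word_tokenize it needs no extra NLTK data downloads:

import nltk

# Keep runs of word characters, dropping punctuation and whitespace.
tokenizer = nltk.RegexpTokenizer(r'\w+')
print(tokenizer.tokenize("Hello, world! It's 2024."))
# ['Hello', 'world', 'It', 's', '2024']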

Example 1: load_data

# Required import: import nltk [as alias]
# Or: from nltk import RegexpTokenizer [as alias]
def load_data():
    global N, words

    raw = list(word 
            for fileid in corpus.fileids()
            for word in corpus.words(fileid))
    words = list(token for token in RegexpTokenizer(r'\w+').tokenize(' '.join(raw)))[100:1000]
    tokens = set(words)
    tokens_l = list(tokens)
    N = len(tokens)
    print('Corpus size: {} words'.format(N))

    step = 4
    data = []
    for gram in ngrams(words, step):
        w1, w2, w3, pred = gram
        V = Vol(1, 1, N, 0.0)
        V.w[tokens_l.index(w1)] = 1
        V.w[tokens_l.index(w2)] = 1
        V.w[tokens_l.index(w3)] = 1
        label = tokens_l.index(pred)
        data.append((V, label))

    return data 
Author: benglard, Project: ConvNetPy, Lines of code: 26, Source file: next_word.py
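
The corpus, Vol and ngrams names above come from the ConvNetPy project, so the snippet is not runnable on its own; the windowing step it relies on, however, is plain nltk.util.ngrams. A small sketch of just that step (the word list is ours):

from nltk.util import ngrams

words = ['we', 'hold', 'these', 'truths', 'to', 'be', 'self', 'evident']
# Slide a 4-word window over the text: the first three words are the context,
# the fourth is the word to predict.
for w1, w2, w3, pred in ngrams(words, 4):
    print((w1, w2, w3), '->', pred)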

Example 2: test

# Required import: import nltk [as alias]
# Or: from nltk import RegexpTokenizer [as alias]
def test(): 
    gt = GetTweets()
    documents = gt.get_hashtag('ferguson', count=20)
    documents += gt.get_hashtag('police', count=21)
    print('Query:', documents[-1])

    tokenizer = RegexpTokenizer(r'\w+')
    vols = []
    for doc in documents:
        samples = []
        for token in tokenizer.tokenize(doc):
            word = token.lower()
            if word not in ENGLISH_STOP_WORDS and word not in punctuation:
                samples.append(word)
        vols.append(volumize(FreqDist(samples)))

    vectors = [ doc_code(v) for v in vols[:-1] ]
    query_vec = doc_code(vols[-1])

    sims = [ cos(v, query_vec) for v in vectors ]
    m = max(sims)
    print(m, documents[sims.index(m)])
Author: benglard, Project: ConvNetPy, Lines of code: 24, Source file: similarity.py

Example 3: convert_to_vw

# Required import: import nltk [as alias]
# Or: from nltk import RegexpTokenizer [as alias]
def convert_to_vw(text):
    tokenizer = nltk.RegexpTokenizer(r'\w+')
    lmtzr = WordNetLemmatizer()
    tokens = [t.lower() for t in tokenizer.tokenize(text)]
    id_ = 13371337
    processed = []
    for t in tokens:
        l = lmtzr.lemmatize(t)
        processed.append(l)
    counted = Counter(processed)
    res_str = str(id_)
    for k, v in counted.items():
        if v != 1:
            res_str = res_str + " {}:{}".format(k, v)
        else:
            res_str = res_str + " {}".format(k)
    return res_str 
Author: sld, Project: convai-bot-1337, Lines of code: 19, Source file: tokenizing.py
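
A hedged usage sketch for convert_to_vw as defined above (the input sentence is ours; the function also assumes nltk, collections.Counter and nltk.stem.WordNetLemmatizer are imported, and that the NLTK 'wordnet' data is installed):

print(convert_to_vw("cats chase cats and dogs"))
# roughly: "13371337 cat:2 chase and dog"
# i.e. the fixed document id followed by lemma counts, counts of 1 left implicit.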

Example 4: update_hashtags_stats

# Required import: import nltk [as alias]
# Or: from nltk import RegexpTokenizer [as alias]
def update_hashtags_stats(hashtags_fd, json_tweet):
    tweet = utils.extract_tweet_from_json(json_tweet)
    tweet_terms = []
    if tweet is None or '#' not in tweet:
        return False
    tokenizer = nltk.RegexpTokenizer(r'\#?[\w\d]+')
    doc = tokenizer.tokenize(tweet)
    for w_raw in doc:
        if '#' not in w_raw:
            continue
        w = (w_raw.strip('\"\'.,;?!:)(@/*&')).lower()
        tweet_terms.append(w)
        hashtags_fd.inc(w)
    return True


# Processes the tweet and updates terms_fd based on the tweet terms.
# Specifically, a term not seen before is added to the frequency dict,
# otherwise its counter is increased.
Author: sajao, Project: CrisisLex, Lines of code: 21, Source file: adaptive_collect.py
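
The .inc() call and utils.extract_tweet_from_json are specific to the CrisisLex code base, but the hashtag-extraction step is easy to isolate. A hedged sketch of just that part (the sample tweet is ours):

import nltk

tokenizer = nltk.RegexpTokenizer(r'\#?[\w\d]+')
tweet = "Protests continue in #Ferguson tonight #BREAKING"
# Keep only hashtag tokens, strip stray punctuation, lowercase.
hashtags = [w.strip('\"\'.,;?!:)(@/*&').lower()
            for w in tokenizer.tokenize(tweet) if '#' in w]
print(hashtags)   # ['#ferguson', '#breaking']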

Example 5: getTokens

# Required import: import nltk [as alias]
# Or: from nltk import RegexpTokenizer [as alias]
def getTokens(self, removeStopwords=True):
        """ Tokenizes the text, breaking it up into words, removing punctuation. """
        tokenizer = nltk.RegexpTokenizer(r"[a-zA-Z]\w+'?\w*")  # A custom regex tokenizer.
        spans = list(tokenizer.span_tokenize(self.text))
        # Record the end offset of the last token (used as the text length)
        self.length = spans[-1][-1]
        tokens = tokenizer.tokenize(self.text)
        tokens = [ token.lower() for token in tokens ] # make them lowercase
        stemmer = LancasterStemmer()
        tokens = [ stemmer.stem(token) for token in tokens ]
        if not removeStopwords:
            self.spans = spans
            return tokens
        tokenSpans = list(zip(tokens, spans)) # zip it up
        stopwords = nltk.corpus.stopwords.words('english') # get stopwords
        tokenSpans = [ token for token in tokenSpans if token[0] not in stopwords ] # remove stopwords from zip
        self.spans = [ x[1] for x in tokenSpans ] # unzip; get spans
        return [ x[0] for x in tokenSpans ] # unzip; get tokens 
Author: JonathanReeve, Project: text-matcher, Lines of code: 20, Source file: matcher.py
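
getTokens is a method of a class in text-matcher (it reads self.text and stores self.spans), so it cannot run in isolation. A hedged sketch of the two tokenizer calls it builds on, using a sample sentence of ours:

import nltk

tokenizer = nltk.RegexpTokenizer(r"[a-zA-Z]\w+'?\w*")
text = "The cat's whiskers twitched."
print(tokenizer.tokenize(text))                 # ['The', "cat's", 'whiskers', 'twitched']
print(list(tokenizer.span_tokenize(text))[:2])  # [(0, 3), (4, 9)] -- (start, end) character offsets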

Example 6: get_tokenize

# Required import: import nltk [as alias]
# Or: from nltk import RegexpTokenizer [as alias]
def get_tokenize():
    return RegexpTokenizer(r'\w+|#\w+|<\w+>|%\w+|[^\w\s]+').tokenize 
Author: ConvLab, Project: ConvLab, Lines of code: 4, Source file: utils.py
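
A hedged usage sketch of the same pattern in standalone form (the sample utterance is ours); the alternation keeps hashtags and <placeholder> tokens intact while still splitting off punctuation:

import nltk

tokenize = nltk.RegexpTokenizer(r'\w+|#\w+|<\w+>|%\w+|[^\w\s]+').tokenize
print(tokenize("i <unk> that #topic , really !"))
# expected: ['i', '<unk>', 'that', '#topic', ',', 'really', '!']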

Example 7: preprocess_data

# Required import: import nltk [as alias]
# Or: from nltk import RegexpTokenizer [as alias]
def preprocess_data(text):
    global sentences, tokenized
    tokenizer = nltk.RegexpTokenizer(r'\w+')

    sentences =  nltk.sent_tokenize(text)
    tokenized = [tokenizer.tokenize(s) for s in sentences]

# import the data 
Author: drabastomek, Project: practicalDataAnalysisCookbook, Lines of code: 10, Source file: nlp_pos_alternative.py
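
A hedged usage sketch for preprocess_data as defined above (the sample text is ours; nltk.sent_tokenize additionally requires the NLTK 'punkt' sentence model to be downloaded):

preprocess_data("NLTK is fun. Tokenizers split text into words.")
print(sentences)   # ['NLTK is fun.', 'Tokenizers split text into words.']
print(tokenized)   # [['NLTK', 'is', 'fun'], ['Tokenizers', 'split', 'text', 'into', 'words']]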

Example 8: get_tokenize

# Required import: import nltk [as alias]
# Or: from nltk import RegexpTokenizer [as alias]
def get_tokenize():
    return nltk.RegexpTokenizer(r'\w+|#\w+|<\w+>|%\w+|[^\w\s]+').tokenize 
Author: zengjichuan, Project: Topic_Disc, Lines of code: 4, Source file: utils.py

Example 9: get_chat_tokenize

# Required import: import nltk [as alias]
# Or: from nltk import RegexpTokenizer [as alias]
def get_chat_tokenize():

    return nltk.RegexpTokenizer(u'\w+|:d|:p|<sil>|<men>|<hash>|<url>|'
                                u'[\U0001f600-\U0001f64f\U0001f300-\U0001f5ff\U0001f680-\U0001f6ff]|'
                                u'[^\w\s]+').tokenize 
Author: zengjichuan, Project: Topic_Disc, Lines of code: 7, Source file: utils.py

Example 10: create_bag_of_words

# Required import: import nltk [as alias]
# Or: from nltk import RegexpTokenizer [as alias]
def create_bag_of_words(document_list):
    """
    Creates a bag-of-words representation of the given document list, removing
    punctuation and stop words.

    :type document_list: list[str]
    :param document_list:
    :rtype: list[list[str]]
    :return:
    """
    tokenizer = RegexpTokenizer(r'\w+')
    tagger = nltk.PerceptronTagger()
    cached_stop_words = set(stopwords.words("english"))
    cached_stop_words |= {
        't', 'didn', 'doesn', 'haven', 'don', 'aren', 'isn', 've', 'll',
        'couldn', 'm', 'hasn', 'hadn', 'won', 'shouldn', 's', 'wasn',
        'wouldn'}
    body = []
    processed = []

    for i in range(0, len(document_list)):
        body.append(document_list[i].lower())

    for entry in body:
        row = tokenizer.tokenize(entry)
        tagged_words = tagger.tag(row)

        nouns = []
        for tagged_word in tagged_words:
            if tagged_word[1].startswith('NN'):
                nouns.append(tagged_word[0])

        nouns = [word for word in nouns if word not in cached_stop_words]
        processed.append(nouns)

    return processed 
Author: melqkiades, Project: yelp, Lines of code: 38, Source file: lda_context_utils.py
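
A hedged usage sketch for create_bag_of_words as defined above (the two sample reviews are ours; the function assumes nltk, stopwords and RegexpTokenizer are imported and that the 'stopwords' and 'averaged_perceptron_tagger' NLTK data packages are installed):

reviews = [
    "The pizza was great but the service was slow.",
    "Friendly staff and a cozy atmosphere.",
]
print(create_bag_of_words(reviews))
# roughly: [['pizza', 'service'], ['staff', 'atmosphere']]  -- only nouns survive the POS filter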

Example 11: test

# Required import: import nltk [as alias]
# Or: from nltk import RegexpTokenizer [as alias]
def test():
    global N, words, network

    print('In testing.')

    gettysburg = """Four score and seven years ago our fathers brought forth on this continent, a new nation, conceived in Liberty, and dedicated to the proposition that all men are created equal. Now we are engaged in a great civil war, testing whether that nation, or any nation so conceived and so dedicated, can long endure. We are met on a great battle-field of that war. We have come to dedicate a portion of that field, as a final resting place for those who here gave their lives that that nation might live. It is altogether fitting and proper that we should do this. But, in a larger sense, we can not dedicate -- we can not consecrate -- we can not hallow -- this ground. The brave men, living and dead, who struggled here, have consecrated it, far above our poor power to add or detract. The world will little note, nor long remember what we say here, but it can never forget what they did here. It is for us the living, rather, to be dedicated here to the unfinished work which they who fought here have thus far so nobly advanced. It is rather for us to be here dedicated to the great task remaining before us -- that from these honored dead we take increased devotion to that cause for which they gave the last full measure of devotion -- that we here highly resolve that these dead shall not have died in vain -- that this nation, under God, shall have a new birth of freedom -- and that government of the people, by the people, for the people, shall not perish from the earth."""
    tokenizer = RegexpTokenizer(r'\w+')
    gettysburg_tokens = tokenizer.tokenize(gettysburg) 

    samples = []
    for token in gettysburg_tokens:
        word = token.lower()
        if word not in ENGLISH_STOP_WORDS and word not in punctuation:
            samples.append(word)

    dist = FreqDist(samples)
    V = Vol(1, 1, N, 0.0)
    for i, word in enumerate(words):
        V.w[i] = dist.freq(word)

    pred = list(network.forward(V).w)
    topics = []
    # Pair each activation with its index so words[] stays aligned after removals.
    indexed = list(enumerate(pred))
    while len(topics) != 5:
        topic_idx, max_act = max(indexed, key=lambda pair: pair[1])
        topic = words[topic_idx]

        if topic in gettysburg_tokens:
            topics.append(topic)

        indexed.remove((topic_idx, max_act))

    print('Topics of the Gettysburg Address:')
    print(topics)
Author: benglard, Project: ConvNetPy, Lines of code: 36, Source file: topics.py

Example 12: get_chat_tokenize

# Required import: import nltk [as alias]
# Or: from nltk import RegexpTokenizer [as alias]
def get_chat_tokenize():
    return nltk.RegexpTokenizer(r'\w+|<sil>|[^\w\s]+').tokenize 
Author: snakeztc, Project: NeuralDialog-ZSDG, Lines of code: 4, Source file: utils.py

Example 13: update_terms_stats

# Required import: import nltk [as alias]
# Or: from nltk import RegexpTokenizer [as alias]
def update_terms_stats(terms_fd, json_tweet, lex):
    tweet = utils.extract_tweet_from_json(json_tweet)
    tweet_terms = []
    if tweet is None:
        return False
    tokenizer = nltk.RegexpTokenizer(r'\#?[\w\d]+')
    doc = tokenizer.tokenize(tweet)
    for w_raw in doc:
        w = w_raw.strip('\"\'.,;?!:)(@/*&')
        if not (w.strip('#')).isalpha():
            w_aux = ''
            #ignore non-ascii characters
            for s in w:
                if ord(s) < 128:
                    w_aux += s
                else:
                    break
            w = w_aux
        w = w.lower()
        if (w not in stopwords.words('english') and w not in set(['rt','http','amp'])) and len(w) in range(3, 16):
            if w in lex:
                continue
            tweet_terms.append(w)
            terms_fd.inc(w)
    bigrams = nltk.bigrams(tweet_terms)
    for b in bigrams:
        if b[1]+" "+b[0] in lex or b[0]+" "+b[1] in lex:
            continue
        if b[1]+" "+b[0] in terms_fd:
            terms_fd.inc(b[1]+" "+b[0])
        else:
            terms_fd.inc(b[0]+" "+b[1])
    return True 
Author: sajao, Project: CrisisLex, Lines of code: 35, Source file: adaptive_collect.py


Note: The nltk.RegexpTokenizer examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this article without permission.