This article collects typical usage examples of the Python function utils.nlp_utils._tokenize. If you are unsure what _tokenize does or how to call it in practice, the curated code examples below may help.
Twelve code examples of _tokenize are shown below, sorted by popularity by default.
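Every example follows the same pattern: the raw strings obs and target are split into tokens with _tokenize(text, token_pattern), and the token lists are usually turned into n-grams with ngram_utils._ngrams before a distance or count feature is computed. The utils.nlp_utils module itself is not reproduced on this page; purely for orientation, a minimal regex-based stand-in with the same call signature might look like the sketch below. The default pattern, the lowercasing step, and the module-level token_pattern are assumptions, not the project's actual implementation.

    import re

    # Assumed default token pattern: words of two or more word characters.
    token_pattern = r"(?u)\b\w\w+\b"

    def _tokenize(text, token_pattern=token_pattern):
        # Find all substrings of the lowercased text that match the pattern.
        return re.findall(token_pattern, text.lower())

    # e.g. _tokenize("Angle bracket 3in", token_pattern) -> ['angle', 'bracket', '3in']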
Example 1: transform_one
def transform_one(self, obs, target, id):
    obs_tokens = nlp_utils._tokenize(obs, token_pattern)
    target_tokens = nlp_utils._tokenize(target, token_pattern)
    obs_ngrams = ngram_utils._ngrams(obs_tokens, self.ngram)
    target_ngrams = ngram_utils._ngrams(target_tokens, self.ngram)
    # normalized positions of target n-grams that also occur in the observation
    pos_list = _inter_norm_pos_list(obs_ngrams, target_ngrams)
    return pos_list
Example 2: transform_one
def transform_one(self, obs, target, id):
    obs_tokens = nlp_utils._tokenize(obs, token_pattern)
    target_tokens = nlp_utils._tokenize(target, token_pattern)
    obs_ngrams = ngram_utils._ngrams(obs_tokens, self.ngram)
    target_ngrams = ngram_utils._ngrams(target_tokens, self.ngram)
    # count approximate string matches over all observation/target n-gram pairs
    s = 0.
    for w1 in obs_ngrams:
        for w2 in target_ngrams:
            if dist_utils._is_str_match(w1, w2, self.str_match_threshold):
                s += 1.
    # normalize by the total number of n-gram pairs
    return np_utils._try_divide(s, len(obs_ngrams) * len(target_ngrams))
Example 3: transform_one
def transform_one(self, obs, target, id):
    obs_tokens = nlp_utils._tokenize(obs, token_pattern)
    target_tokens = nlp_utils._tokenize(target, token_pattern)
    obs_ngrams = ngram_utils._ngrams(obs_tokens, self.ngram)
    target_ngrams = ngram_utils._ngrams(target_tokens, self.ngram)
    val_list = []
    for w1 in obs_ngrams:
        # fraction of target n-grams that match this observation n-gram
        s = 0.
        for w2 in target_ngrams:
            if dist_utils._is_str_match(w1, w2, self.str_match_threshold):
                s += 1.
        val_list.append(np_utils._try_divide(s, len(target_ngrams)))
    if len(val_list) == 0:
        val_list = [config.MISSING_VALUE_NUMERIC]
    return val_list
Example 4: transform_one
def transform_one(self, obs, target, id):
    # training rows with the same search term, excluding the current sample
    df = self.dfTrain[self.dfTrain["search_term"] == obs].copy()
    val_list = [config.MISSING_VALUE_NUMERIC]
    if df is not None:
        df = df[df["id"] != id].copy()
        df = df[df["relevance"] == self.relevance].copy()
        if df is not None and df.shape[0] > 0:
            target_tokens = nlp_utils._tokenize(target, token_pattern)
            target_ngrams = ngram_utils._ngrams(target_tokens, self.ngram)
            val_list = []
            for x in df[self.target_field]:
                x_tokens = nlp_utils._tokenize(x, token_pattern)
                x_ngrams = ngram_utils._ngrams(x_tokens, self.ngram)
                val_list.append(dist_utils._jaccard_coef(x_ngrams, target_ngrams))
    return val_list
Example 5: transform_one
def transform_one(self, obs, target, id):
    obs_tokens = nlp_utils._tokenize(obs, token_pattern)
    counter = Counter(obs_tokens)
    count = np.asarray(list(counter.values()))
    proba = count / np.sum(count)
    # Shannon entropy of the token frequency distribution
    entropy = -np.sum(proba * np.log(proba))
    return entropy
Example 6: _get_avg_ngram_doc_len
def _get_avg_ngram_doc_len(self):
    lst = []
    for target in self.target_corpus:
        target_tokens = nlp_utils._tokenize(target, token_pattern)
        target_ngrams = ngram_utils._ngrams(target_tokens, self.ngram)
        lst.append(len(target_ngrams))
    return np.mean(lst)
Example 7: __iter__
def __iter__(self):
    for column in self.columns:
        for sentence in self.df[column]:
            # assign each distinct sentence a unique tag for doc2vec training
            if sentence not in self.sent_label:
                self.cnt += 1
                self.sent_label[sentence] = "SENT_%d" % self.cnt
            tokens = nlp_utils._tokenize(sentence, token_pattern)
            yield LabeledSentence(words=tokens, tags=[self.sent_label[sentence]])
Example 8: transform_one
def transform_one(self, obs, target, id):
    obs_tokens = nlp_utils._tokenize(obs, token_pattern)
    target_tokens = nlp_utils._tokenize(target, token_pattern)
    obs_ngrams = ngram_utils._ngrams(obs_tokens, self.ngram)
    target_ngrams = ngram_utils._ngrams(target_tokens, self.ngram)
    # pairwise edit distances between observation and target n-grams
    val_list = []
    for w1 in obs_ngrams:
        _val_list = []
        for w2 in target_ngrams:
            s = dist_utils._edit_dist(w1, w2)
            _val_list.append(s)
        if len(_val_list) == 0:
            _val_list = [config.MISSING_VALUE_NUMERIC]
        val_list.append(_val_list)
    if len(val_list) == 0:
        val_list = [[config.MISSING_VALUE_NUMERIC]]
    return val_list
Example 9: transform_one
def transform_one(self, obs, target, id):
    obs_tokens = nlp_utils._tokenize(obs, token_pattern)
    target_tokens = nlp_utils._tokenize(target, token_pattern)
    # look up the WordNet synsets of every token
    obs_synset_list = [wn.synsets(obs_token) for obs_token in obs_tokens]
    target_synset_list = [wn.synsets(target_token) for target_token in target_tokens]
    val_list = []
    for obs_synset in obs_synset_list:
        _val_list = []
        for target_synset in target_synset_list:
            _s = self._maximum_similarity_for_two_synset_list(obs_synset, target_synset)
            _val_list.append(_s)
        if len(_val_list) == 0:
            _val_list = [config.MISSING_VALUE_NUMERIC]
        val_list.append(_val_list)
    if len(val_list) == 0:
        val_list = [[config.MISSING_VALUE_NUMERIC]]
    return val_list
Example 10: transform_one
def transform_one(self, obs, target, id):
    val_list = []
    obs_tokens = nlp_utils._tokenize(obs, token_pattern)
    target_tokens = nlp_utils._tokenize(target, token_pattern)
    for obs_token in obs_tokens:
        _val_list = []
        if obs_token in self.model:
            # cosine similarity against every target token that is also in the model
            for target_token in target_tokens:
                if target_token in self.model:
                    sim = dist_utils._cosine_sim(self.model[obs_token], self.model[target_token])
                    _val_list.append(sim)
        if len(_val_list) == 0:
            _val_list = [config.MISSING_VALUE_NUMERIC]
        val_list.append(_val_list)
    if len(val_list) == 0:
        val_list = [[config.MISSING_VALUE_NUMERIC]]
    return val_list
Example 11: _get_df_dict
def _get_df_dict(self):
    # document frequency of each n-gram, with add-one smoothing
    d = defaultdict(lambda: 1)
    for target in self.target_corpus:
        target_tokens = nlp_utils._tokenize(target, token_pattern)
        target_ngrams = ngram_utils._ngrams(target_tokens, self.ngram)
        for w in set(target_ngrams):
            d[w] += 1
    return d
Example 12: transform_one
def transform_one(self, obs, target, id):
    obs_tokens = nlp_utils._tokenize(obs, token_pattern)
    target_tokens = nlp_utils._tokenize(target, token_pattern)
    obs_ngrams = ngram_utils._ngrams(obs_tokens, self.ngram)
    target_ngrams = ngram_utils._ngrams(target_tokens, self.ngram)
    return np_utils._try_divide(self._get_match_count(obs_ngrams, target_ngrams, self.idx), len(target_ngrams))