本文整理汇总了Python中flair.embeddings.WordEmbeddings方法的典型用法代码示例。如果您正苦于以下问题:Python embeddings.WordEmbeddings方法的具体用法?Python embeddings.WordEmbeddings怎么用?Python embeddings.WordEmbeddings使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类flair.embeddings的用法示例。
在下文中一共展示了embeddings.WordEmbeddings方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: transform
# Required module: from flair import embeddings
# Or: from flair.embeddings import WordEmbeddings
def transform(self, X: dt.Frame):
    """Score the similarity of two text columns via pooled flair embeddings.

    :param X: datatable Frame whose first two columns hold the texts to compare.
    :return: numpy array with one cosine-similarity score per row; rows whose
        embedding or scoring failed get the sentinel value -99.
    """
    X.replace([None, math.inf, -math.inf], self._repl_val)
    from flair.embeddings import WordEmbeddings, BertEmbeddings, DocumentPoolEmbeddings, Sentence
    # Build the word/BERT embedding selected by config, then pool it to a
    # single document-level vector.
    if self.embedding_name in ["glove", "en"]:
        self.embedding = WordEmbeddings(self.embedding_name)
    elif self.embedding_name in ["bert"]:
        self.embedding = BertEmbeddings()
    self.doc_embedding = DocumentPoolEmbeddings([self.embedding])
    output = []
    X = X.to_pandas()
    text1_arr = X.iloc[:, 0].values
    text2_arr = X.iloc[:, 1].values
    for ind, text1 in enumerate(text1_arr):
        try:
            sent1 = Sentence(str(text1).lower())
            self.doc_embedding.embed(sent1)
            sent2 = Sentence(str(text2_arr[ind]).lower())
            self.doc_embedding.embed(sent2)
            score = cosine_similarity(sent1.get_embedding().reshape(1, -1),
                                      sent2.get_embedding().reshape(1, -1))[0, 0]
            output.append(score)
        # Fix: the original bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; narrow to Exception while keeping the -99 sentinel.
        except Exception:
            output.append(-99)
    return np.array(output)
示例2: __init__
# Required module: from flair import embeddings
# Or: from flair.embeddings import WordEmbeddings
def __init__(self, device_number='cuda:2', use_cuda=True):
    """Set up stacked GloVe + bidirectional contextual string embeddings.

    :param device_number: torch device string used when ``use_cuda`` is True.
    :param use_cuda: when True, point flair at the given CUDA device.
    """
    self.device_number = device_number
    if use_cuda:
        flair.device = torch.device(self.device_number)
    # Combine classic word vectors with forward and backward contextual
    # (character-level) flair embeddings.
    components = [
        WordEmbeddings('glove'),
        FlairEmbeddings('news-forward'),
        FlairEmbeddings('news-backward'),
    ]
    self.stacked_embeddings = StackedEmbeddings(components)
示例3: load_context_embeddings_with_flair
# Required module: from flair import embeddings
# Or: from flair.embeddings import WordEmbeddings
def load_context_embeddings_with_flair(direction='bi', word_embeddings=True,
                                       cache_dir=DEFAULT_CACHE_DIR,
                                       verbose=False):
    """Load Danish contextual (flair) embeddings, optionally with fastText.

    Fix: the original docstring documented a nonexistent ``bidirectional``
    parameter and omitted ``direction``/``word_embeddings``.

    :param direction: 'bi' (both directions), 'fwd' (forward only) or
        'bwd' (backward only).
    :param word_embeddings: when True, include Danish fastText word embeddings.
    :param cache_dir: directory used to cache the downloaded model weights.
    :param verbose: forwarded to the model downloader.
    :return: the single embedding when only one is selected, otherwise a
        StackedEmbeddings combining all selected embeddings.
    """
    from flair.embeddings import FlairEmbeddings
    from flair.embeddings import WordEmbeddings
    from flair.embeddings import StackedEmbeddings

    embeddings = []
    if word_embeddings:
        # Danish ('da') fastText word vectors.
        embeddings.append(WordEmbeddings('da'))
    if direction in ('bi', 'fwd'):
        fwd_weight_path = download_model('flair.fwd', cache_dir,
                                         verbose=verbose,
                                         process_func=_unzip_process_func)
        embeddings.append(FlairEmbeddings(fwd_weight_path))
    if direction in ('bi', 'bwd'):
        bwd_weight_path = download_model('flair.bwd', cache_dir,
                                         verbose=verbose,
                                         process_func=_unzip_process_func)
        embeddings.append(FlairEmbeddings(bwd_weight_path))
    # No need for a stacking wrapper around a single embedding.
    if len(embeddings) == 1:
        return embeddings[0]
    return StackedEmbeddings(embeddings=embeddings)