本文整理汇总了Python中flair.embeddings.StackedEmbeddings方法的典型用法代码示例。如果您正苦于以下问题:Python embeddings.StackedEmbeddings方法的具体用法?Python embeddings.StackedEmbeddings怎么用?Python embeddings.StackedEmbeddings使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类flair.embeddings
的用法示例。
在下文中一共展示了embeddings.StackedEmbeddings方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: load_flair
# 需要导入模块: from flair import embeddings [as 别名]
# 或者: from flair.embeddings import StackedEmbeddings [as 别名]
def load_flair(mode='flair'):
    """Build a sentence-level embedding model.

    :param mode: 'flair' selects stacked GloVe + min-pooled Flair news
                 embeddings; any other value selects BERT embeddings.
    :return: the constructed embedding object.
    """
    if mode == 'flair':
        return StackedEmbeddings([
            WordEmbeddings('glove'),
            PooledFlairEmbeddings('news-forward', pooling='min'),
            PooledFlairEmbeddings('news-backward', pooling='min'),
        ])
    # BERT branch — concatenating the last 4 layers gives the best results.
    return BertEmbeddings('bert-base-uncased')
示例2: __init__
# 需要导入模块: from flair import embeddings [as 别名]
# 或者: from flair.embeddings import StackedEmbeddings [as 别名]
def __init__(self, device_number='cuda:2', use_cuda=True):
    """Initialise stacked GloVe + Flair forward/backward embeddings.

    :param device_number: torch device string used when CUDA is enabled.
    :param use_cuda: when True, point flair's global device at
                     ``device_number`` before building the embeddings.
    """
    self.device_number = device_number
    if use_cuda:
        flair.device = torch.device(self.device_number)
    embedding_layers = [
        WordEmbeddings('glove'),
        FlairEmbeddings('news-forward'),
        FlairEmbeddings('news-backward'),
    ]
    self.stacked_embeddings = StackedEmbeddings(embedding_layers)
示例3: main
# 需要导入模块: from flair import embeddings [as 别名]
# 或者: from flair.embeddings import StackedEmbeddings [as 别名]
def main(data_folder: str, model_folder: str, dev_size: float, nb_epochs: int,
         nb_segment: Optional[int], segment: Optional[int]) -> None:
    """Train a French NER SequenceTagger on the prepared flair corpus.

    :param data_folder: directory holding the training data.
    :param model_folder: output directory for checkpoints and logs.
    :param dev_size: fraction of data reserved for the dev split.
    :param nb_epochs: maximum number of training epochs.
    :param nb_segment: total number of corpus segments (optional).
    :param segment: index of the segment to train on (optional).
    """
    nlp = spacy.blank('fr')
    nlp.tokenizer = get_tokenizer(nlp)
    corpus: Corpus = prepare_flair_train_test_corpus(
        spacy_model=nlp, data_folder=data_folder, dev_size=dev_size,
        nb_segment=nb_segment, segment=segment)
    tag_dictionary = corpus.make_tag_dictionary(tag_type='ner')
    print(tag_dictionary.idx2item)

    # French fastText word vectors stacked with bidirectional Flair LMs.
    embedding_types: List[TokenEmbeddings] = [
        WordEmbeddings('fr'),
        FlairEmbeddings('fr-forward'),
        FlairEmbeddings('fr-backward'),
    ]
    embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)

    tagger: SequenceTagger = SequenceTagger(hidden_size=256,
                                            embeddings=embeddings,
                                            use_crf=True,
                                            tag_dictionary=tag_dictionary,
                                            tag_type='ner')
    trainer: ModelTrainer = ModelTrainer(model=tagger, corpus=corpus,
                                         use_tensorboard=True)
    trainer.train(model_folder,
                  max_epochs=nb_epochs,
                  mini_batch_size=32,
                  embeddings_storage_mode="cpu",
                  checkpoint=False,
                  )
示例4: load_context_embeddings_with_flair
# 需要导入模块: from flair import embeddings [as 别名]
# 或者: from flair.embeddings import StackedEmbeddings [as 别名]
def load_context_embeddings_with_flair(direction='bi', word_embeddings=True,
                                       cache_dir=DEFAULT_CACHE_DIR,
                                       verbose=False):
    """Load Danish contextual (Flair) embeddings, optionally with fastText.

    Fixes the original docstring, which documented a nonexistent
    ``bidirectional`` parameter and omitted ``word_embeddings``.

    :param direction: 'bi' (both directions), 'fwd' (forward only) or
                      'bwd' (backward only).
    :param word_embeddings: when True, include Danish ('da') fastText
                            word embeddings in the stack.
    :param cache_dir: directory used to cache downloaded model weights.
    :param verbose: verbosity flag forwarded to the model downloader.
    :return: a single embedding object when only one is selected,
             otherwise a StackedEmbeddings combining them.
    """
    from flair.embeddings import FlairEmbeddings
    from flair.embeddings import WordEmbeddings
    from flair.embeddings import StackedEmbeddings

    embeddings = []
    if word_embeddings:
        embeddings.append(WordEmbeddings('da'))
    if direction in ('bi', 'fwd'):
        fwd_weight_path = download_model('flair.fwd', cache_dir,
                                         verbose=verbose,
                                         process_func=_unzip_process_func)
        embeddings.append(FlairEmbeddings(fwd_weight_path))
    if direction in ('bi', 'bwd'):
        bwd_weight_path = download_model('flair.bwd', cache_dir,
                                         verbose=verbose,
                                         process_func=_unzip_process_func)
        embeddings.append(FlairEmbeddings(bwd_weight_path))
    # A lone embedding needs no stacking wrapper.
    if len(embeddings) == 1:
        return embeddings[0]
    return StackedEmbeddings(embeddings=embeddings)