

Python elmo.Elmo Method Code Examples

This article collects typical usage examples of the elmo.Elmo method from allennlp.modules.elmo in Python. If you are wondering what elmo.Elmo does and how to use it, the curated code examples below should help; you can also explore further usage examples from the allennlp.modules.elmo module.


The following shows 15 code examples of the elmo.Elmo method, sorted by popularity by default.
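Before the collected examples, here is a minimal, self-contained sketch of the typical Elmo call pattern. It assumes an options/weights pair has already been downloaded (for instance the 2x4096_512_2048cnn_2xhighway files whose URLs appear in Example 10); the local file names below are placeholders.

from allennlp.modules.elmo import Elmo, batch_to_ids

# Placeholder paths -- point these at a real options/weights pair.
options_file = "elmo_options.json"
weight_file = "elmo_weights.hdf5"

# Request two independently weighted scalar mixtures of the biLM layers.
elmo = Elmo(options_file, weight_file, num_output_representations=2, dropout=0.0)

sentences = [["The", "sentence", "."], ["Another", "one"]]
character_ids = batch_to_ids(sentences)             # (batch, max_tokens, 50) character ids

output = elmo(character_ids)
representations = output["elmo_representations"]    # list of 2 tensors, each (batch, max_tokens, dim)
mask = output["mask"]                               # (batch, max_tokens)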

Example 1: test_elmo

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def test_elmo(self):
        # Correctness checks are in ElmoBiLm and ScalarMix, here we just add a shallow test
        # to ensure things execute.
        sentences = [
            ["The", "sentence", "."],
            ["ELMo", "helps", "disambiguate", "ELMo", "from", "Elmo", "."],
        ]

        character_ids = self._sentences_to_ids(sentences)
        output = self.elmo(character_ids)
        elmo_representations = output["elmo_representations"]
        mask = output["mask"]

        assert len(elmo_representations) == 2
        assert list(elmo_representations[0].size()) == [2, 7, 32]
        assert list(elmo_representations[1].size()) == [2, 7, 32]
        assert list(mask.size()) == [2, 7] 
Author: allenai, Project: allennlp, Lines: 19, Source: elmo_test.py

Example 2: test_elmo_keep_sentence_boundaries

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def test_elmo_keep_sentence_boundaries(self):
        sentences = [
            ["The", "sentence", "."],
            ["ELMo", "helps", "disambiguate", "ELMo", "from", "Elmo", "."],
        ]
        elmo = Elmo(
            self.options_file, self.weight_file, 2, dropout=0.0, keep_sentence_boundaries=True
        )
        character_ids = self._sentences_to_ids(sentences)
        output = elmo(character_ids)
        elmo_representations = output["elmo_representations"]
        mask = output["mask"]

        assert len(elmo_representations) == 2
        # Add 2 to the lengths because we're keeping the start and end of sentence tokens.
        assert list(elmo_representations[0].size()) == [2, 7 + 2, 32]
        assert list(elmo_representations[1].size()) == [2, 7 + 2, 32]
        assert list(mask.size()) == [2, 7 + 2] 
Author: allenai, Project: allennlp, Lines: 20, Source: elmo_test.py

Example 3: __init__

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def __init__(self,
                 options_file,
                 weight_file,
                 do_layer_norm=False,
                 dropout=0.5,
                 requires_grad=False,
                 projection_dim=None,
                 vocab_to_cache=None):
        super(ElmoTokenEmbedder, self).__init__()

        self._elmo = Elmo(options_file,
                          weight_file,
                          1,
                          do_layer_norm=do_layer_norm,
                          dropout=dropout,
                          requires_grad=requires_grad,
                          vocab_to_cache=vocab_to_cache)
        if projection_dim:
            self._projection = torch.nn.Linear(self._elmo.get_output_dim(), projection_dim)
        else:
            self._projection = None 
Author: plasticityai, Project: magnitude, Lines: 23, Source: elmo_token_embedder.py

Example 4: test_elmo_4D_input

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def test_elmo_4D_input(self):
        sentences = [[[u'The', u'sentence', u'.'],
                      [u'ELMo', u'helps', u'disambiguate', u'ELMo', u'from', u'Elmo', u'.']],
                     [[u'1', u'2'], [u'1', u'2', u'3', u'4', u'5', u'6', u'7']],
                     [[u'1', u'2', u'3', u'4', u'50', u'60', u'70'], [u'The']]]

        all_character_ids = []
        for batch_sentences in sentences:
            all_character_ids.append(self._sentences_to_ids(batch_sentences))

        # (2, 3, 7, 50)
        character_ids = torch.cat([ids.unsqueeze(1) for ids in all_character_ids], dim=1)
        embeddings_4d = self.elmo(character_ids)

        # Run the individual batches.
        embeddings_3d = []
        for char_ids in all_character_ids:
            self.elmo._elmo_lstm._elmo_lstm.reset_states()
            embeddings_3d.append(self.elmo(char_ids))

        for k in range(3):
            numpy.testing.assert_array_almost_equal(
                    embeddings_4d[u'elmo_representations'][0][:, k, :, :].data.numpy(),
                    embeddings_3d[k][u'elmo_representations'][0].data.numpy()
            ) 
Author: plasticityai, Project: magnitude, Lines: 27, Source: elmo_test.py

Example 5: __init__

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def __init__(self, char_table, conf):
        super(ElmoLayer, self).__init__()
        self.conf = conf
        lookup, length = char_table
        self.lookup = nn.Embedding(lookup.size(0), lookup.size(1))
        self.lookup.weight.data.copy_(lookup)
        self.lookup.weight.requires_grad = False
        self.elmo = Elmo(
            os.path.expanduser(self.conf.elmo_options), os.path.expanduser(self.conf.elmo_weights),
            num_output_representations=2, do_layer_norm=False, dropout=self.conf.embed_dropout
        )
        for p in self.elmo.parameters():
            p.requires_grad = False
        self.w = nn.Parameter(torch.Tensor([0.5, 0.5]))
        self.gamma = nn.Parameter(torch.ones(1))
        self.conv = nn.Conv1d(1024, self.conf.elmo_dim, 1)
        nn.init.xavier_uniform_(self.conv.weight)  # in-place init; the bare xavier_uniform is deprecated
        self.conv.bias.data.fill_(0) 
Author: hxbai, Project: Deep_Enhanced_Repr_for_IDRR, Lines: 20, Source: model.py

Example 6: __init__

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def __init__(self, n_classes, dropout=.5):
        super().__init__()
        self.dropout = dropout
        # This turns off gradient updates for the elmo model, but still leaves scalar mixture
        # parameters as tunable, provided that references to the scalar mixtures are extracted
        # and plugged into the optimizer
        self.elmo = Elmo(ELMO_OPTIONS_FILE, ELMO_WEIGHTS_FILE, 2, dropout=dropout, requires_grad=False)
        self.classifier = nn.Sequential(
            nn.Linear(2 * ELMO_DIM, n_classes),
            nn.BatchNorm1d(n_classes),
            nn.Dropout(dropout)
        ) 
Author: Pinafore, Project: qb, Lines: 14, Source: elmo.py
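The comment in Example 6 points out that requires_grad=False freezes only the biLM weights, while the scalar-mixture weights (and the classifier head) remain trainable as long as they reach the optimizer. A minimal sketch of that hand-off, assuming model is an instance of the class above (a hypothetical variable name); it relies on the frozen biLM parameters reporting requires_grad=False, so a simple filter is enough:

import torch

# Keep only parameters that still require gradients: the ELMo scalar mixtures
# and the classifier head survive the filter, the frozen biLM weights do not.
trainable_params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.Adam(trainable_params, lr=1e-3)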

Example 7: get_elmo_class

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def get_elmo_class():
    # Avoid a hard dependency by only importing Elmo if it's being used
    from allennlp.modules.elmo import Elmo
    return Elmo

# %% 
Author: nikitakit, Project: self-attentive-parser, Lines: 8, Source: parse_nk.py

Example 8: __init__

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def __init__(self, hps):
        super(ELMoEndoer, self).__init__()

        self._hps = hps
        self.sent_max_len = hps.sent_max_len

        from allennlp.modules.elmo import Elmo

        elmo_dim = 1024
        options_file = "/remote-home/dqwang/ELMo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_options.json"
        weight_file = "/remote-home/dqwang/ELMo/elmo_2x4096_512_2048cnn_2xhighway_5.5B_weights.hdf5"

        # elmo_dim = 512
        # options_file = "/remote-home/dqwang/ELMo/elmo_2x2048_256_2048cnn_1xhighway_options.json"
        # weight_file = "/remote-home/dqwang/ELMo/elmo_2x2048_256_2048cnn_1xhighway_weights.hdf5"

        embed_size = hps.word_emb_dim
        sent_max_len = hps.sent_max_len

        input_channels = 1
        out_channels = hps.output_channel
        min_kernel_size = hps.min_kernel_size
        max_kernel_size = hps.max_kernel_size
        width = embed_size

        # elmo embedding
        self.elmo = Elmo(options_file, weight_file, 1, dropout=0)
        self.embed_proj = nn.Linear(elmo_dim, embed_size)

        # position embedding
        self.position_embedding = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(sent_max_len + 1, embed_size, padding_idx=0), freeze=True)

        # cnn
        self.convs = nn.ModuleList([nn.Conv2d(input_channels, out_channels, kernel_size = (height, width)) for height in range(min_kernel_size, max_kernel_size+1)])
        logger.info("[INFO] Initing W for CNN.......")
        for conv in self.convs:
            init_weight_value = 6.0
            init.xavier_normal_(conv.weight.data, gain=np.sqrt(init_weight_value))
            fan_in, fan_out = Encoder.calculate_fan_in_and_fan_out(conv.weight.data)
            std = np.sqrt(init_weight_value) * np.sqrt(2.0 / (fan_in + fan_out)) 
Author: fastnlp, Project: fastNLP, Lines: 42, Source: Encoder.py

Example 9: __init__

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def __init__(
        self,
        dropout_value: float = 0.5,
        datasets_manager: DatasetsManager = None,
        word_tokens_namespace: str = "tokens",
        device: torch.device = torch.device("cpu"),
        fine_tune: bool = False,
    ):
        super(ElmoEmbedder, self).__init__()

        # Sometimes you need two different tensors that are
        # two different linear combination of representations
        # TODO: change this in-case you need 2 representations
        self.num_output_representations = 1
        self.dropout_value = dropout_value
        self.datasets_manager = datasets_manager
        self.device = torch.device(device) if isinstance(device, str) else device
        self.msg_printer = wasabi.Printer()
        self.word_tokens_namespace = word_tokens_namespace
        self.fine_tune = fine_tune
        self.embedder_name = "ElmoEmbedder"

        with self.msg_printer.loading("Loading Elmo Object"):
            self.elmo: nn.Module = Elmo(
                options_file=ELMO_OPTIONS_FILE,
                weight_file=ELMO_WEIGHTS_FILE,
                num_output_representations=self.num_output_representations,
                dropout=self.dropout_value,
                requires_grad=fine_tune,
            )

        self.msg_printer.good(f"Finished Loading ELMO object") 
Author: abhinavkashyap, Project: sciwing, Lines: 34, Source: elmo_embedder.py

Example 10: __init__

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def __init__(
        self,
        options_file: str = "https://allennlp.s3.amazonaws.com/models/elmo/2x4096_512_2048cnn_2xhighway/"
        + "elmo_2x4096_512_2048cnn_2xhighway_options.json",
        weight_file: str = "https://allennlp.s3.amazonaws.com/models/elmo/2x4096_512_2048cnn_2xhighway/"
        + "elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5",
        do_layer_norm: bool = False,
        dropout: float = 0.5,
        requires_grad: bool = False,
        projection_dim: int = None,
        vocab_to_cache: List[str] = None,
        scalar_mix_parameters: List[float] = None,
    ) -> None:
        super().__init__()

        self._elmo = Elmo(
            options_file,
            weight_file,
            1,
            do_layer_norm=do_layer_norm,
            dropout=dropout,
            requires_grad=requires_grad,
            vocab_to_cache=vocab_to_cache,
            scalar_mix_parameters=scalar_mix_parameters,
        )
        if projection_dim:
            self._projection = torch.nn.Linear(self._elmo.get_output_dim(), projection_dim)
            self.output_dim = projection_dim
        else:
            self._projection = None
            self.output_dim = self._elmo.get_output_dim() 
Author: allenai, Project: allennlp, Lines: 33, Source: elmo_token_embedder.py
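For context, the forward pass that pairs with the constructor in Example 10 (not included in the snippet) essentially takes the single mixture produced by the wrapped Elmo module and applies the optional projection. A simplified sketch, omitting AllenNLP's TimeDistributed wrapping of the projection for higher-rank inputs:

def forward(self, elmo_tokens: torch.Tensor, word_inputs: torch.Tensor = None) -> torch.Tensor:
    elmo_output = self._elmo(elmo_tokens, word_inputs)
    # Single mixture, since the constructor asked for num_output_representations=1.
    representations = elmo_output["elmo_representations"][0]
    if self._projection:
        # nn.Linear acts on the last dimension, so (batch, tokens, dim) inputs work directly.
        representations = self._projection(representations)
    return representations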

Example 11: setup_method

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def setup_method(self):
        super().setup_method()

        self.elmo = Elmo(self.options_file, self.weight_file, 2, dropout=0.0) 
Author: allenai, Project: allennlp, Lines: 6, Source: elmo_test.py

Example 12: test_elmo_4D_input

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def test_elmo_4D_input(self):
        sentences = [
            [
                ["The", "sentence", "."],
                ["ELMo", "helps", "disambiguate", "ELMo", "from", "Elmo", "."],
            ],
            [["1", "2"], ["1", "2", "3", "4", "5", "6", "7"]],
            [["1", "2", "3", "4", "50", "60", "70"], ["The"]],
        ]

        all_character_ids = []
        for batch_sentences in sentences:
            all_character_ids.append(self._sentences_to_ids(batch_sentences))

        # (2, 3, 7, 50)
        character_ids = torch.cat([ids.unsqueeze(1) for ids in all_character_ids], dim=1)
        embeddings_4d = self.elmo(character_ids)

        # Run the individual batches.
        embeddings_3d = []
        for char_ids in all_character_ids:
            self.elmo._elmo_lstm._elmo_lstm.reset_states()
            embeddings_3d.append(self.elmo(char_ids))

        for k in range(3):
            numpy.testing.assert_array_almost_equal(
                embeddings_4d["elmo_representations"][0][:, k, :, :].data.numpy(),
                embeddings_3d[k]["elmo_representations"][0].data.numpy(),
            ) 
Author: allenai, Project: allennlp, Lines: 31, Source: elmo_test.py

Example 13: test_elmo_bilm_can_handle_higher_dimensional_input_with_cache

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def test_elmo_bilm_can_handle_higher_dimensional_input_with_cache(self):
        sentences = [["This", "is", "a", "sentence"], ["Here", "'s", "one"], ["Another", "one"]]
        vocab, tensor = self.get_vocab_and_both_elmo_indexed_ids(sentences)
        words_to_cache = list(vocab.get_token_to_index_vocabulary("tokens").keys())
        elmo_bilm = Elmo(self.options_file, self.weight_file, 1, vocab_to_cache=words_to_cache)
        elmo_bilm.eval()

        individual_dim = elmo_bilm(
            tensor["character_ids"]["elmo_tokens"], tensor["tokens"]["tokens"]
        )
        elmo_bilm = Elmo(self.options_file, self.weight_file, 1, vocab_to_cache=words_to_cache)
        elmo_bilm.eval()

        expanded_word_ids = torch.stack([tensor["tokens"]["tokens"] for _ in range(4)], dim=1)
        expanded_char_ids = torch.stack(
            [tensor["character_ids"]["elmo_tokens"] for _ in range(4)], dim=1
        )
        expanded_result = elmo_bilm(expanded_char_ids, expanded_word_ids)
        split_result = [
            x.squeeze(1) for x in torch.split(expanded_result["elmo_representations"][0], 1, dim=1)
        ]
        for expanded in split_result:
            numpy.testing.assert_array_almost_equal(
                expanded.data.cpu().numpy(),
                individual_dim["elmo_representations"][0].data.cpu().numpy(),
            ) 
Author: allenai, Project: allennlp, Lines: 28, Source: elmo_test.py

Example 14: load_allennlp_elmo

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def load_allennlp_elmo(
        options_file: str,
        weight_file: str,
) -> Any:
    from allennlp.modules.elmo import Elmo
    return Elmo(
            options_file,
            weight_file,
            num_output_representations=1,
            dropout=0.0,
            scalar_mix_parameters=[1.0, 1.0, 1.0],
    ) 
Author: cnt-dev, Project: pytorch-fast-elmo, Lines: 14, Source: profile.py
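A note on the design choice in Example 14: passing an explicit scalar_mix_parameters=[1.0, 1.0, 1.0] combines the three biLM layers with fixed, equal (softmax-normalized) weights instead of learning the mixture, which keeps a profiling run independent of any training state. In recent AllenNLP releases the underlying ScalarMix is then constructed as non-trainable; treat that detail as an assumption for older versions.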

Example 15: load_elmo

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import Elmo [as alias]
def load_elmo(self, elmo_options_file, elmo_weights_file):
        device = self.word_embedding.embedding.weight.device
        from allennlp.modules.elmo import Elmo
        self.elmo = Elmo(elmo_options_file, elmo_weights_file, 1, dropout=0).to(device) 
Author: uwnlp, Project: piqa, Lines: 6, Source: model.py


Note: The allennlp.modules.elmo.Elmo examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; the source code remains copyrighted by its original authors, and distribution and use must follow the corresponding project's License. Do not reproduce without permission.