

Python elmo.batch_to_ids Method Code Examples

This article collects typical usage examples of the Python method allennlp.modules.elmo.batch_to_ids. If you are wondering how to use elmo.batch_to_ids, what it is for, or what real-world example code looks like, the curated examples below may help. You can also explore further usage examples for the containing module, allennlp.modules.elmo.


The following shows 10 code examples of the elmo.batch_to_ids method, sorted by popularity by default.
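
Before diving into the examples, here is a minimal sketch of what batch_to_ids does (the sentences are illustrative only): it takes a batch of tokenized sentences as a List[List[str]] and returns a torch.LongTensor of character ids of shape (batch_size, max_sentence_length, 50), padded to the longest sentence, which can be fed directly to an Elmo module.

from allennlp.modules.elmo import batch_to_ids

sentences = [["The", "cat", "sat"], ["Hello", "world"]]
character_ids = batch_to_ids(sentences)
# character_ids is a LongTensor of shape (2, 3, 50):
# 2 sentences, padded to the longest sentence (3 tokens), 50 character ids per token
print(character_ids.shape)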

Example 1: batchify

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import batch_to_ids [as alias]
def batchify(x_data, y_data, batch_size=128, shuffle=False):
    batches = []
    for i in range(0, len(x_data), batch_size):
        start, stop = i, i + batch_size
        x_batch = batch_to_ids(x_data[start:stop])
        lengths = Variable(torch.from_numpy(np.array([max(len(x), 1) for x in x_data[start:stop]])).float()).view(-1, 1)
        if CUDA:
            y_batch = Variable(torch.from_numpy(np.array(y_data[start:stop])).cuda())
        else:
            y_batch = Variable(torch.from_numpy(np.array(y_data[start:stop])))
        batches.append((x_batch, y_batch, lengths))

    if shuffle:
        random.shuffle(batches)

    return batches 
Developer ID: Pinafore, Project: qb, Lines of code: 18, Source file: elmo.py
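
A hedged usage sketch for the batchify helper above; the token lists and labels are made up for illustration, and it assumes the surrounding module defines the CUDA flag and the usual torch/numpy/random imports along with batch_to_ids.

x_data = [["what", "is", "elmo"], ["who", "wrote", "hamlet"]]   # hypothetical tokenized questions
y_data = [0, 1]                                                 # hypothetical class labels
batches = batchify(x_data, y_data, batch_size=2, shuffle=False)
x_batch, y_batch, lengths = batches[0]
# x_batch: character ids of shape (2, 3, 50); lengths: float tensor of shape (2, 1)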

Example 2: forward

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import batch_to_ids [as alias]
def forward(self, lines: List[Line]):
        texts = []
        for line in lines:
            line_tokens = line.tokens[self.word_tokens_namespace]
            line_tokens = list(map(lambda tok: tok.text, line_tokens))
            texts.append(line_tokens)

        character_ids = batch_to_ids(texts)
        character_ids = character_ids.to(self.device)
        output_dict = self.elmo(character_ids)
        # shape: (batch_size, max_seq_length, 1024)
        embeddings = output_dict["elmo_representations"][0]
        return embeddings 
Developer ID: abhinavkashyap, Project: sciwing, Lines of code: 15, Source file: elmo_embedder.py
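
The self.elmo module used above is constructed elsewhere in the project and is not shown here. As a rough sketch (the file paths are placeholders, not taken from the original code), an Elmo module with a single output representation can be built and called like this:

from allennlp.modules.elmo import Elmo, batch_to_ids

options_file = "path/to/elmo_options.json"   # placeholder path
weight_file = "path/to/elmo_weights.hdf5"    # placeholder path
elmo = Elmo(options_file, weight_file, num_output_representations=1, dropout=0.0)

character_ids = batch_to_ids([["A", "small", "example"]])
embeddings = elmo(character_ids)["elmo_representations"][0]  # shape (1, 3, 1024)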

Example 3: emb

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import batch_to_ids [as alias]
def emb(self, word, default=None):
        from allennlp.modules.elmo import batch_to_ids
        idx = batch_to_ids([[word]])
        emb = self.embeddings(idx)['token_embedding']
        return emb[0, 1].tolist() 
Developer ID: vzhong, Project: embeddings, Lines of code: 7, Source file: elmo.py

Example 4: test_batch_to_char_ids

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import batch_to_ids [as alias]
def test_batch_to_char_ids():
    sentences = [
            ["This", "is", "a", "sentence"],
            ["Here", "'s", "one"],
            ["Another", "one"],
    ]
    t1 = utils.batch_to_char_ids(sentences)
    t2 = batch_to_ids(sentences)
    np.testing.assert_array_equal(t1.numpy(), t2.numpy())

    sentences = [["one"]]
    t1 = utils.batch_to_char_ids(sentences)
    t2 = batch_to_ids(sentences)
    np.testing.assert_array_equal(t1.numpy(), t2.numpy()) 
Developer ID: cnt-dev, Project: pytorch-fast-elmo, Lines of code: 16, Source file: test_utils.py

Example 5: forward

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import batch_to_ids [as alias]
def forward(self, batch):
        questions = [q.split() for q in batch['question']]
        question_ids = batch_to_ids(questions).cuda()
        elmo_vectors = self.elmo(question_ids)
        cnn_vector = self.cnn_encoder(elmo_vectors['elmo_representations'][0], elmo_vectors['mask'])
        loss = self.loss(cnn_vector, batch['class'].cuda())
        preds = torch.argmax(cnn_vector, dim=1)
        softmax = torch.nn.functional.softmax(cnn_vector, dim=1)
        return loss, preds, softmax 
Developer ID: martiansideofthemoon, Project: squash-generation, Lines of code: 11, Source file: model.py

Example 6: batch_to_embeddings

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import batch_to_ids [as alias]
def batch_to_embeddings(self, batch: List[List[str]]) -> Tuple[torch.Tensor, torch.Tensor]:
        u"""
        Parameters
        ----------
        batch : ``List[List[str]]``, required
            A list of tokenized sentences.

        Returns
        -------
            A tuple of tensors, the first representing activations (batch_size, 3, num_timesteps, 1024) and
        the second a mask (batch_size, num_timesteps).
        """
        character_ids = batch_to_ids(batch)
        if self.cuda_device >= 0:
            character_ids = character_ids.cuda(device=self.cuda_device)

        bilm_output = self.elmo_bilm(character_ids)
        layer_activations = bilm_output[u'activations']
        mask_with_bos_eos = bilm_output[u'mask']

        # without_bos_eos is a 3 element list of (activation, mask) tensor pairs,
        # each with size (batch_size, num_timesteps, dim) and (batch_size, num_timesteps)
        # respectively.
        without_bos_eos = [remove_sentence_boundaries(layer, mask_with_bos_eos)
                           for layer in layer_activations]
        # Converts a list of pairs (activation, mask) tensors to a single tensor of activations.
        activations = torch.cat([ele[0].unsqueeze(1) for ele in without_bos_eos], dim=1)
        # The mask is the same for each ELMo vector, so just take the first.
        mask = without_bos_eos[0][1]

        return activations, mask 
Developer ID: plasticityai, Project: magnitude, Lines of code: 33, Source file: elmo.py
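
A small usage sketch of batch_to_embeddings, assuming an ElmoEmbedder-style object named embedder that exposes the method above (the sentences are illustrative):

activations, mask = embedder.batch_to_embeddings([["First", "sentence", "."],
                                                  ["Another", "one"]])
# activations: shape (2, 3, 3, 1024) -- 2 sentences, 3 ELMo layers, 3 timesteps, 1024 dims
# mask:        shape (2, 3)          -- 1 for real tokens, 0 for padding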

Example 7: elmo_process

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import batch_to_ids [as alias]
def elmo_process(batch, device):
            elmo_tensor = batch_to_ids(batch)
            elmo_tensor = elmo_tensor.to(device=device)
            torchtext_tensor = torchtext_process(batch, device)
            return (elmo_tensor, torchtext_tensor) 
Developer ID: yikangshen, Project: Ordered-Memory, Lines of code: 7, Source file: sentiment.py

Example 8: transform

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import batch_to_ids [as alias]
def transform(self, X, y=None):
        """Transform documents to document ids.

        Uses the vocabulary learned by fit.

        Args:
            X : iterable
            an iterable which yields either str, unicode or file objects.
            y : iterable, label strings.

        Returns:
            features: document id matrix.
            y: label id matrix.
        """
        word_ids = [self._word_vocab.doc2id(doc) for doc in X]
        word_ids = pad_sequences(word_ids, padding='post')

        char_ids = [[self._char_vocab.doc2id(w) for w in doc] for doc in X]
        char_ids = pad_nested_sequences(char_ids)

        character_ids = batch_to_ids(X)
        elmo_embeddings = self._elmo(character_ids)['elmo_representations'][1]
        elmo_embeddings = elmo_embeddings.detach().numpy()

        features = [word_ids, char_ids, elmo_embeddings]

        if y is not None:
            y = [self._label_vocab.doc2id(doc) for doc in y]
            y = pad_sequences(y, padding='post')
            y = to_categorical(y, self.label_size).astype(int)
            # In 2018/06/01, to_categorical is a bit strange.
            # >>> to_categorical([[1,3]], num_classes=4).shape
            # (1, 2, 4)
            # >>> to_categorical([[1]], num_classes=4).shape
            # (1, 4)
            # So, I expand dimensions when len(y.shape) == 2.
            y = y if len(y.shape) == 3 else np.expand_dims(y, axis=0)
            return features, y
        else:
            return features 
Developer ID: Hironsan, Project: anago, Lines of code: 42, Source file: preprocessing.py

Example 9: forward

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import batch_to_ids [as alias]
def forward(self, batch_text):
        # batch_char = batch_to_ids(batch_text)
        return self.elmo(batch_text)['elmo_representations'] 
Developer ID: xycforgithub, Project: MultiTask-MRC, Lines of code: 5, Source file: elmo.py

Example 10: create_cached_cnn_embeddings

# Required import: from allennlp.modules import elmo [as alias]
# Or: from allennlp.modules.elmo import batch_to_ids [as alias]
def create_cached_cnn_embeddings(self, tokens: List[str]) -> None:
        """
        Given a list of tokens, this method precomputes word representations
        by running just the character convolutions and highway layers of elmo,
        essentially creating uncontextual word vectors. On subsequent forward passes,
        the word ids are looked up from an embedding, rather than being computed on
        the fly via the CNN encoder.
        This function sets 3 attributes:
        _word_embedding : ``torch.Tensor``
            The word embedding for each word in the tokens passed to this method.
        _bos_embedding : ``torch.Tensor``
            The embedding for the BOS token.
        _eos_embedding : ``torch.Tensor``
            The embedding for the EOS token.
        Parameters
        ----------
        tokens : ``List[str]``, required.
            A list of tokens to precompute character convolutions for.
        """
        tokens = [ELMoCharacterMapper.bos_token, ELMoCharacterMapper.eos_token] + tokens
        timesteps = 32
        batch_size = 32
        chunked_tokens = lazy_groups_of(iter(tokens), timesteps)

        all_embeddings = []
        device = get_device_of(next(self.parameters()))
        for batch in lazy_groups_of(chunked_tokens, batch_size):
            # Shape (batch_size, timesteps, 50)
            batched_tensor = batch_to_ids(batch)
            # NOTE: This device check is for when a user calls this method having
            # already placed the model on a device. If this is called in the
            # constructor, it will probably happen on the CPU. This isn't too bad,
            # because it's only a few convolutions and will likely be very fast.
            if device >= 0:
                batched_tensor = batched_tensor.cuda(device)
            output = self._token_embedder(batched_tensor, add_bos=False, add_eos=False)
            token_embedding = output["token_embedding"]
            mask = output["mask"]
            token_embedding, _ = remove_sentence_boundaries(token_embedding, mask, rmv_bos=False, rmv_eos=False)
            all_embeddings.append(token_embedding.view(-1, token_embedding.size(-1)))
        full_embedding = torch.cat(all_embeddings, 0)

        # We might have some trailing embeddings from padding in the batch, so
        # we clip the embedding and lookup to the right size.
        full_embedding = full_embedding[:len(tokens), :]
        embedding = full_embedding[2:len(tokens), :]
        vocab_size, embedding_dim = list(embedding.size())

        from allennlp.modules.token_embedders import Embedding # type: ignore
        self._bos_embedding = full_embedding[0, :]
        self._eos_embedding = full_embedding[1, :]
        self._word_embedding = Embedding(vocab_size, # type: ignore
                                         embedding_dim,
                                         weight=embedding.data,
                                         trainable=self._requires_grad,
                                         padding_index=0) 
Developer ID: jzhou316, Project: Unsupervised-Sentence-Summarization, Lines of code: 58, Source file: elmo_sequential_embedder.py
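
For reference, the chunking in Example 10 relies on allennlp.common.util.lazy_groups_of, which lazily yields lists of at most the requested size; a quick sketch of the assumed behaviour:

from allennlp.common.util import lazy_groups_of

tokens = ["a", "b", "c", "d", "e"]
print(list(lazy_groups_of(iter(tokens), 2)))
# -> [['a', 'b'], ['c', 'd'], ['e']]  (the last group may be shorter)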


Note: The allennlp.modules.elmo.batch_to_ids examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's license. Please do not reproduce without permission.