

Python nn.Embedding Method Code Examples

This article collects and summarizes typical usage examples of the torch.nn.Embedding method in Python. If you are wondering how exactly nn.Embedding is used, or what working examples look like, the curated code examples here may help. You can also explore further usage examples from the method's containing module, torch.nn.


The following presents 15 code examples of the nn.Embedding method, sorted by popularity by default.
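As background for the examples below, here is a minimal sketch (not taken from any of the listed projects) of what nn.Embedding does: it is a trainable lookup table that maps integer indices to dense vectors.

import torch
from torch import nn

# A 10-word vocabulary with 3-dimensional embedding vectors.
embed = nn.Embedding(num_embeddings=10, embedding_dim=3)

# Lookup takes integer index tensors; the output gains an embedding dimension.
indices = torch.tensor([[1, 2, 4], [0, 3, 9]])  # shape (2, 3)
vectors = embed(indices)                        # shape (2, 3, 3)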

Example 1: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def __init__(self, config):
        super(LanguageModel, self).__init__()
        self.config = config
        self.ntoken = ntoken = config.ntoken
        self.ninp = ninp = config.ninp
        self.nhid = nhid = config.nhid
        self.nlayers = nlayers = config.nlayers

        self.encoder = nn.Embedding(ntoken, ninp)
        self.dropouti = nn.Dropout(config.dropouti) if config.dropouti > 0 else None
        self.lstm = LSTM([ninp] + [nhid] * nlayers, bias=False, layernorm=True,
                         dropoutr=config.dropoutr, dropouth=config.dropouth, dropouto=config.dropouto)
        self.projection = nn.Linear(nhid, ninp)
        self.decoder = nn.Linear(ninp, ntoken)
        self.decoder.weight = self.encoder.weight  # weight tying: decoder shares the embedding matrix

        self.init_weights() 
Developer ID: clovaai, Project: subword-qac, Lines of code: 19, Source file: model.py
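A note on the weight tying in Example 1: assigning self.decoder.weight = self.encoder.weight works because nn.Linear(ninp, ntoken) stores its weight with shape (ntoken, ninp), exactly the shape of nn.Embedding(ntoken, ninp). A minimal sketch with assumed sizes:

from torch import nn

ntoken, ninp = 100, 32                 # assumed sizes for illustration
encoder = nn.Embedding(ntoken, ninp)   # weight shape: (ntoken, ninp)
decoder = nn.Linear(ninp, ntoken)      # weight shape: (ntoken, ninp)
decoder.weight = encoder.weight        # shapes match, so tying is valid

assert decoder.weight is encoder.weight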

Example 2: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def __init__(self, **kwargs):
        super(BiLSTM, self).__init__()
        for k in kwargs:
            self.__setattr__(k, kwargs[k])

        V = self.embed_num
        D = self.embed_dim
        C = self.label_num
        paddingId = self.paddingId

        self.embed = nn.Embedding(V, D, padding_idx=paddingId)

        if self.pretrained_embed:
            self.embed.weight.data.copy_(self.pretrained_weight)
        else:
            init_embedding(self.embed.weight)

        self.dropout_embed = nn.Dropout(self.dropout_emb)
        self.dropout = nn.Dropout(self.dropout)

        self.bilstm = nn.LSTM(input_size=D, hidden_size=self.lstm_hiddens, num_layers=self.lstm_layers,
                              bidirectional=True, batch_first=True, bias=True)

        self.linear = nn.Linear(in_features=self.lstm_hiddens * 2, out_features=C, bias=True)
        init_linear(self.linear) 
Developer ID: bamtercelboo, Project: pytorch_NER_BiLSTM_CNN_CRF, Lines of code: 27, Source file: BiLSTM.py
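For the pretrained branch in Example 2, recent PyTorch versions also provide nn.Embedding.from_pretrained, which performs the copy in a single call. A minimal sketch with a hypothetical weight matrix:

import torch
from torch import nn

pretrained = torch.randn(5, 4)  # hypothetical matrix: 5-word vocab, 4-dim vectors

# freeze=False keeps the copied weights trainable, matching the
# weight.data.copy_ approach in the example above.
embed = nn.Embedding.from_pretrained(pretrained, freeze=False)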

Example 3: _nn_embed

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def _nn_embed(self, embed_dict, words_dict):
        """
        :param embed_dict:
        :param words_dict:
        """
        print("loading pre_train embedding by nn.Embedding for out of vocabulary.")
        embed = nn.Embedding(int(self.words_count), int(self.dim))
        init.xavier_uniform_(embed.weight.data)
        embeddings = np.array(embed.weight.data)
        for word in words_dict:
            if word in embed_dict:  # exact match in the pretrained vectors
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
                self.exact_count += 1
            elif word.lower() in embed_dict:  # case-insensitive (fuzzy) match
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
                self.fuzzy_count += 1
            else:  # out of vocabulary: keep the xavier-initialized row
                self.oov_count += 1
        embeddings[self.padID] = 0  # zero out the padding row
        final_embed = torch.from_numpy(embeddings).float()
        return final_embed 
Developer ID: bamtercelboo, Project: pytorch_NER_BiLSTM_CNN_CRF, Lines of code: 23, Source file: Embed.py

Example 4: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def __init__(self, vocab_size, word_dim, embed_size, num_layers, pooling='last',
                 use_abs=False, bid=False, glove_path='data/glove.pkl'):
        super(EncoderTextGRU, self).__init__()
        self.use_abs = use_abs
        self.embed_size = embed_size
        self.combiner = Combiner(pooling, embed_size)

        # word embedding
        self.word_dim = word_dim
        if word_dim > 300:
            # learned part of the embedding; the remaining 300 dims come from the
            # frozen GloVe table below (self.embed stays undefined if word_dim <= 300)
            self.embed = nn.Embedding(vocab_size, word_dim-300)
        _, embed_weight = pickle.load(open(glove_path, 'rb'))
        self.glove = Variable(torch.cuda.FloatTensor(embed_weight), requires_grad=False)  # frozen GloVe vectors

        # caption embedding
        self.rnn = nn.GRU(word_dim, embed_size//(2 if bid else 1), num_layers, batch_first=True, bidirectional=bid)

        self.init_weights() 
Developer ID: ExplorerFreda, Project: VSE-C, Lines of code: 20, Source file: model.py

Example 5: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def __init__(self, kernel_size, segment_num=None):
        """
        Args:
            input_size: dimention of input embedding
            kernel_size: kernel_size for CNN
            padding: padding for CNN
        hidden_size: hidden size
        """
        super().__init__()
        self.segment_num = segment_num
        if self.segment_num != None:
            self.mask_embedding = nn.Embedding(segment_num + 1, segment_num)
            self.mask_embedding.weight.data.copy_(torch.FloatTensor(np.concatenate([np.zeros((1, segment_num)), np.identity(segment_num)], axis=0)))
            self.mask_embedding.weight.requires_grad = False
            self._minus = -100
        self.pool = nn.MaxPool1d(kernel_size) 
Developer ID: thunlp, Project: OpenNRE, Lines of code: 18, Source file: max_pool.py
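The frozen one-hot table in Example 5 can also be built with nn.Embedding.from_pretrained, where freeze=True plays the role of setting requires_grad = False by hand. A sketch, assuming segment_num = 3:

import numpy as np
import torch
from torch import nn

segment_num = 3  # assumed value for illustration

# Row 0 is all zeros; rows 1..segment_num are one-hot.
weight = torch.from_numpy(
    np.concatenate([np.zeros((1, segment_num)), np.identity(segment_num)], axis=0)
).float()

mask_embedding = nn.Embedding.from_pretrained(weight, freeze=True)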

Example 6: init_weights

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def init_weights(module):
    # Exception: embedding layers get plain unit-normal initialization
    if type(module) == nn.Embedding:
        module.weight.data.normal_(0, 1)
    else:
        for p in module.parameters():
            data = p.data
            if data.dim() == 1:
                # bias
                data.zero_()
            elif data.dim() == 2:
                # linear weight
                n = data.size(1)
                stdv = 1. / math.sqrt(n)
                data.normal_(0, stdv)
            elif data.dim() in [3, 4]:
                # conv weight
                n = data.size(1)
                for k in data.size()[2:]:
                    n *= k
                stdv = 1. / math.sqrt(n)
                data.normal_(0, stdv)
            else:
                raise NotImplementedError 
Developer ID: Alexander-H-Liu, Project: End-to-end-ASR-Pytorch, Lines of code: 26, Source file: util.py
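The call site for Example 6 is not part of the excerpt; a hedged usage sketch, with init_weights as defined above, shows the type-based dispatch:

from torch import nn

emb = nn.Embedding(1000, 64)
init_weights(emb)   # embedding weight ~ N(0, 1)

lin = nn.Linear(64, 10)
init_weights(lin)   # weight ~ N(0, 1/sqrt(fan_in)), bias zeroed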

Example 7: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def __init__(self, vocab_size, embed_size, hidden_size, dropout=None, \
        bidirectional=False, shared_embed=None, init_word_embed=None, rnn_type='lstm', use_cuda=True):
        super(EncoderRNN, self).__init__()
        if rnn_type not in ('lstm', 'gru'):
            raise RuntimeError('rnn_type is expected to be lstm or gru, got {}'.format(rnn_type))
        if bidirectional:
            print('[ Using bidirectional {} encoder ]'.format(rnn_type))
        else:
            print('[ Using {} encoder ]'.format(rnn_type))
        if bidirectional and hidden_size % 2 != 0:
            raise RuntimeError('hidden_size is expected to be even in the bidirectional mode!')
        self.dropout = dropout
        self.rnn_type = rnn_type
        self.use_cuda = use_cuda
        self.hidden_size = hidden_size // 2 if bidirectional else hidden_size
        self.num_directions = 2 if bidirectional else 1
        self.embed = shared_embed if shared_embed is not None else nn.Embedding(vocab_size, embed_size, padding_idx=0)
        model = nn.LSTM if rnn_type == 'lstm' else nn.GRU
        self.model = model(embed_size, self.hidden_size, 1, batch_first=True, bidirectional=bidirectional)
        if shared_embed is None:
            self.init_weights(init_word_embed) 
Developer ID: hugochan, Project: BAMnet, Lines of code: 23, Source file: modules.py
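The shared_embed argument in Example 7 lets several modules reuse a single lookup table. A hedged sketch of that pattern, using the EncoderRNN class above with assumed sizes:

from torch import nn

shared = nn.Embedding(1000, 64, padding_idx=0)   # assumed vocab/embed sizes
enc_a = EncoderRNN(1000, 64, 128, shared_embed=shared, use_cuda=False)
enc_b = EncoderRNN(1000, 64, 128, shared_embed=shared, use_cuda=False)
assert enc_a.embed is enc_b.embed                # both encoders share one table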

Example 8: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def __init__(self, vocab_size, label_size, mode='elman', bidirectional=False, cuda=False, is_training=True):
  
        super(SlotFilling, self).__init__()
        self.is_training = is_training
        embedding_dim = 100
        hidden_size = 75
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
         
        if mode == 'lstm':
            self.rnn = nn.LSTM(input_size=embedding_dim,
                            hidden_size=hidden_size,
                            bidirectional=bidirectional,
                            batch_first=True)
        else:
            self.rnn = RNN(input_size=embedding_dim,
                        hidden_size=hidden_size,
                        mode=mode,
                        cuda=cuda,
                        bidirectional=bidirectional,
                        batch_first=True)
        if bidirectional: 
            self.fc = nn.Linear(2*hidden_size, label_size)
        else:
            self.fc = nn.Linear(hidden_size, label_size) 
Developer ID: llhthinker, Project: slot-filling, Lines of code: 26, Source file: rnn.py

Example 9: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def __init__(self, input_size, embed_size, hidden_size,
                 n_layers=1, dropout=0.5, batch_first=False, bidirectional=False):
        """

        :param input_size: 詞典大小
        :param embed_size:  word2vec嵌入維度
        :param hidden_size:  encoder rnn 隱藏態維度
        :param n_layers:   rnn層數
        :param dropout:    dropout rate
        """
        super(Encoder, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.embed_size = embed_size
        self.embed = nn.Embedding(input_size, embed_size)
        self.gru = nn.GRU(embed_size, hidden_size, n_layers,
                          dropout=dropout, bidirectional=bidirectional, batch_first=batch_first) 
Developer ID: EvilPsyCHo, Project: TaskBot, Lines of code: 19, Source file: model.py

Example 10: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def __init__(self, args):
        super().__init__()
        self.args = args
        vocab_size = args["vocab_size"]
        class_num = args["class_num"]
        kernel_num = args["kernel_num"]
        kernel_size = args["kernel_size"]
        embed_dim = args["embed_dim"]
        self.embed = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.conv_11 = nn.Conv2d(1, kernel_num,
                                 (kernel_size[0], embed_dim), padding=((kernel_size[0] - 1) // 2, 0))
        self.conv_12 = nn.Conv2d(1, kernel_num,
                                 (kernel_size[1], embed_dim), padding=((kernel_size[1] - 1) // 2, 0))
        self.conv_13 = nn.Conv2d(1, kernel_num,
                                 (kernel_size[2], embed_dim), padding=((kernel_size[2] - 1) // 2, 0))
        # NOTE: the original source calls nn.Linear() with no arguments, which raises a
        # TypeError; the in/out features below are assumed for illustration only.
        self.att_1 = nn.Linear(kernel_num, kernel_num)
Developer ID: EvilPsyCHo, Project: TaskBot, Lines of code: 18, Source file: cnn_attention.py

Example 11: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def __init__(self, param: dict):
        super().__init__(param)
        ci = 1  # input channel size
        kernel_num = param['kernel_num']  # output channel size
        kernel_size = param['kernel_size']
        vocab_size = param['vocab_size']
        embed_dim = param['embed_dim']
        dropout = param['dropout']
        class_num = param['class_num']
        self.param = param
        self.embed = nn.Embedding(vocab_size, embed_dim, padding_idx=1)
        self.conv11 = nn.Conv2d(ci, kernel_num, (kernel_size[0], embed_dim))
        self.conv12 = nn.Conv2d(ci, kernel_num, (kernel_size[1], embed_dim))
        self.conv13 = nn.Conv2d(ci, kernel_num, (kernel_size[2], embed_dim))
        self.dropout = nn.Dropout(dropout)
        self.fc1 = nn.Linear(len(kernel_size) * kernel_num, class_num) 
Developer ID: EvilPsyCHo, Project: TaskBot, Lines of code: 18, Source file: text_cnn.py
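The forward pass for Example 11 is not shown in the excerpt; a hedged sketch of the standard TextCNN flow these layers imply, with all sizes assumed:

import torch
import torch.nn.functional as F
from torch import nn

embed = nn.Embedding(100, 50, padding_idx=1)  # assumed: vocab 100, embed_dim 50
conv = nn.Conv2d(1, 16, (3, 50))              # one of the three parallel convolutions

x = torch.randint(0, 100, (2, 20))            # (batch, seq_len) token ids
e = embed(x).unsqueeze(1)                     # (2, 1, 20, 50): add a channel dim for Conv2d
c = F.relu(conv(e)).squeeze(3)                # (2, 16, 18): the kernel spans the full embed dim
p = F.max_pool1d(c, c.size(2)).squeeze(2)     # (2, 16): max over time, ready for the classifier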

Example 12: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def __init__(self, hidden_size, embed_size, output_size,
                 n_layers=1, dropout=0.1):
        """

        Args:
            hidden_size: GRU hidden_size
            embed_size:  embedding size
            output_size:  outputs vocab size
            n_layers:  GRU layers
            dropout: dropout ratio
        """
        super(AttnDecoder, self).__init__()
        # Define parameters
        self.hidden_size = hidden_size
        self.embed_size = embed_size
        self.output_size = output_size
        self.n_layers = n_layers
        self.dropout = dropout
        # Define layers
        self.embedding = nn.Embedding(output_size, embed_size)
        self.dropout_layer = nn.Dropout(dropout)
        self.attn = Attn('concat', hidden_size)
        self.gru = nn.GRU(hidden_size + embed_size, hidden_size, n_layers, dropout=dropout)
        self.out = nn.Linear(hidden_size, output_size) 
Developer ID: EvilPsyCHo, Project: TaskBot, Lines of code: 26, Source file: decoder.py

Example 13: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def __init__(self, input_size, embed_size, hidden_size,
                 n_layers=1, dropout=0.3):
        """initialize encoder

        Args:
            input_size: <int>, encoder vocab size
            embed_size: <int>, encoder embed size
            hidden_size: <int>, GRU hidden state size
            n_layers: <int>, GRU layers
            dropout: <float>, dropout rate

        Notes:
            default batch_first, bidirectional=True
        """
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.embed_size = embed_size
        self.n_layers = n_layers
        self.dropout = dropout
        self.embedding = nn.Embedding(input_size, embed_size)
        self.gru = nn.GRU(embed_size, hidden_size, n_layers, bidirectional=True, dropout=dropout) 
Developer ID: EvilPsyCHo, Project: TaskBot, Lines of code: 24, Source file: encoder.py

Example 14: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def __init__(self, config):
        super(NERModel, self).__init__()

        self.config = config
        # Word Embedding, Word Local Position Embedding
        self.token_embedding = NERTokenEmbedding(
            config.vocab_size, config.hidden_size,
            max_sent_len=config.max_sent_len, dropout=config.dropout
        )
        # Multi-layer Transformer Layers to Incorporate Contextual Information
        self.token_encoder = transformer.make_transformer_encoder(
            config.num_tf_layers, config.hidden_size, ff_size=config.ff_size, dropout=config.dropout
        )
        if self.config.use_crf_layer:
            self.crf_layer = CRFLayer(config.hidden_size, self.config.num_entity_labels)
        else:
            # Token Label Classification
            self.classifier = nn.Linear(config.hidden_size, self.config.num_entity_labels) 
Developer ID: dolphin-zs, Project: Doc2EDAG, Lines of code: 20, Source file: ner_model.py

Example 15: __init__

# Required import: from torch import nn [as alias]
# Alternatively: from torch.nn import Embedding [as alias]
def __init__(self, config):
        super(BertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob) 
Developer ID: ymcui, Project: cmrc2019, Lines of code: 12, Source file: modeling.py
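Example 15 defines three lookup tables that BERT sums elementwise in its forward pass (the forward method is not part of the excerpt). A hedged sketch with assumed sizes:

import torch
from torch import nn

hidden = 8                                   # assumed; BERT-base uses 768
word = nn.Embedding(30522, hidden)           # vocab_size
pos = nn.Embedding(512, hidden)              # max_position_embeddings
tok_type = nn.Embedding(2, hidden)           # type_vocab_size

input_ids = torch.randint(0, 30522, (2, 5))  # (batch, seq_len)
position_ids = torch.arange(5).unsqueeze(0).expand(2, 5)
token_type_ids = torch.zeros(2, 5, dtype=torch.long)

# All three lookups share the hidden size, so they can be summed elementwise.
embeddings = word(input_ids) + pos(position_ids) + tok_type(token_type_ids)  # (2, 5, 8)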


Note: The torch.nn.Embedding examples in this article were compiled by 純淨天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.