

Python torch.transpose Function Code Examples

This article collects typical usage examples of Python's torch.transpose function. If you are wondering how torch.transpose is used in practice, what it does, or what real code that calls it looks like, the hand-picked examples below should help.


Below are 15 code examples of the transpose function, sorted by popularity by default.
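
Before the collected examples, here is a minimal, self-contained sketch (shapes chosen purely for illustration) of what torch.transpose(input, dim0, dim1) does: it returns a view of the input with the two given dimensions swapped.

import torch

x = torch.arange(24).reshape(2, 3, 4)  # shape [2, 3, 4]
y = torch.transpose(x, 0, 1)           # swap dims 0 and 1 -> shape [3, 2, 4]
print(y.size())                        # torch.Size([3, 2, 4])

# transpose returns a view, not a copy: both tensors share storage,
# and x.transpose(0, 1) is the equivalent method form
assert y.data_ptr() == x.data_ptr()
assert torch.equal(y, x.transpose(0, 1))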

Example 1: forward

    def forward(self, sentence):
        # print(sentence)       [torch.LongTensor of size 47x64]
        x = self.word_embeddings(sentence)      # [torch.FloatTensor of size 47x64x100]
        x = torch.transpose(x, 0, 1)
        # print(x)            # [torch.FloatTensor of size 64x47x100]
        x = x.unsqueeze(1)
        # x = F.relu(self.convl3(x).squeeze(3))
        x = [F.relu(conv(x)).squeeze(3) for conv in self.convsl]
        # print(x)            # [torch.FloatTensor of size 32x200x41] 40 39 38 37
        # print(type(x))
        # a = torch.cat((a1.data, a2.data), 2)
        x = torch.cat(x, 2)
        # print(x)            # [torch.FloatTensor of size 32x200x195]
        x = torch.transpose(x, 1, 2)
        embeds = torch.transpose(x, 0, 1)
        # print(embeds)       # [torch.FloatTensor of size 195x32x200]
        # embeds = self.word_embeddings()
        # x = embeds.view(len(sentence), self.batch_size, -1)  # torch.Size([43, 64, 300])
        lstm_out, self.hidden = self.lstm(embeds, self.hidden)

        lstm_out = torch.transpose(lstm_out, 0, 1)
        lstm_out = torch.transpose(lstm_out, 1, 2)
        # lstm_out = F.tanh(lstm_out)

        lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2))
        # print(lstm_out.size())
        lstm_out = lstm_out.squeeze(2)

        lstm_out = self.dropout(lstm_out)
        y = self.hidden2label(lstm_out)
        # log_probs = F.log_softmax(y)
        log_probs = y
        return log_probs
Author: Joyce94, Project: sentence_classification, Lines: 33, Source: CNN_LSTM.py
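
A side note on the double-transpose pattern above, which recurs in many of the examples below: torch.transpose swaps exactly two dimensions per call, so a chain of two calls can be collapsed into a single permute. A small equivalence check, with shapes borrowed from the comments above:

import torch

t = torch.randn(47, 64, 100)
a = torch.transpose(torch.transpose(t, 0, 1), 1, 2)  # [64, 47, 100] -> [64, 100, 47]
b = t.permute(1, 2, 0)                               # same result in one call
assert torch.equal(a, b)

Which style is clearer is a matter of taste; the examples in this article consistently use chained transposes.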

Example 2: forward

    def forward(self, sentence):
        # print(sentence)                                     # [torch.LongTensor of size 42x64]
        x = self.word_embeddings(sentence)
        x = self.dropout_embed(x)
        # print(embeds.size())                                # torch.Size([42, 64, 100])
        # x = embeds.view(len(sentence), self.batch_size, -1)
        # print(x.size())                                   # torch.Size([42, 64, 100])
        # print(self.hidden)
        lstm_out, self.hidden = self.bnlstm(x, self.hidden)   # lstm_out 10*5*50 hidden 1*5*50 *2
        # print(lstm_out)
        # lstm_out = [F.max_pool1d(i, len(lstm_out)).unsqueeze(2) for i in lstm_out]
        lstm_out = torch.transpose(lstm_out, 0, 1)
        lstm_out = torch.transpose(lstm_out, 1, 2)

        lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2))
        # print(lstm_out.size())
        lstm_out = lstm_out.squeeze(2)
        # y = self.hidden2label(lstm_out)

        #lstm_out = torch.cat(lstm_out, 1)
        # lstm_out = self.dropout(lstm_out)
        # lstm_out = lstm_out.view(len(sentence), -1)
        y = self.hidden2label(F.tanh(lstm_out))
        # log_probs = F.log_softmax(y)
        log_probs = y
        return log_probs
Author: Joyce94, Project: sentence_classification, Lines: 26, Source: model_bnlstm.py

Example 3: get_paf_and_heatmap

def get_paf_and_heatmap(model, img_raw, scale_search, param_stride=8, box_size=368):
    multiplier = [scale * box_size / img_raw.shape[0] for scale in scale_search]

    heatmap_avg = torch.zeros((len(multiplier), 19, img_raw.shape[0], img_raw.shape[1])).cuda()
    paf_avg = torch.zeros((len(multiplier), 38, img_raw.shape[0], img_raw.shape[1])).cuda()

    for i, scale in enumerate(multiplier):
        img_test = cv2.resize(img_raw, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
        img_test_pad, pad = pad_right_down_corner(img_test, param_stride, param_stride)
        img_test_pad = np.transpose(np.float32(img_test_pad[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5

        feed = Variable(torch.from_numpy(img_test_pad)).cuda()
        output1, output2 = model(feed)

        print(output1.size())
        print(output2.size())

        heatmap = nn.UpsamplingBilinear2d((img_raw.shape[0], img_raw.shape[1])).cuda()(output2)

        paf = nn.UpsamplingBilinear2d((img_raw.shape[0], img_raw.shape[1])).cuda()(output1)

        heatmap_avg[i] = heatmap[0].data
        paf_avg[i] = paf[0].data

    heatmap_avg = torch.transpose(torch.transpose(torch.squeeze(torch.mean(heatmap_avg, 0)), 0, 1), 1, 2).cuda()
    heatmap_avg = heatmap_avg.cpu().numpy()

    paf_avg = torch.transpose(torch.transpose(torch.squeeze(torch.mean(paf_avg, 0)), 0, 1), 1, 2).cuda()
    paf_avg = paf_avg.cpu().numpy()

    return paf_avg, heatmap_avg
Author: codealphago, Project: pytorch-pose-estimation, Lines: 31, Source: pose_estimation.py

Example 4: score

 def score(self, hidden, encoder_output):
     
     if self.method == 'dot':            
         # hidden is 1 by 256
         # encoder_output is 22 by 256
         encoder_output = torch.transpose(encoder_output, 0, 1)
         # encoder_output is 256 by 22
         energy = torch.matmul(hidden, encoder_output)
         return energy
     
     elif self.method == 'general':
         # hidden is 1 by 256
         # encoder_output is 256 by 22
         # encoder_output = torch.transpose(encoder_output, 0, 1)
         hidden = hidden.view(1, -1)
         a = self.attn(encoder_output)
         a = torch.transpose(a, 0, 1)
         energy = torch.matmul(hidden, a)
         return energy
     
     elif self.method == 'concat':
         len_encoder_output = encoder_output.size()[1]
         # hidden is 1 by 256
         # encoder_output is 256 by 22
         hidden = torch.transpose(hidden, 0, 1)
         # hidden is 256 by 1
         hidden = hidden.repeat(1, len_encoder_output)  # tile the (256, 1) column to (256, 22)
         # hidden is 256 by 22
         concat = torch.cat((hidden, encoder_output), dim=0)
         # concat is 512 by 22
         # self.attn(concat) --> 256 by 22
         energy = torch.matmul(self.v, F.tanh(self.attn(concat)))
         return energy
Author: vwrj, Project: neural_machine_translation, Lines: 33, Source: V2-Attention-Vish.py

Example 5: forward

    def forward(self, x):
        x = self.embed(x)
        x = self.dropout(x)
        # x = x.view(len(x), x.size(1), -1)
        # x = embed.view(len(x), embed.size(1), -1)
        bilstm_out, self.hidden = self.bilstm(x, self.hidden)

        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 1, 2)
        # bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
        bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2))
        bilstm_out = bilstm_out.squeeze(2)

        hidden2label = self.hidden2label1(F.tanh(bilstm_out))

        gate_layer = F.sigmoid(self.gate_layer(bilstm_out))
        # calculate highway layer values
        gate_hidden_layer = torch.mul(hidden2label, gate_layer)
        # the following line also runs, but does not match the Highway Networks formula
        # gate_input = torch.mul((1 - gate_layer), hidden2label)
        gate_input = torch.mul((1 - gate_layer), bilstm_out)
        highway_output = torch.add(gate_hidden_layer, gate_input)

        logit = self.logit_layer(highway_output)

        return logit
Author: fengzhangyin, Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, Lines: 26, Source: model_HighWay_BiLSTM_1.py

Example 6: forward

    def forward(self, X, X_mask):
        #X: [m, Tx] m = batch size, Tx = word count
        #print(X.size(), type(X))
        m = X.size()[0]
        Tx = X.size()[1]
        
        #embedding layer
        X = self.embedding(X)
        #X: [m, Tx, embedding_dim] m = batch size, Tx = word count
        #print(X.size(), type(X.data))
        assert X.size() == torch.Size([m, Tx, self.embedding_dim])
        
        #conv layer
        #transpose
        X = torch.transpose(X, 1, 2)
        #print(X.size(), type(X.data))

        X = self.conv(X)
        #print(X.size(), type(X.data))

        #transpose back
        X = torch.transpose(X, 1, 2)
        #print(X.size(), type(X.data))

        assert X.size() == torch.Size([m, Tx, 256])

        #maxpool layer
        #transpose
        X = torch.transpose(X, 1, 2)
        X = self.maxpool(X)
        #print(X.size(), type(X.data))
        #remove dimension
        X = X.squeeze()
        #print(X.size(), type(X.data))
        assert X.size() == torch.Size([m, 256])

        #linear 
        X = self.linear(X)
        #X: [m, 1]
        #print(X.size(), type(X))
        if self.num_classes == 2:
            assert X.size() == torch.Size([m, 1])
        else:
            assert X.size() == torch.Size([m, self.num_classes])
            
        if self.num_classes == 2:
            X = torch.squeeze(X)
            X = self.sigmoid(X)
            #X: [m]
            #print(X.size(), type(X))
            assert X.size() == torch.Size([m])
            return X
        else:
            return F.softmax(X, dim=1)
Author: mircean, Project: ML, Lines: 54, Source: models.py

Example 7: forward

    def forward(self, x):
        '''
        :param x: batch * input_len * in_channels
        :return: batch * output_len * out_channels
        '''
        out = F.relu(self.maxPool(self.conv1(torch.transpose(x, 1, 2))))
        #print('out: ', out.size())

        out = F.relu(self.maxPool(self.conv2(out)))
        out = torch.transpose(F.relu(self.maxPool(self.conv3(out))), 1, 2)
        return out
Author: chickenbestlover, Project: DrQA-RN, Lines: 11, Source: layers_RN_kmax.py

Example 8: forward

    def forward(self, x):
        embed = self.embed(x)
        x = embed.view(len(x), embed.size(1), -1)
        bilstm_out, self.hidden = self.bilstm(x, self.hidden)

        bilstm_out = torch.transpose(bilstm_out, 0, 1)
        bilstm_out = torch.transpose(bilstm_out, 1, 2)
        bilstm_out = F.tanh(bilstm_out)
        bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
        y = self.hidden2label1(bilstm_out)
        y = self.hidden2label2(y)
        logit = y
        return logit
Author: fengzhangyin, Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, Lines: 13, Source: model_BiLSTM.py

Example 9: myMatrixDivVector

def myMatrixDivVector(matrix, vector):
    """
       matrix(N,M) / vector(N) = matrix(N,M)
       for each i,j: 
           matrix_result[i][j] = matrix_source[i][j] / vector[i]
    """
    matrix1 = torch.transpose(matrix, 0, 1)
    print("matrix transpose:", matrix1)
    for i, nm in enumerate(matrix1):
        matrix1[i] = nm / vector
    print("matrix after division:", matrix1)
    matrix = torch.transpose(matrix1, 0, 1)
    print("matrix (final result):", matrix)
    return matrix
Author: tianzhiliang, Project: test, Lines: 14, Source: myDivision.py
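
For comparison, the same row-wise division can be written with broadcasting, with no loop and no transposes. Note also that because torch.transpose returns a view, the loop above writes its results back into the input matrix in place, whereas the broadcast version below (a sketch, not from the original project) returns a new tensor:

import torch

matrix = torch.tensor([[2.0, 4.0], [9.0, 12.0]])  # shape (N, M)
vector = torch.tensor([2.0, 3.0])                 # shape (N,)

# unsqueeze(1) makes vector (N, 1), which broadcasts across the M columns
result = matrix / vector.unsqueeze(1)
print(result)  # tensor([[1., 2.], [3., 4.]])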

Example 10: CORAL_loss

def CORAL_loss(source, target):
    d = source.data.shape[1]

    # source covariance: center each feature by its mean over the batch (dim 0)
    xm = torch.mean(source, 0, keepdim=True) - source
    xc = torch.matmul(torch.transpose(xm, 0, 1), xm)

    # target covariance
    xmt = torch.mean(target, 0, keepdim=True) - target
    xct = torch.matmul(torch.transpose(xmt, 0, 1), xmt)
    # squared Frobenius norm between the source and target covariances
    loss = (xc - xct).pow(2).sum()
    loss = loss/(4*d*d)
    return loss
Author: Silflame, Project: transferlearning, Lines: 14, Source: coral_loss.py
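
A quick sanity check of the loss, assuming the CORAL_loss definition above and using invented random batches (rows are samples, columns are the d features): identical inputs give exactly zero, and independent batches give a small positive value.

import torch

torch.manual_seed(0)
source = torch.randn(32, 10)  # 32 samples, d = 10 features
target = torch.randn(32, 10)

print(CORAL_loss(source, source).item())  # 0.0: identical covariances
print(CORAL_loss(source, target).item())  # small positive value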

Example 11: max_singular_value

def max_singular_value(W, u=None, Ip=1):
    """
    power iteration for weight parameter
    """
    #xp = W.data
    if u is None:
        u = torch.FloatTensor(1, W.size(0)).normal_(0, 1).cuda()
    _u = u
    for _ in range(Ip):
        #print(_u.size(), W.size())
        _v = _l2normalize(torch.matmul(_u, W.data), eps=1e-12)
        _u = _l2normalize(torch.matmul(_v, torch.transpose(W.data, 0, 1)), eps=1e-12)
    sigma = torch.matmul(torch.matmul(_v, torch.transpose(W.data, 0, 1)), torch.transpose(_u, 0, 1))
    return sigma, _v
Author: LuChengTHU, Project: SN-GAN, Lines: 14, Source: train-conditional.py
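
The estimate can be checked against an exact SVD. The helper _l2normalize is defined elsewhere in the original project; a plausible stand-in is sketched below as an assumption, and u is passed explicitly so the sketch runs on CPU (the function's default path calls .cuda()):

import torch

def _l2normalize(v, eps=1e-12):
    # assumed stand-in for the project's helper: scale v to unit L2 norm
    return v / (v.norm() + eps)

torch.manual_seed(0)
W = torch.randn(30, 20)
u = torch.randn(1, W.size(0))

sigma, _ = max_singular_value(W, u=u, Ip=50)  # more iterations -> tighter estimate
exact = torch.svd(W)[1][0]                    # largest singular value from full SVD
print(sigma.item(), exact.item())             # the two values should agree closely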

Example 12: forward

 def forward(self, input):
     embed = self.embed(input)
     input = embed.view(len(input), embed.size(1), -1)
     # gru
     lstm_out, hidden = self.gru(input, self.hidden)
     lstm_out = torch.transpose(lstm_out, 0, 1)
     lstm_out = torch.transpose(lstm_out, 1, 2)
     # pooling
     lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)
     lstm_out = F.tanh(lstm_out)
     # linear
     y = self.hidden2label(lstm_out)
     logit = y
     return logit
Author: fengzhangyin, Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, Lines: 14, Source: model_GRU.py

Example 13: forward

 def forward(self, x):
     embed = self.embed(x)
     embed = self.dropout_embed(embed)
     x = embed.view(len(x), embed.size(1), -1)
     # lstm
     lstm_out, self.hidden = self.lstm(x, self.hidden)
     lstm_out = torch.transpose(lstm_out, 0, 1)
     lstm_out = torch.transpose(lstm_out, 1, 2)
     # pooling
     lstm_out = F.tanh(lstm_out)
     lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)
     lstm_out = F.tanh(lstm_out)
     # linear
     logit = self.hidden2label(lstm_out)
     return logit
Author: fengzhangyin, Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, Lines: 15, Source: model_LSTM.py

Example 14: _get_lstm_features

 def _get_lstm_features(self, names, lengths):
     self.hidden = self.init_hidden(names.size(-1))
     embeds = self.char_embeds(names)  # Figure 4
     packed_input = pack_padded_sequence(embeds, lengths)  # Figure 5
     packed_output, (ht, ct) = self.lstm(packed_input, self.hidden)  # Figure 6
     lstm_out, _ = pad_packed_sequence(packed_output)  # Figure 7
     lstm_out = torch.transpose(lstm_out, 0, 1)
     lstm_out = torch.transpose(lstm_out, 1, 2)
     lstm_out = F.tanh(lstm_out)  # Figure 8
     lstm_out, indices = F.max_pool1d(lstm_out, lstm_out.size(2), return_indices=True)  # Figure 9
     lstm_out = lstm_out.squeeze(2)  # drop the pooled dimension so the shape matches the linear layer's input
     lstm_out = F.tanh(lstm_out)
     lstm_feats = self.fully_connected_layer(lstm_out)
     output = self.softmax(lstm_feats)  # Figure 10
     return output
Author: Joe955, Project: MachineLearning, Lines: 15, Source: pytorch-rnn-demo.py

Example 15: forward

 def forward(self, input):
     embed = self.embed(input)
     embed = self.dropout(embed)  # adding this dropout reduced the accuracy
     input = embed.view(len(input), embed.size(1), -1)
     # gru
     gru_out, hidden = self.bigru(input, self.hidden)
     gru_out = torch.transpose(gru_out, 0, 1)
     gru_out = torch.transpose(gru_out, 1, 2)
     # pooling
     # gru_out = F.tanh(gru_out)
     gru_out = F.max_pool1d(gru_out, gru_out.size(2)).squeeze(2)
     gru_out = F.tanh(gru_out)
     # linear
     y = self.hidden2label(gru_out)
     logit = y
     return logit
Author: fengzhangyin, Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, Lines: 16, Source: model_BiGRU.py


Note: The torch.transpose examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by many developers. Copyright of the source code remains with the original authors; consult each project's license before using or redistributing it. Do not reproduce this article without permission.