

Python torch.cat Function Code Examples

This article collects typical usage examples of Python's torch.cat function. If you have been wondering what torch.cat does, how to call it, or what it looks like in real code, the curated examples below may help.


The following presents 15 code examples of the cat function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
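Before turning to the examples, a minimal sketch of the basic contract of torch.cat may help: it joins a sequence of tensors along an existing dimension, and all tensors must agree in every dimension except the one being concatenated (tensor names and shapes below are illustrative only).

import torch

a = torch.randn(2, 3)
b = torch.randn(4, 3)

# Concatenate along dim 0; the remaining dimensions must match.
c = torch.cat([a, b], dim=0)
print(c.shape)  # torch.Size([6, 3])

# Concatenating along dim 1 instead requires matching sizes in dim 0.
d = torch.cat([torch.randn(2, 3), torch.randn(2, 5)], dim=1)
print(d.shape)  # torch.Size([2, 8])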

Example 1: update

    def update(self):
        

        next_value = self.actor_critic(Variable(self.rollouts.states[-1], volatile=True))[0].data

        self.rollouts.compute_returns(next_value, self.use_gae, self.gamma, self.tau)

        # values, action_log_probs, dist_entropy = self.actor_critic.evaluate_actions(
        #                                             Variable(self.rollouts.states[:-1].view(-1, *self.obs_shape)), 
        #                                             Variable(self.rollouts.actions.view(-1, self.action_shape)))


        values = torch.cat(self.rollouts.value_preds, 0).view(self.num_steps, self.num_processes, 1) 
        action_log_probs = torch.cat(self.rollouts.action_log_probs).view(self.num_steps, self.num_processes, 1)
        dist_entropy = torch.cat(self.rollouts.dist_entropy).view(self.num_steps, self.num_processes, 1)


        self.rollouts.value_preds = []
        self.rollouts.action_log_probs = []
        self.rollouts.dist_entropy = []

        advantages = Variable(self.rollouts.returns[:-1]) - values
        value_loss = advantages.pow(2).mean()

        action_loss = -(Variable(advantages.data) * action_log_probs).mean()

        self.optimizer.zero_grad()
        cost = action_loss + value_loss*self.value_loss_coef - dist_entropy.mean()*self.entropy_coef
        cost.backward()

        nn.utils.clip_grad_norm(self.actor_critic.parameters(), self.grad_clip)

        self.optimizer.step()
Author: chriscremer, Project: Other_Code, Lines: 33, Source: a2c_agents.py
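Two side notes on this example. First, it targets the pre-0.4 PyTorch API: Variable(..., volatile=True) has since been replaced by torch.no_grad(), and nn.utils.clip_grad_norm by clip_grad_norm_. Second, the torch.cat(...).view(...) pattern can be written with torch.stack when every per-step tensor has the same shape; a small sketch with assumed shapes:

import torch

num_steps, num_processes = 5, 4
value_preds = [torch.randn(num_processes, 1) for _ in range(num_steps)]

# Pattern used above: flatten with cat, then restore the step dimension.
via_cat = torch.cat(value_preds, 0).view(num_steps, num_processes, 1)

# torch.stack adds the leading step dimension directly.
via_stack = torch.stack(value_preds)

assert torch.equal(via_cat, via_stack)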

Example 2: test_forward

    def test_forward(self):
        batch = 16
        len1, len2 = 21, 24
        seq_len1 = torch.randint(low=len1 - 10, high=len1 + 1, size=(batch,)).long()
        seq_len2 = torch.randint(low=len2 - 10, high=len2 + 1, size=(batch,)).long()

        mask1 = []
        for w in seq_len1:
            mask1.append([1] * w.item() + [0] * (len1 - w.item()))
        mask1 = torch.FloatTensor(mask1)
        mask2 = []
        for w in seq_len2:
            mask2.append([1] * w.item() + [0] * (len2 - w.item()))
        mask2 = torch.FloatTensor(mask2)

        d = 200  # hidden dimension
        l = 20  # number of perspectives
        test1 = torch.randn(batch, len1, d)
        test2 = torch.randn(batch, len2, d)
        test1 = test1 * mask1.view(-1, len1, 1).expand(-1, len1, d)
        test2 = test2 * mask2.view(-1, len2, 1).expand(-1, len2, d)

        test1_fw, test1_bw = torch.split(test1, d // 2, dim=-1)
        test2_fw, test2_bw = torch.split(test2, d // 2, dim=-1)

        ml_fw = BiMpmMatching.from_params(Params({"is_forward": True, "num_perspectives": l}))
        ml_bw = BiMpmMatching.from_params(Params({"is_forward": False, "num_perspectives": l}))

        vecs_p_fw, vecs_h_fw = ml_fw(test1_fw, mask1, test2_fw, mask2)
        vecs_p_bw, vecs_h_bw = ml_bw(test1_bw, mask1, test2_bw, mask2)
        vecs_p, vecs_h = torch.cat(vecs_p_fw + vecs_p_bw, dim=2), torch.cat(vecs_h_fw + vecs_h_bw, dim=2)

        assert vecs_p.size() == torch.Size([batch, len1, 10 + 10 * l])
        assert vecs_h.size() == torch.Size([batch, len2, 10 + 10 * l])
        assert ml_fw.get_output_dim() == ml_bw.get_output_dim() == vecs_p.size(2) // 2 == vecs_h.size(2) // 2
Author: apmoore1, Project: allennlp, Lines: 35, Source: bimpm_matching_test.py
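Note that vecs_p_fw + vecs_p_bw joins two Python lists of tensors before torch.cat ever sees them; cat accepts any sequence of tensors. A quick sketch with assumed shapes:

import torch

fw = [torch.randn(2, 3, 4) for _ in range(2)]
bw = [torch.randn(2, 3, 4) for _ in range(3)]

# `+` concatenates the lists; torch.cat then joins all five tensors on dim 2.
merged = torch.cat(fw + bw, dim=2)
print(merged.shape)  # torch.Size([2, 3, 20])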

Example 3: forward

    def forward(self, input, context, question, output=None, hidden=None, context_alignment=None, question_alignment=None):
        context_output = output.squeeze(1) if output is not None else self.make_init_output(context)
        context_alignment = context_alignment if context_alignment is not None else self.make_init_output(context)
        question_alignment = question_alignment if question_alignment is not None else self.make_init_output(question)

        context_outputs, vocab_pointer_switches, context_question_switches, context_attentions, question_attentions, context_alignments, question_alignments = [], [], [], [], [], [], []
        for emb_t in input.split(1, dim=1):
            emb_t = emb_t.squeeze(1)
            context_output = self.dropout(context_output)
            if self.input_feed:
                emb_t = torch.cat([emb_t, context_output], 1)
            dec_state, hidden = self.rnn(emb_t, hidden)
            context_output, context_attention, context_alignment = self.context_attn(dec_state, context)
            question_output, question_attention, question_alignment = self.question_attn(dec_state, question)
            vocab_pointer_switch = self.vocab_pointer_switch(torch.cat([dec_state, context_output, emb_t], -1))
            context_question_switch = self.context_question_switch(torch.cat([dec_state, question_output, emb_t], -1))
            context_output = self.dropout(context_output)
            context_outputs.append(context_output)
            vocab_pointer_switches.append(vocab_pointer_switch)
            context_question_switches.append(context_question_switch)
            context_attentions.append(context_attention)
            context_alignments.append(context_alignment)
            question_attentions.append(question_attention)
            question_alignments.append(question_alignment)

        context_outputs, vocab_pointer_switches, context_question_switches, context_attention, question_attention = [self.package_outputs(x) for x in [context_outputs, vocab_pointer_switches, context_question_switches, context_attentions, question_attentions]]
        return context_outputs, context_attention, question_attention, context_alignment, question_alignment, vocab_pointer_switches, context_question_switches, hidden
Author: shaogx, Project: decaNLP, Lines: 27, Source: multitask_question_answering_network.py

Example 4: forward

    def forward(input, hidden, weight):
        output = []
        input_offset = input.size(0)
        last_batch_size = batch_sizes[-1]
        initial_hidden = hidden
        flat_hidden = not isinstance(hidden, tuple)
        if flat_hidden:
            hidden = (hidden,)
            initial_hidden = (initial_hidden,)
        hidden = tuple(h[:batch_sizes[-1]] for h in hidden)
        for batch_size in reversed(batch_sizes):
            inc = batch_size - last_batch_size
            if inc > 0:
                hidden = tuple(torch.cat((h, ih[last_batch_size:batch_size]), 0)
                               for h, ih in zip(hidden, initial_hidden))
            last_batch_size = batch_size
            step_input = input[input_offset - batch_size:input_offset]
            input_offset -= batch_size

            if flat_hidden:
                hidden = (inner(step_input, hidden[0], *weight),)
            else:
                hidden = inner(step_input, hidden, *weight)
            output.append(hidden[0])

        output.reverse()
        output = torch.cat(output, 0)
        if flat_hidden:
            hidden = hidden[0]
        return hidden, output
Author: cclauss, Project: torchMoji, Lines: 30, Source: lstm.py
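Note: this forward takes no self because in the source it appears to be an inner function; batch_sizes (the packed-sequence length schedule) and inner (the per-timestep cell) are captured from the enclosing scope. The torch.cat call widens the hidden state with rows from the initial hidden state as the batch grows while the packed sequence is walked in reverse.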

Example 5: forward

 def forward(self, inputs, targets):
     """
     Args:
     - inputs: feature matrix with shape (batch_size, feat_dim)
     - targets: ground truth labels with shape (batch_size,)
     """
     n = inputs.size(0)
     
     # Compute pairwise distance; replace with the official implementation once merged
     dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
     dist = dist + dist.t()
     dist.addmm_(1, -2, inputs, inputs.t())
     dist = dist.clamp(min=1e-12).sqrt()  # for numerical stability
     
     # For each anchor, find the hardest positive and negative
     mask = targets.expand(n, n).eq(targets.expand(n, n).t())
     dist_ap, dist_an = [], []
     for i in range(n):
         dist_ap.append(dist[i][mask[i]].max().unsqueeze(0))
         dist_an.append(dist[i][mask[i] == 0].min().unsqueeze(0))
     dist_ap = torch.cat(dist_ap)
     dist_an = torch.cat(dist_an)
     
     # Compute ranking hinge loss
     y = torch.ones_like(dist_an)
     loss = self.ranking_loss(dist_an, dist_ap, y)
     return loss
Author: zysolanine, Project: deep-person-reid, Lines: 27, Source: losses.py
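The hand-rolled pairwise-distance block predates the official helper hinted at in the comment; recent PyTorch exposes torch.cdist. A sketch of the equivalence under random float inputs (the tolerance is loose because the expanded-square formula is less numerically stable):

import torch

inputs = torch.randn(8, 16)
n = inputs.size(0)

# Expanded-square formulation used above: ||a||^2 + ||b||^2 - 2*a.b
dist = torch.pow(inputs, 2).sum(dim=1, keepdim=True).expand(n, n)
dist = dist + dist.t() - 2 * inputs @ inputs.t()
dist = dist.clamp(min=1e-12).sqrt()

assert torch.allclose(dist, torch.cdist(inputs, inputs), atol=1e-3)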

Example 6: forward

    def forward(self, context, question, context_char=None, question_char=None, context_f=None, question_f=None):
        assert context_char is not None and question_char is not None and context_f is not None \
               and question_f is not None

        vis_param = {}

        # (seq_len, batch, additional_feature_size)
        context_f = context_f.transpose(0, 1)
        question_f = question_f.transpose(0, 1)

        # word-level embedding: (seq_len, batch, word_embedding_size)
        context_vec, context_mask = self.embedding.forward(context)
        question_vec, question_mask = self.embedding.forward(question)

        # char-level embedding: (seq_len, batch, char_embedding_size)
        context_emb_char, context_char_mask = self.char_embedding.forward(context_char)
        question_emb_char, question_char_mask = self.char_embedding.forward(question_char)

        context_vec_char = self.char_encoder.forward(context_emb_char, context_char_mask, context_mask)
        question_vec_char = self.char_encoder.forward(question_emb_char, question_char_mask, question_mask)

        # mix embedding: (seq_len, batch, embedding_size)
        context_vec = torch.cat((context_vec, context_vec_char, context_f), dim=-1)
        question_vec = torch.cat((question_vec, question_vec_char, question_f), dim=-1)

        # encode: (seq_len, batch, hidden_size*2)
        context_encode, _ = self.encoder.forward(context_vec, context_mask)
        question_encode, zs = self.encoder.forward(question_vec, question_mask)

        align_ct = context_encode
        for i in range(self.num_align_hops):
            # align: (seq_len, batch, hidden_size*2)
            qt_align_ct, alpha = self.aligner[i](align_ct, question_encode, question_mask)
            bar_ct = self.aligner_sfu[i](align_ct, torch.cat([qt_align_ct,
                                                              align_ct * qt_align_ct,
                                                              align_ct - qt_align_ct], dim=-1))
            vis_param['match'] = alpha

            # self-align: (seq_len, batch, hidden_size*2)
            ct_align_ct, self_alpha = self.self_aligner[i](bar_ct, context_mask)
            hat_ct = self.self_aligner_sfu[i](bar_ct, torch.cat([ct_align_ct,
                                                                 bar_ct * ct_align_ct,
                                                                 bar_ct - ct_align_ct], dim=-1))
            vis_param['self-match'] = self_alpha

            # aggregation: (seq_len, batch, hidden_size*2)
            align_ct, _ = self.aggregation[i](hat_ct, context_mask)

        # pointer net: (answer_len, batch, context_len)
        for i in range(self.num_ptr_hops):
            ans_range_prop, zs = self.ptr_net[i](align_ct, context_mask, zs)

        # answer range
        ans_range_prop = ans_range_prop.transpose(0, 1)
        if not self.training and self.enable_search:
            ans_range = answer_search(ans_range_prop, context_mask)
        else:
            _, ans_range = torch.max(ans_range_prop, dim=2)

        return ans_range_prop, ans_range, vis_param
Author: SerenaKhoo, Project: Match-LSTM, Lines: 60, Source: mnemonic.py
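The aligner and SFU blocks fuse two representations by concatenating the aligned vector with element-wise interaction features. The shape bookkeeping, in miniature (sizes assumed for illustration):

import torch

x = torch.randn(30, 4, 128)  # (seq_len, batch, hidden*2)
y = torch.randn(30, 4, 128)  # aligned counterpart

# Raw vector plus multiplicative and subtractive interactions, on the last dim.
fusion_input = torch.cat([y, x * y, x - y], dim=-1)
print(fusion_input.shape)  # torch.Size([30, 4, 384])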

Example 7: forward

    def forward(self, input):
        x, low_level_features = self.resnet_features(input)
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x4 = self.aspp4(x)
        x5 = self.global_avg_pool(x)
        x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)

        x = torch.cat((x1, x2, x3, x4, x5), dim=1)

        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = F.interpolate(x, size=(int(math.ceil(input.size()[-2] / 4)),
                                   int(math.ceil(input.size()[-1] / 4))),
                          mode='bilinear', align_corners=True)

        low_level_features = self.conv2(low_level_features)
        low_level_features = self.bn2(low_level_features)
        low_level_features = self.relu(low_level_features)


        x = torch.cat((x, low_level_features), dim=1)
        x = self.last_conv(x)
        x = F.interpolate(x, size=input.size()[2:], mode='bilinear', align_corners=True)

        return x
Author: WenmuZhou, Project: pytorch-deeplab-xception, Lines: 27, Source: deeplab_resnet.py
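The ASPP fusion above relies on all five branches sharing the same spatial size, so only the channel counts add up. A minimal shape check (channel counts assumed):

import torch

branches = [torch.randn(2, 64, 16, 16) for _ in range(5)]

# Concatenation on dim 1 sums channels: 5 * 64 = 320.
fused = torch.cat(branches, dim=1)
print(fused.shape)  # torch.Size([2, 320, 16, 16])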

Example 8: forward

    def forward(self, x):

        # feedforward x to the first layer and add the result to the list
        x_first_out = self.layers_list[0].forward(x)

        # initialize the list
        forwarded_output_list = []

        forwarded_output_list.append(x_first_out)
        prev_x = x

        # feedforward process from the second to the last layer
        for i in range(1, self.num_layers):
            # concatenate filters
            concatenated_filters = torch.cat((forwarded_output_list[i-1], prev_x), 1)
            # forward
            x_next_out = self.layers_list[i].forward(concatenated_filters)
            # add to the list
            forwarded_output_list.append(x_next_out)
            # prepare the temporary variable for the next loop
            prev_x = concatenated_filters

        # prepare the output (this will have (k_feature_maps * layers) feature maps)
        output_x = torch.cat(forwarded_output_list, 1)

        return output_x
Author: TheIllusion, Project: TheIllusionsLibraries, Lines: 26, Source: sub_modules.py
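This is dense (DenseNet-style) connectivity: each layer consumes the concatenation of earlier outputs, so channel counts grow layer by layer. A two-layer sketch with hypothetical channel sizes:

import torch
import torch.nn as nn

conv1 = nn.Conv2d(3, 8, kernel_size=3, padding=1)
conv2 = nn.Conv2d(8 + 3, 8, kernel_size=3, padding=1)  # previous output + input

x = torch.randn(1, 3, 32, 32)
out1 = conv1(x)
out2 = conv2(torch.cat((out1, x), dim=1))

# The final feature map gathers every layer's output.
print(torch.cat([out1, out2], dim=1).shape)  # torch.Size([1, 16, 32, 32])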

Example 9: forward

 def forward(self, x, geneexpr):
     out1 = F.relu(self.conv1(x)) # 3 of these
     out1a = F.relu(self.conv1a(x))
     out1b = F.relu(self.conv1b(x))
     out = self.maxpool_3(torch.cat([out1,out1a,out1b],dim=1)) # (?, 300, 600)
     out = F.pad(out,(5,5))
     out = F.relu(self.conv2(out)) # (?, 300, 140)
     out = self.maxpool_4(out) # (?, 300, 35)
     out = F.pad(out,(3,3))
     out = F.relu(self.conv3(out)) # (?, 500, 32)
     out = F.pad(out,(1,1))
     out = self.maxpool_4(out) # (?, 500, 8)
     out = out.view(-1, 200*13) # (?, 500*8)
     if self.gdl == 0:
         geneexpr = self.dropout(geneexpr)
         geneexpr = F.relu(self.genelinear(geneexpr))
     elif self.gdl == 1:
         geneexpr = F.relu(self.genelinear(geneexpr)) # (?, 500)
         geneexpr = self.dropout(geneexpr)
     out = torch.cat([out, geneexpr], dim=1) # (?, 200*13+500)
     out = F.relu(self.linear1(out)) # (?, 800)
     out = self.dropout(out)
     out = F.relu(self.linear2(out)) # (?, 800)
     out = self.dropout(out)
     return self.output(out) # (?, 1)
Author: anihamde, Project: cs287-s18, Lines: 25, Source: old_model.py

Example 10: circular_convolution_conv

def circular_convolution_conv(keys, values, cuda=False):
    '''
    For the circular convolution of x and y to be equivalent to their linear
    convolution, you must zero-pad the vectors to length at least N + L - 1
    before you take the DFT. After you invert the product of the DFTs,
    retain only the first N + L - 1 elements.
    '''
    assert values.dim() == keys.dim() == 2, "only 2 dims supported"
    batch_size = keys.size(0)
    keys_feature_size = keys.size(1)
    values_feature_size = values.size(1)
    required_size = keys_feature_size + values_feature_size - 1

    # zero-pad up to N + L - 1
    zero_for_keys = Variable(float_type(cuda)(
        batch_size, required_size - keys_feature_size).zero_())
    zero_for_values = Variable(float_type(cuda)(
        batch_size, required_size - values_feature_size).zero_())
    keys = torch.cat([keys, zero_for_keys], -1)
    values = torch.cat([values, zero_for_values], -1)

    # do the conv and reshape and return
    print('values = ', values.view(batch_size, 1, -1).size(), ' keys = ', keys.view(batch_size, 1, -1).size())
    print('conv = ', F.conv1d(values.view(batch_size, 1, -1),
                    keys.view(batch_size, 1, -1)).size())
    return F.conv1d(values.view(batch_size, 1, -1),
                    keys.view(batch_size, 1, -1)).squeeze()[:, 0:required_size]
Author: jramapuram, Project: memory, Lines: 27, Source: holographic_memory.py
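Here torch.cat is used purely for right-padding with zeros, which matches F.pad on the last dimension exactly:

import torch
import torch.nn.functional as F

batch, n, pad = 4, 10, 6
keys = torch.randn(batch, n)

# cat with a zero block == F.pad with (left=0, right=pad) on the last dim.
assert torch.equal(torch.cat([keys, torch.zeros(batch, pad)], -1),
                   F.pad(keys, (0, pad)))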

Example 11: forward

 def forward(self, category, input, hidden):
     input_combined = torch.cat((category, input, hidden), 1)
     hidden = self.i2h(input_combined)
     output = self.i2o(input_combined)
     output_combined = torch.cat((hidden, output), 1)
     output = self.o2o(output_combined)
     return output, hidden
Author: Cadene, Project: practical-pytorch, Lines: 7, Source: model.py

Example 12: circular_convolution_fft

def circular_convolution_fft(keys, values, normalized=True, conj=False, cuda=False):
    '''
    For the circular convolution of x and y to be equivalent to their linear
    convolution, you must zero-pad the vectors to length at least N + L - 1
    before you take the DFT. After you invert the product of the DFTs,
    retain only the first N + L - 1 elements.
    '''
    assert values.dim() == keys.dim() == 2, "only 2 dims supported"
    assert values.size(-1) % 2 == keys.size(-1) % 2 == 0, "need last dim to be divisible by 2"
    batch_size, keys_feature_size = keys.size(0), keys.size(1)
    values_feature_size = values.size(1)
    required_size = keys_feature_size + values_feature_size - 1
    required_size = required_size + 1 if required_size % 2 != 0 else required_size

    # conj transpose
    keys = Complex(keys).conj().unstack() if conj else keys

    # reshape to [batch, [real, imag]]
    half = keys.size(-1) // 2
    keys = torch.cat([keys[:, 0:half].unsqueeze(2), keys[:, half:].unsqueeze(2)], -1)
    values = torch.cat([values[:, 0:half].unsqueeze(2), values[:, half:].unsqueeze(2)], -1)

    # do the fft, ifft and return num_required
    kf = torch.fft(keys, signal_ndim=1, normalized=normalized)
    vf = torch.fft(values, signal_ndim=1, normalized=normalized)
    kvif = torch.ifft(kf*vf, signal_ndim=1, normalized=normalized)#[:, 0:required_size]

    # if conj:
    #     return Complex(kvif[:, :, 1], kvif[:, :, 0]).unstack()
    #return Complex(kvif[:, :, 0], kvif[:, :, 1]).abs() if not conj \
    # return Complex(kvif[:, :, 0], kvif[:, :, 1]).unstack() # if not conj \
        # else Complex(kvif[:, :, 1], kvif[:, :, 0]).abs()

    return Complex(kvif[:, :, 0], kvif[:, :, 1]).unstack().view(batch_size, -1)
Author: jramapuram, Project: memory, Lines: 34, Source: holographic_memory.py
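A caution for readers on current PyTorch: the function-style torch.fft/torch.ifft used above was removed in PyTorch 1.8 in favor of the torch.fft module, which operates on complex tensors directly. A simplified sketch of circular convolution with the modern API (not a drop-in replacement for the real/imaginary packing above):

import torch

def circular_convolution(keys, values):
    # Pointwise product in the frequency domain is circular convolution in time.
    kf = torch.fft.fft(keys, dim=-1)
    vf = torch.fft.fft(values, dim=-1)
    return torch.fft.ifft(kf * vf, dim=-1).real

out = circular_convolution(torch.randn(4, 8), torch.randn(4, 8))
print(out.shape)  # torch.Size([4, 8])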

Example 13: __call__

    def __call__(self, grid):
        batch_size, _, grid_dimX, grid_dimY, grid_dimZ = grid.size()

        k = 1.0

        x_coords = (2.0 * k * torch.arange(grid_dimX, dtype=torch.float32)
                    .unsqueeze(1).unsqueeze(1)
                    .expand(grid_dimX, grid_dimY, grid_dimZ)) / (grid_dimX - 1.0) - 1.0
        y_coords = (2.0 * k * torch.arange(grid_dimY, dtype=torch.float32)
                    .unsqueeze(1).unsqueeze(0)
                    .expand(grid_dimX, grid_dimY, grid_dimZ)) / (grid_dimY - 1.0) - 1.0
        z_coords = (2.0 * k * torch.arange(grid_dimZ, dtype=torch.float32)
                    .unsqueeze(0).unsqueeze(0)
                    .expand(grid_dimX, grid_dimY, grid_dimZ)) / (grid_dimZ - 1.0) - 1.0

        coords = torch.stack((x_coords, y_coords, z_coords), dim=0)

        if self.with_r:
            rs = ((x_coords ** 2) + (y_coords ** 2) + (z_coords ** 2)) ** 0.5
            rs = k * rs / torch.max(rs)
            rs = torch.unsqueeze(rs, dim=0)
            coords = torch.cat((coords, rs), dim=0)

        coords = torch.unsqueeze(coords, dim=0).repeat(batch_size, 1, 1, 1, 1)

        grid = torch.cat((coords.to(grid.device), grid), dim=1)

        return grid
Author: caskeep, Project: 3D-SIS, Lines: 25, Source: coord_conv3d.py
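Note the division of labor here: torch.stack creates the new channel axis for the three coordinate planes, while torch.cat extends the grid's existing channel axis. In miniature (a toy grid, shapes assumed):

import torch

planes = [torch.rand(4, 4, 4) for _ in range(3)]
coords = torch.stack(planes, dim=0)                  # (3, 4, 4, 4): new axis
coords = coords.unsqueeze(0).repeat(2, 1, 1, 1, 1)   # broadcast to batch of 2

grid = torch.randn(2, 1, 4, 4, 4)
print(torch.cat((coords, grid), dim=1).shape)  # torch.Size([2, 4, 4, 4, 4])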

Example 14: encode

    def encode(self, src_sents_var, src_sents_len):
        """Encode the input natural language utterance

        Args:
            src_sents_var: a variable of shape (src_sent_len, batch_size), representing word ids of the input
            src_sents_len: a list of lengths of input source sentences, sorted by descending order

        Returns:
            src_encodings: source encodings of shape (batch_size, src_sent_len, hidden_size * 2)
            last_state, last_cell: the last hidden state and cell state of the encoder,
                                   of shape (batch_size, hidden_size)
        """

        # (tgt_query_len, batch_size, embed_size)
        # apply word dropout
        if self.training and self.args.word_dropout:
            mask = Variable(self.new_tensor(src_sents_var.size()).fill_(1. - self.args.word_dropout).bernoulli().long())
            src_sents_var = src_sents_var * mask + (1 - mask) * self.vocab.source.unk_id

        src_token_embed = self.src_embed(src_sents_var)
        packed_src_token_embed = pack_padded_sequence(src_token_embed, src_sents_len)

        # src_encodings: (tgt_query_len, batch_size, hidden_size)
        src_encodings, (last_state, last_cell) = self.encoder_lstm(packed_src_token_embed)
        src_encodings, _ = pad_packed_sequence(src_encodings)
        # src_encodings: (batch_size, tgt_query_len, hidden_size)
        src_encodings = src_encodings.permute(1, 0, 2)

        # (batch_size, hidden_size * 2)
        last_state = torch.cat([last_state[0], last_state[1]], 1)
        last_cell = torch.cat([last_cell[0], last_cell[1]], 1)

        return src_encodings, (last_state, last_cell)
Author: chubbymaggie, Project: tranX, Lines: 33, Source: parser.py
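The final cat merges the two directions of a bidirectional LSTM: h_n is laid out as (num_layers * num_directions, batch, hidden), so for a single layer, h_n[0] and h_n[1] are the forward and backward states. A self-contained check with assumed sizes:

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=16, hidden_size=32, bidirectional=True)
src = torch.randn(7, 4, 16)  # (seq_len, batch, embed)
_, (h_n, c_n) = lstm(src)

# Concatenate the directions: (batch, hidden * 2).
last_state = torch.cat([h_n[0], h_n[1]], dim=1)
print(last_state.shape)  # torch.Size([4, 64])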

Example 15: test

def test(model):
    game_state = GameState()

    # initial action is do nothing
    action = torch.zeros([model.number_of_actions], dtype=torch.float32)
    action[0] = 1
    image_data, reward, terminal = game_state.frame_step(action)
    image_data = resize_and_bgr2gray(image_data)
    image_data = image_to_tensor(image_data)
    state = torch.cat((image_data, image_data, image_data, image_data)).unsqueeze(0)

    while True:
        # get output from the neural network
        output = model(state)[0]

        action = torch.zeros([model.number_of_actions], dtype=torch.float32)
        if torch.cuda.is_available():  # put on GPU if CUDA is available
            action = action.cuda()

        # get action
        action_index = torch.argmax(output)
        if torch.cuda.is_available():  # put on GPU if CUDA is available
            action_index = action_index.cuda()
        action[action_index] = 1

        # get next state
        image_data_1, reward, terminal = game_state.frame_step(action)
        image_data_1 = resize_and_bgr2gray(image_data_1)
        image_data_1 = image_to_tensor(image_data_1)
        state_1 = torch.cat((state.squeeze(0)[1:, :, :], image_data_1)).unsqueeze(0)

        # set state to be state_1
        state = state_1
Author: ZedZero, Project: dqn_flappy_bird, Lines: 33, Source: dqn.py
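The state update implements a rolling window of four frames: drop the oldest frame along dim 0, append the newest, then restore the batch dimension. With assumed frame sizes:

import torch

state = torch.randn(1, 4, 84, 84)   # batch of one 4-frame stack
new_frame = torch.randn(1, 84, 84)

next_state = torch.cat((state.squeeze(0)[1:, :, :], new_frame)).unsqueeze(0)
print(next_state.shape)  # torch.Size([1, 4, 84, 84])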


Note: The torch.cat examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects by many contributors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.