

Python torch.zeros Function Code Examples

This article collects typical usage examples of the torch.zeros function in Python. If you are wondering what torch.zeros does, how to call it, or what real usage looks like, the curated code examples below should help.


A total of 15 code examples of the zeros function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
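Before the examples, here is a minimal sketch of the most common torch.zeros call patterns; the variable names and shapes are illustrative only and are not taken from any of the examples below.

import torch

# torch.zeros(*size) returns a tensor of the given shape filled with zeros.
h = torch.zeros(2, 3)                        # 2x3 matrix, default dtype float32
mask = torch.zeros(4, dtype=torch.bool)      # explicit dtype
state = torch.zeros(1, 8, 16, device="cpu")  # explicit device, e.g. an RNN/LSTM initial state
print(h.shape, mask.dtype, state.size())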

Example 1: init_hidden

 def init_hidden(self):
     # the first is the hidden h
     # the second is the cell  c
     return (
         Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim)),
         Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim))
     )
Developer: Suluo, Project: Kaggle, Lines: 7, Source: lstm.py
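Side note, not part of the original example: Variable was merged into Tensor in PyTorch 0.4, so in current PyTorch the same initializer can be written with bare torch.zeros calls. A minimal sketch, rewritten as a standalone function with illustrative sizes:

import torch

def init_hidden(num_layers, batch_size, hidden_dim):
    # In PyTorch >= 0.4 plain tensors carry autograd state, so the Variable wrapper is unnecessary.
    h0 = torch.zeros(num_layers, batch_size, hidden_dim)
    c0 = torch.zeros(num_layers, batch_size, hidden_dim)
    return h0, c0

h0, c0 = init_hidden(num_layers=2, batch_size=4, hidden_dim=8)
print(h0.shape, c0.shape)  # torch.Size([2, 4, 8]) torch.Size([2, 4, 8])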

Example 2: l2l_validate

def l2l_validate(model, cluster_center, n_epoch=100):
    val_accuracy = []
    for epoch in range(n_epoch):
        data_l = generate_data_l(cluster_center)
        data_n = generate_data_n(cluster_center, model.n_class_n)
        x_l, y_l = Variable(torch.from_numpy(data_l[0])).float(), Variable(
            torch.from_numpy(data_l[1]))
        x_n, y_n = Variable(torch.from_numpy(data_n[0])).float(), Variable(
            torch.from_numpy(data_n[1]))
        pred_ll, pred_nl, w, b = model(x_l, x_n)
        M = Variable(torch.zeros(model.n_class_n, model.n_dim))
        B = Variable(torch.zeros(model.n_class_n))
        for k in range(model.n_class_n):
            M[k] = torch.cat((w[:, 0][y_n == model.n_class_l + k].view(-1, 1),
                              w[:, 1][y_n == model.n_class_l + k].view(-1, 1)), 1).mean(0)
            B[k] = b[y_n == model.n_class_l + k].mean()
        pred_ln = torch.mm(x_l, M.t()) + B.view(1, -1).expand_as(torch.mm(x_l, M.t()))
        pred_nn = torch.mm(x_n, M.t()) + B.view(1, -1).expand_as(torch.mm(x_n, M.t()))
        pred = torch.cat((torch.cat((pred_ll, pred_nl)), torch.cat((pred_ln, pred_nn))), 1)
        pred = pred.data.max(1)[1]
        y = torch.cat((y_l, y_n))
        accuracy = pred.eq(y.data).cpu().sum() * 1.0 / y.size()[0]
        # print('accuracy: %.2f' % accuracy)
        val_accuracy.append(accuracy)
        acc_l = pred.eq(y.data).cpu()[0:100].sum() * 1.0 / 100
        acc_n = pred.eq(y.data).cpu()[100:150].sum() * 1.0 / 50
        print('accuracy: %.2f, lifelong accuracy: %.2f, new accuracy: %.2f' % (accuracy, acc_l, acc_n))

    return numpy.mean(numpy.asarray(val_accuracy))
Developer: yangyi02, Project: my-scripts, Lines: 29, Source: learning_to_learn_lifelong_newclass_trunc.py

Example 3: sample

 def sample(self, mu, logvar, k):
     eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_()) #[P,B,Z]
     z = eps.mul(torch.exp(.5*logvar)) + mu  #[P,B,Z]
     logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size)), 
                         Variable(torch.zeros(self.B, self.z_size)))  #[P,B]
     logqz = lognormal(z, mu, logvar)
     return z, logpz, logqz
Developer: chriscremer, Project: Other_Code, Lines: 7, Source: iwae.py
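In this example torch.zeros supplies both the mean and the log-variance of the prior, so z is scored against a standard normal. Below is a minimal sketch of the same reparameterization step written with torch.distributions instead of the repository's lognormal helper; the function is illustrative and is not the repository's code:

import torch
from torch.distributions import Normal

def sample_reparameterized(mu, logvar, k):
    # eps ~ N(0, I) with shape [k, B, Z]; z = mu + sigma * eps, where sigma = exp(0.5 * logvar)
    eps = torch.randn(k, *mu.shape)
    z = mu + torch.exp(0.5 * logvar) * eps
    prior = Normal(torch.zeros_like(mu), torch.ones_like(mu))  # standard normal prior p(z)
    posterior = Normal(mu, torch.exp(0.5 * logvar))            # approximate posterior q(z|x)
    logpz = prior.log_prob(z).sum(-1)                          # [k, B]
    logqz = posterior.log_prob(z).sum(-1)                      # [k, B]
    return z, logpz, logqz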

Example 4: init_hidden

 def init_hidden(self, num_layers, batch_size):
     # the first is the hidden h
     # the second is the cell  c
     # return (Variable(torch.zeros(1, batch_size, self.hidden_dim)),
     #          Variable(torch.zeros(1, batch_size, self.hidden_dim)))
     return (Variable(torch.zeros(1 * num_layers, batch_size, self.hidden_dim)),
             Variable(torch.zeros(1 * num_layers, batch_size, self.hidden_dim)))
Developer: fengzhangyin, Project: cnn-lstm-bilstm-deepcnn-clstm-in-pytorch, Lines: 7, Source: model_LSTM.py

Example 5: test_augmented_lstm_computes_same_function_as_pytorch_lstm

    def test_augmented_lstm_computes_same_function_as_pytorch_lstm(self):
        augmented_lstm = AugmentedLstm(10, 11)
        pytorch_lstm = LSTM(10, 11, num_layers=1, batch_first=True)
        # Initialize all weights to be == 1.
        initializer = InitializerApplicator([(".*", lambda tensor: torch.nn.init.constant_(tensor, 1.))])
        initializer(augmented_lstm)
        initializer(pytorch_lstm)

        initial_state = torch.zeros([1, 5, 11])
        initial_memory = torch.zeros([1, 5, 11])

        # Use bigger numbers to avoid floating point instability.
        sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(self.random_tensor * 5., self.sequence_lengths)
        lstm_input = pack_padded_sequence(sorted_tensor, sorted_sequence.data.tolist(), batch_first=True)

        augmented_output, augmented_state = augmented_lstm(lstm_input, (initial_state, initial_memory))
        pytorch_output, pytorch_state = pytorch_lstm(lstm_input, (initial_state, initial_memory))
        pytorch_output_sequence, _ = pad_packed_sequence(pytorch_output, batch_first=True)
        augmented_output_sequence, _ = pad_packed_sequence(augmented_output, batch_first=True)

        numpy.testing.assert_array_almost_equal(pytorch_output_sequence.data.numpy(),
                                                augmented_output_sequence.data.numpy(), decimal=4)
        numpy.testing.assert_array_almost_equal(pytorch_state[0].data.numpy(),
                                                augmented_state[0].data.numpy(), decimal=4)
        numpy.testing.assert_array_almost_equal(pytorch_state[1].data.numpy(),
                                                augmented_state[1].data.numpy(), decimal=4)
Developer: pyknife, Project: allennlp, Lines: 26, Source: augmented_lstm_test.py

Example 6: __getitem__

    def __getitem__(self, idx):

        face_ind = 1
        if idx < self.n_MSR:
            vid = self.train_list[idx]
            text = self.text_features[vid]
            r = random.randint(0, len(text)-1)
            text = text[r]
            flow = self.flow_features[vid]
            audio = self.audio_features[vid]
            video = self.visual_features[vid]
            face = self.face_features[vid]

            if np.sum(face) == 0:
                face_ind = 0
        elif self.coco:
            video = self.coco_visual[idx-self.n_MSR]
            text = self.coco_text[idx-self.n_MSR]
            audio = th.zeros(1,128)
            flow = th.zeros(1024)
            face = th.zeros(128)
            face_ind = 0

        return {'video': video, 
                'flow': flow,
                'face': face,
                'text': text,
                'coco_ind': self.coco_ind[idx],
                'face_ind': face_ind,
                'audio': audio
                }
Developer: lvaleriu, Project: Mixture-of-Embedding-Experts, Lines: 31, Source: MSRVTT.py

Example 7: singleTagLoss

def singleTagLoss(pred_tag, keypoints):
    """
    associative embedding loss for one image
    """
    eps = 1e-6
    tags = []
    pull = 0
    for i in keypoints:
        tmp = []
        for j in i:
            if j[1]>0:
                tmp.append(pred_tag[j[0]])
        if len(tmp) == 0:
            continue
        tmp = torch.stack(tmp)
        tags.append(torch.mean(tmp, dim=0))
        pull = pull +  torch.mean((tmp - tags[-1].expand_as(tmp))**2)

    if len(tags) == 0:
        return make_input(torch.zeros([1]).float()), make_input(torch.zeros([1]).float())

    tags = torch.stack(tags)[:,0]

    num = tags.size()[0]
    size = (num, num, tags.size()[1])
    A = tags.unsqueeze(dim=1).expand(*size)
    B = A.permute(1, 0, 2)

    diff = A - B
    diff = torch.pow(diff, 2).sum(dim=2)[:,:,0]
    push = torch.exp(-diff)
    push = (torch.sum(push) - num)
    return push/((num - 1) * num + eps) * 0.5, pull/(num + eps)
Developer: cuizy15, Project: pose-ae-train, Lines: 33, Source: loss.py

Example 8: forward

    def forward(self, X_list_of_chains):
        
        """
        X is a list of tensors from which to evaluate the performance.
        Every element in X can have any length.
        The batch size is 1 in this case... we just run it a number times
        
        """
        self.sample_posterior()

#        print ("Total_sample_dim", X.shape)
        h_t = torch.zeros(X_list_of_chains[0].size(1), self.cf_a.HS, dtype=self.cf_a.dtype, device = self.cf_a.device)
        c_t = torch.zeros(X_list_of_chains[0].size(1),  self.cf_a.HS, dtype=self.cf_a.dtype,device = self.cf_a.device)

        ## We generate the output for every vector in the chain
        outputs = []
        for X in X_list_of_chains:
            for i, input_t in enumerate(X.chunk(X.size(0), dim=0)):
                input_t = input_t[:,0,:]
#                print ("One_timestep_dim",input_t.shape)
                h_t, c_t = self.lstm1(input_t, (h_t, c_t))
        
            output = self.linear(h_t)
            outputs += [output]
        outputs = torch.cat(outputs, 0)
#        print ("prediction dim ", output.shape)
#        print ("predictions dim ", outputs.shape)
        return outputs
Developer: manuwhs, Project: Trapyng, Lines: 28, Source: RNN_names_classifier_fullVB.py

Example 9: test_make_scipy_bounds

    def test_make_scipy_bounds(self):
        X = torch.zeros(3, 1, 2)
        # both None
        self.assertIsNone(make_scipy_bounds(X=X, lower_bounds=None, upper_bounds=None))
        # lower None
        upper_bounds = torch.ones(2)
        bounds = make_scipy_bounds(X=X, lower_bounds=None, upper_bounds=upper_bounds)
        self.assertIsInstance(bounds, Bounds)
        self.assertTrue(
            np.all(np.equal(bounds.lb, np.full((3, 1, 2), float("-inf")).flatten()))
        )
        self.assertTrue(np.all(np.equal(bounds.ub, np.ones((3, 1, 2)).flatten())))
        # upper None
        lower_bounds = torch.zeros(2)
        bounds = make_scipy_bounds(X=X, lower_bounds=lower_bounds, upper_bounds=None)
        self.assertIsInstance(bounds, Bounds)
        self.assertTrue(np.all(np.equal(bounds.lb, np.zeros((3, 1, 2)).flatten())))
        self.assertTrue(
            np.all(np.equal(bounds.ub, np.full((3, 1, 2), float("inf")).flatten()))
        )
        # floats
        bounds = make_scipy_bounds(X=X, lower_bounds=0.0, upper_bounds=1.0)
        self.assertIsInstance(bounds, Bounds)
        self.assertTrue(np.all(np.equal(bounds.lb, np.zeros((3, 1, 2)).flatten())))
        self.assertTrue(np.all(np.equal(bounds.ub, np.ones((3, 1, 2)).flatten())))

        # 1-d tensors
        bounds = make_scipy_bounds(
            X=X, lower_bounds=lower_bounds, upper_bounds=upper_bounds
        )
        self.assertIsInstance(bounds, Bounds)
        self.assertTrue(np.all(np.equal(bounds.lb, np.zeros((3, 1, 2)).flatten())))
        self.assertTrue(np.all(np.equal(bounds.ub, np.ones((3, 1, 2)).flatten())))
Developer: saschwan, Project: botorch, Lines: 33, Source: test_parameter_constraints.py

Example 10: _construct_previous

    def _construct_previous(self, layer, direction, inputs, tree, idx):
        if direction == 'up':
            oidx = tree.children_idx(idx)
        else:
            oidx = tree.parents_idx(idx)

        if oidx:
            h_prev, c_prev = [], []

            for i in oidx:
                h_prev_i, c_prev_i = self._upward_downward(layer,
                                                           direction,
                                                           inputs,
                                                           tree, i)

                h_prev.append(h_prev_i)
                c_prev.append(c_prev_i)

            h_prev = torch.stack(h_prev, 1)
            c_prev = torch.stack(c_prev, 1)

        elif inputs.is_cuda:
            h_prev = torch.zeros(self.hidden_size, 1).cuda()
            c_prev = torch.zeros(self.hidden_size, 1).cuda()

        else:
            h_prev = torch.zeros(self.hidden_size, 1)
            c_prev = torch.zeros(self.hidden_size, 1)

        return oidx, (h_prev, c_prev)
Developer: ShaorongYan, Project: factslab-python, Lines: 30, Source: childsumtreelstm.py

Example 11: _construct_x_t

    def _construct_x_t(self, layer, inputs, idx, tree):
        if layer > 0 and self.bidirectional:
            x_t = torch.cat([self.hidden_state[layer - 1]['up'][idx],
                             self.hidden_state[layer - 1]['down'][idx]])
        elif layer > 0:
            x_t = self.hidden_state[layer - 1]['up'][idx]
        else:
            if idx in tree.terminal_indices:
                string_idx = tree.terminal_indices.index(idx)

                if self._has_batch_dimension:
                    x_t = inputs[string_idx, 0]
                else:
                    x_t = inputs[string_idx]
            else:
                if self._has_batch_dimension:
                    x_t_raw = torch.zeros(self.input_size, 1)
                else:
                    x_t_raw = torch.zeros(self.input_size)

                if inputs.is_cuda:
                    x_t = x_t_raw.cuda()

                else:
                    x_t = x_t_raw

        return x_t
Developer: ShaorongYan, Project: factslab-python, Lines: 27, Source: childsumtreelstm.py

Example 12: addition_feature

    def addition_feature(self, index):
        data = [self.context, self.question]
        add_features = [None, None]

        for k in range(len(data)):
            features = {}
            tmp_seq_len = data[k]['token'].shape[1]

            if self.config['use_pos']:
                features['pos'] = torch.zeros((tmp_seq_len, len(self.feature_dict['id2pos'])), dtype=torch.float)
                for i, ele in enumerate(data[k]['pos'][index]):
                    if ele == PreprocessData.padding_idx:
                        break
                    features['pos'][i, ele] = 1

            if self.config['use_ent']:
                features['ent'] = torch.zeros((tmp_seq_len, len(self.feature_dict['id2ent'])), dtype=torch.float)
                for i, ele in enumerate(data[k]['ent'][index]):
                    if ele == PreprocessData.padding_idx:
                        break
                    features['ent'][i, ele] = 1

            if self.config['use_em']:
                features['em'] = to_float_tensor(data[k]['em'][index]).unsqueeze(-1)
            if self.config['use_em_lemma']:
                features['em_lemma'] = to_float_tensor(data[k]['em_lemma'][index]).unsqueeze(-1)

            if len(features) > 0:
                add_features[k] = torch.cat(list(features.values()), dim=-1)

        return add_features
Developer: SerenaKhoo, Project: Match-LSTM, Lines: 31, Source: squad_dataset.py

Example 13: fit

    def fit(self):
        args = self.args

        for epoch in range(args.max_epochs):
            self.G.train()
            self.D.train()
            for step, inputs in enumerate(self.train_loader):
                batch_size = inputs[0].size(0)

                images = inputs[0].to(self.device)
                labels = inputs[1].to(self.device)
                
                # create the labels used to distinguish real or fake
                real_labels = torch.ones(batch_size, dtype=torch.int64).to(self.device)
                fake_labels = torch.zeros(batch_size, dtype=torch.int64).to(self.device)
                
                # train the discriminator
                
                # discriminator <- real image
                D_real, D_real_cls = self.D(images)
                D_loss_real = self.loss_fn(D_real, real_labels)
                D_loss_real_cls = self.loss_fn(D_real_cls, labels)
                
                # noise vector
                z = torch.randn(batch_size, args.z_dim).to(self.device)

                # make label to onehot vector
                y_onehot = torch.zeros((batch_size, 10)).to(self.device)
                y_onehot.scatter_(1, labels.unsqueeze(1), 1)
                y_onehot.requires_grad_(False)
                
                # discriminator <- fake image
                G_fake = self.G(y_onehot, z)
                D_fake, D_fake_cls = self.D(G_fake)
                D_loss_fake = self.loss_fn(D_fake, fake_labels)
                D_loss_fake_cls = self.loss_fn(D_fake_cls, labels)
                
                D_loss = D_loss_real + D_loss_fake + \
                         D_loss_real_cls + D_loss_fake_cls
                self.D.zero_grad()
                D_loss.backward()
                self.optim_D.step()
                
                # train the generator

                z = torch.randn(batch_size, args.z_dim).to(self.device)
                G_fake = self.G(y_onehot, z)
                D_fake, D_fake_cls = self.D(G_fake)
                
                G_loss = self.loss_fn(D_fake, real_labels) + \
                         self.loss_fn(D_fake_cls, labels)
                self.G.zero_grad()
                G_loss.backward()
                self.optim_G.step()

            if (epoch+1) % args.print_every == 0:
                print("Epoch [{}/{}] Loss_D: {:.3f}, Loss_G: {:.3f}".
                    format(epoch+1, args.max_epochs, D_loss.item(), G_loss.item()))
                self.save(args.ckpt_dir, epoch+1)
                self.sample(epoch+1)
Developer: muncok, Project: pytorch-exercise, Lines: 60, Source: solver.py
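The zeros-then-scatter_ lines in the example above are a common way to build one-hot label vectors. A standalone sketch of just that pattern, with illustrative values:

import torch

labels = torch.tensor([3, 0, 7])            # class indices, shape [B]
y_onehot = torch.zeros(labels.size(0), 10)  # [B, 10] tensor of zeros
y_onehot.scatter_(1, labels.unsqueeze(1), 1)  # write a 1 at each row's label column
print(y_onehot)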

Example 14: encoder_forward

    def encoder_forward(self, opt, source_l=3, bsize=1):
        '''
        Tests if the encoder works as expected

        args:
            opt: set of options
            source_l: Length of generated input sentence
            bsize: Batchsize of generated input
        '''
        if opt.rnn_size > 0:
            opt.enc_rnn_size = opt.rnn_size
        word_field = self.get_field()
        embeddings = build_embeddings(opt, word_field)
        enc = build_encoder(opt, embeddings)

        test_src, test_tgt, test_length = self.get_batch(source_l=source_l,
                                                         bsize=bsize)

        hidden_t, outputs, test_length = enc(test_src, test_length)

        # Initialize vectors to compare size with
        test_hid = torch.zeros(self.opt.enc_layers, bsize, opt.enc_rnn_size)
        test_out = torch.zeros(source_l, bsize, opt.dec_rnn_size)

        # Ensure correct sizes and types
        self.assertEqual(test_hid.size(),
                         hidden_t[0].size(),
                         hidden_t[1].size())
        self.assertEqual(test_out.size(), outputs.size())
        self.assertEqual(type(outputs), torch.Tensor)
Developer: Unbabel, Project: OpenNMT-py, Lines: 30, Source: test_models.py

Example 15: sample

    def sample(self, mu, logvar, k):

        # print (mu)
        # print (logvar)


        if torch.cuda.is_available():
            eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_()).cuda() #[P,B,Z]

            # print (mu.size())
            # print (logvar.size())
            # print (eps.size())

            z = eps.mul(torch.exp(.5*logvar)) + mu  #[P,B,Z]
            logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size).cuda()), 
                                Variable(torch.zeros(self.B, self.z_size)).cuda())  #[P,B]



            # logqz = lognormal(z, mu, logvar)

            logqz = lognormal(z, Variable(mu.data), Variable(logvar.data))



        else:
            eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_())#[P,B,Z]
            z = eps.mul(torch.exp(.5*logvar)) + mu  #[P,B,Z]
            logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size)), 
                                Variable(torch.zeros(self.B, self.z_size)))  #[P,B]
            logqz = lognormal(z, mu, logvar) 
        return z, logpz, logqz
Developer: chriscremer, Project: Other_Code, Lines: 32, Source: vae_with_policy.py


Note: The torch.zeros examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code; do not reproduce without permission.