

Python torch.autograd code examples

This article collects typical usage examples of torch.autograd in Python (strictly a subpackage of torch rather than a method, providing Variable, grad, and backward). If you are asking what torch.autograd does, how it is used, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the torch package it belongs to.


The following presents 15 code examples of torch.autograd, ordered by popularity.

Example 1: forward

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def forward(self, inputs, hidden=None):
        if hidden is None and self.mode != "jordan":
            batch_size = inputs.size(0)
            # lazily build a zero initial hidden state (Variable is the pre-0.4 API)
            hidden = torch.autograd.Variable(torch.zeros(batch_size,
                                                         self.hidden_size))
            if self.cuda:
                hidden = hidden.cuda()

        output_forward, hidden_forward = self._forward(inputs, hidden)
        output_forward = torch.stack(output_forward, dim=0)
        if not self.bidirectional:
            if self.batch_first:
                output_forward = output_forward.transpose(0,1)
            return output_forward, hidden_forward

        output_reversed, hidden_reversed = self._reversed_forward(inputs, hidden)
        hidden = torch.cat([hidden_forward, hidden_reversed], dim=hidden_forward.dim() - 1)
        output_reversed = torch.stack(output_reversed, dim=0)
        output = torch.cat([output_forward, output_reversed],
                                dim=output_reversed.data.dim() - 1)
        if self.batch_first:
            output = output.transpose(0,1)
        return output, hidden 
Author: llhthinker, Project: slot-filling, Lines: 27, Source: rnn.py

Example 2: test_pruneFeatureMap_ShouldPruneRightParams

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def test_pruneFeatureMap_ShouldPruneRightParams(self):
        dropped_index = 0
        output = self.module(self.input)
        torch.autograd.backward(output, self.upstream_gradient)

        old_weight_size = self.module.weight.size()
        old_bias_size = self.module.bias.size()
        old_out_channels = self.module.out_channels
        old_weight_values = self.module.weight.data.cpu().numpy()

        # ensure that the chosen index is dropped
        self.module.prune_feature_map(dropped_index)

        # check bias size
        self.assertEqual(self.module.bias.size()[0], (old_bias_size[0]-1))
        # check output channels
        self.assertEqual(self.module.out_channels, old_out_channels-1)

        _, *other_old_weight_sizes = old_weight_size
        # check weight size
        self.assertEqual(self.module.weight.size(), (old_weight_size[0]-1, *other_old_weight_sizes))
        # check weight value
        expected = np.delete(old_weight_values, dropped_index, 0)
        self.assertTrue(np.array_equal(self.module.weight.data.cpu().numpy(), expected)) 
Author: alexfjw, Project: prunnable-layers-pytorch, Lines: 26, Source: prunable_nn_test.py
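
The prune_feature_map method under test is not shown in this snippet. For orientation, here is a minimal sketch of what such a method could look like for a Conv2d-based module, assuming it simply drops one output channel; the slicing logic below is inferred from the test's assertions, not taken from the project:

import torch.nn as nn

def prune_feature_map(module: nn.Conv2d, map_index: int):
    # Hypothetical logic: keep every output channel except the dropped one.
    keep = [i for i in range(module.out_channels) if i != map_index]
    module.weight = nn.Parameter(module.weight.data[keep])
    if module.bias is not None:
        module.bias = nn.Parameter(module.bias.data[keep])
    module.out_channels -= 1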

Example 3: forward

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def forward(self, x):
        if x.get_device() == self.devices[0]:
            # Master mode
            extra = {
                "is_master": True,
                "master_queue": self.master_queue,
                "worker_queues": self.worker_queues,
                "worker_ids": self.worker_ids
            }
        else:
            # Worker mode
            extra = {
                "is_master": False,
                "master_queue": self.master_queue,
                "worker_queue": self.worker_queues[self.worker_ids.index(x.get_device())]
            }

        return inplace_abn_sync(x, self.weight, self.bias, autograd.Variable(self.running_mean),
                                autograd.Variable(self.running_var), extra, self.training, self.momentum, self.eps,
                                self.activation, self.slope) 
Author: speedinghzl, Project: pytorch-segmentation-toolbox, Lines: 22, Source: bn.py

Example 4: reinforce_backward

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def reinforce_backward(self, score, rewards):
        agg_score, sel_score, cond_score = score

        cur_reward = rewards[:]
        eof = self.SQL_TOK.index('<END>')
        for t in range(len(cond_score[1])):
            reward_inp = torch.FloatTensor(cur_reward).unsqueeze(1)
            if self.gpu:
                reward_inp = reward_inp.cuda()
            cond_score[1][t].reinforce(reward_inp)  # stochastic-node API, removed in PyTorch 0.4

            for b in range(len(rewards)):
                if cond_score[1][t][b].data.cpu().numpy()[0] == eof:
                    cur_reward[b] = 0
        torch.autograd.backward(cond_score[1], [None for _ in cond_score[1]])
        return 
Author: llSourcell, Project: SQL_Database_Optimization, Lines: 18, Source: seq2sql.py

Example 5: main

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def main(args):
    dataset = load_config(args.dataset)

    num_classes = len(dataset["common"]["classes"])
    net = UNet(num_classes)

    def map_location(storage, _):
        return storage.cpu()

    chkpt = torch.load(args.checkpoint, map_location=map_location)
    net = torch.nn.DataParallel(net)
    net.load_state_dict(chkpt["state_dict"])

    # Todo: make input channels configurable, not hard-coded to three channels for RGB
    batch = torch.autograd.Variable(torch.randn(1, 3, args.image_size, args.image_size))

    torch.onnx.export(net, batch, args.model) 
Author: mapbox, Project: robosat, Lines: 19, Source: export.py
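
Once exported, the ONNX file can be sanity-checked with a runtime independent of PyTorch. A minimal sketch using onnxruntime, where "model.onnx" and the 512x512 input size are placeholders:

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("model.onnx")        # path is a placeholder
input_name = sess.get_inputs()[0].name
dummy = np.random.randn(1, 3, 512, 512).astype(np.float32)  # NCHW; size assumed
(output,) = sess.run(None, {input_name: dummy})  # assumes a single output head
print(output.shape)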

Example 6: forward

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def forward(self, x):
        device_id = x.get_device() if torch.cuda.is_available() else None
        feature = self.dnn(x)
        rows, cols = feature.size()[-2:]
        cells = rows * cols
        _feature = feature.permute(0, 2, 3, 1).contiguous().view(feature.size(0), cells, self.anchors.size(0), -1)
        sigmoid = F.sigmoid(_feature[:, :, :, :3])
        iou = sigmoid[:, :, :, 0]
        ij = torch.autograd.Variable(utils.ensure_device(meshgrid(rows, cols).view(1, -1, 1, 2), device_id))
        center_offset = sigmoid[:, :, :, 1:3]
        center = ij + center_offset
        size_norm = _feature[:, :, :, 3:5]
        anchors = torch.autograd.Variable(utils.ensure_device(self.anchors.view(1, 1, -1, 2), device_id))
        size = torch.exp(size_norm) * anchors
        size2 = size / 2
        yx_min = center - size2
        yx_max = center + size2
        logits = _feature[:, :, :, 5:] if _feature.size(-1) > 5 else None
        return feature, iou, center_offset, size_norm, yx_min, yx_max, logits 
Author: ruiminshen, Project: yolo2-pytorch, Lines: 21, Source: __init__.py

Example 7: evaluate_stocha_val_acc

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def evaluate_stocha_val_acc(model):
    model.eval()
    acc_cum = 0
    N = dvd_sub.shape[0]
    for i in range(N):
        X = dvd_sub[i, :, :]
        X = autograd.Variable(torch.from_numpy(X).float().cuda())
        X = X.view(len(X), 1, -1)
        y_pred = model(X)
        y_pred = y_pred.data.cpu().max(1)[1].numpy()[0]
        if y_pred == dvl_sub[1][i]:
            acc_cum += 1
    return acc_cum*100/N


# Observations:
# * better to use log_softmax instead of softmax
# * decrease the lr successively to get better results
Author: Naman-ntc, Project: Action-Recognition, Lines: 25, Source: LSTM_classifierX3_CUDA-xsub.py

Example 8: get_C_hat_transpose

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def get_C_hat_transpose():
    probs = []
    net.eval()
    for batch_idx, (data, target) in enumerate(train_gold_deterministic_loader):
        # we subtract 10 because we added 10 to gold so we could identify which example is gold in train_phase2
        # volatile=True is the pre-0.4 way to disable graph construction at inference
        data, target = torch.autograd.Variable(data.cuda(), volatile=True),\
                       torch.autograd.Variable((target - num_classes).cuda(), volatile=True)

        # forward
        output = net(data)
        pred = F.softmax(output)
        probs.extend(list(pred.data.cpu().numpy()))

    probs = np.array(probs, dtype=np.float32)
    preds = np.argmax(probs, axis=1)
    C_hat = np.zeros([num_classes, num_classes])
    for i in range(len(train_data_gold.train_labels)):
        C_hat[int(np.rint(train_data_gold.train_labels[i] - num_classes)), preds[i]] += 1

    C_hat /= (np.sum(C_hat, axis=1, keepdims=True) + 1e-7)
    C_hat = C_hat * 0.99 + np.full_like(C_hat, 1/num_classes) * 0.01  # smoothing

    return C_hat.T.astype(np.float32) 
Author: mmazeika, Project: glc, Lines: 25, Source: train_confusion.py
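
volatile=True was removed in PyTorch 0.4; the modern equivalent of the inference loop above wraps the forward pass in torch.no_grad(). A minimal sketch, assuming net and train_gold_deterministic_loader exist as in the snippet:

import torch
import torch.nn.functional as F

probs = []
net.eval()
with torch.no_grad():                      # replaces volatile=True: no graph is built
    for data, _ in train_gold_deterministic_loader:
        output = net(data.cuda())
        probs.extend(F.softmax(output, dim=1).cpu().numpy())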

Example 9: forward

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def forward(self, x): 
        nB = x.data.size(0)
        nC = x.data.size(1)
        nH = x.data.size(2)
        nW = x.data.size(3)
        samples = nB*nH*nW
        y = x.view(nB, nC, nH*nW).transpose(1,2).contiguous().view(-1,nC)
        if self.training:
            print('forward in training mode on autograd')
            m = Variable(y.mean(0).data, requires_grad=False)
            v = Variable(y.var(0).data, requires_grad=False)
            self.running_mean = (1-self.momentum)*self.running_mean + self.momentum * m.data.view(-1)
            self.running_var = (1-self.momentum)*self.running_var + self.momentum * v.data.view(-1)
            m = m.repeat(samples, 1)
            v = v.repeat(samples, 1)*(samples-1.0)/samples
        else:
            m = Variable(self.running_mean.repeat(samples, 1), requires_grad=False)
            v = Variable(self.running_var.repeat(samples, 1), requires_grad=False)
        w = self.weight.repeat(samples, 1)
        b = self.bias.repeat(samples, 1)
        y = (y - m)/(v+self.eps).sqrt() * w + b 
        y = y.view(nB, nH*nW, nC).transpose(1,2).contiguous().view(nB, nC, nH, nW) 
        return y 
Author: andy-yun, Project: pytorch-0.4-yolov3, Lines: 25, Source: bn.py

Example 10: evaluate

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def evaluate(data_loader, lm_model, criterion, limited=76800):
    print('evaluating')
    lm_model.eval()

    iterator = data_loader.get_tqdm()

    lm_model.init_hidden()
    total_loss = 0
    total_len = 0
    for word_t, label_t in iterator:
        label_t = label_t.view(-1)
        tmp_len = label_t.size(0)
        output = lm_model.log_prob(word_t)
        total_loss += tmp_len * utils.to_scalar(criterion(autograd.Variable(output), label_t))
        total_len += tmp_len

        if limited >= 0 and total_len > limited:
            break

    ppl = math.exp(total_loss / total_len)
    print('PPL: ' + str(ppl))

    return ppl 
Author: LiyuanLucasLiu, Project: RAdam, Lines: 25, Source: train_1bw.py

Example 11: normalize_image

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def normalize_image(image, forward=True, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
        im_size = image.size()
        mean=torch.FloatTensor(mean).unsqueeze(1).unsqueeze(2)
        std=torch.FloatTensor(std).unsqueeze(1).unsqueeze(2)
        if image.is_cuda:
            mean = mean.cuda()
            std = std.cuda()
        if isinstance(image, torch.autograd.variable.Variable):
            mean = Variable(mean,requires_grad=False)
            std = Variable(std,requires_grad=False)
        if forward:
            if len(im_size)==3:
                result = image.sub(mean.expand(im_size)).div(std.expand(im_size))
            elif len(im_size)==4:
                result = image.sub(mean.unsqueeze(0).expand(im_size)).div(std.unsqueeze(0).expand(im_size))
        else:
            if len(im_size)==3:
                result = image.mul(std.expand(im_size)).add(mean.expand(im_size))
            elif len(im_size)==4:
                result = image.mul(std.unsqueeze(0).expand(im_size)).add(mean.unsqueeze(0).expand(im_size))
                
        return result
Author: ignacio-rocco, Project: weakalign, Lines: 24, Source: normalization.py
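
A quick round-trip check of normalize_image; the tensor shape is illustrative:

import torch

image = torch.rand(3, 224, 224)                     # CHW image in [0, 1]
normed = normalize_image(image)                     # subtract mean, divide by std
restored = normalize_image(normed, forward=False)   # inverse transform
print(torch.allclose(image, restored, atol=1e-6))   # True up to rounding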

Example 12: pearlmutter_hvp

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def pearlmutter_hvp(kl_func, all_obs, old_dist, policy, v):
    """
    TODO (ewei) add docstring here.

    Parameters
    ----------
    see docstring of finite_diff_hvp function.

    Returns
    -------
    see docstring of finite_diff_hvp function.
    """
    policy.zero_grad()
    kl_div = kl_func(policy, all_obs, old_dist)
    param_grads = torch.autograd.grad(kl_div, policy.ordered_params(),
        create_graph=True)
    flat_grad = torch.cat([grad.view(-1) for grad in param_grads])
    gradient_vector_product = torch.sum(flat_grad * Variable(v))
    hessian_vector_product = torch.autograd.grad(gradient_vector_product,
        policy.ordered_params())
    flat_hvp = torch.cat([product.contiguous().view(-1) for product in hessian_vector_product])
    return flat_hvp.data 
Author: nosyndicate, Project: pytorchrl, Lines: 24, Source: trpo.py
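
Pearlmutter's trick computes the Hessian-vector product with two backward passes instead of materializing the Hessian. A self-contained sketch on a toy quadratic, independent of the policy/KL machinery above:

import torch

A = torch.randn(3, 3)
A = A @ A.t()                      # symmetric, so the Hessian of f below is 2A
x = torch.randn(3, requires_grad=True)
v = torch.randn(3)

f = x @ A @ x                      # scalar quadratic form
(grad,) = torch.autograd.grad(f, x, create_graph=True)  # first backward pass
(hvp,) = torch.autograd.grad(grad @ v, x)               # second pass yields H @ v
print(torch.allclose(hvp, 2 * A @ v, atol=1e-5))        # True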

Example 13: finish_episode

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def finish_episode():
    R = 0
    saved_actions = model.saved_actions
    value_loss = 0
    rewards = []
    for r in model.rewards[::-1]:
        R = r + args.gamma * R
        rewards.insert(0, R)
    rewards = torch.Tensor(rewards)
    rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)
    for (action, value), r in zip(saved_actions, rewards):
        reward = r - value.data[0,0]
        action.reinforce(reward)
        value_loss += F.smooth_l1_loss(value, Variable(torch.Tensor([r])))
    optimizer.zero_grad()
    final_nodes = [value_loss] + list(map(lambda p: p.action, saved_actions))
    gradients = [torch.ones(1)] + [None] * len(saved_actions)
    autograd.backward(final_nodes, gradients)
    optimizer.step()
    del model.rewards[:]
    del model.saved_actions[:] 
Author: nosyndicate, Project: pytorchrl, Lines: 23, Source: actor_critic.py
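
Tensor.reinforce was removed in PyTorch 0.4. The modern equivalent stores log-probabilities at sampling time (e.g. via torch.distributions.Categorical) and builds an explicit loss. A minimal sketch of the same actor-critic update, assuming saved_actions holds (log_prob, value) pairs and that rewards and optimizer exist as above:

import torch
import torch.nn.functional as F

policy_losses, value_losses = [], []
for (log_prob, value), r in zip(saved_actions, rewards):
    advantage = r - value.item()                    # detached baseline
    policy_losses.append(-log_prob * advantage)     # REINFORCE term
    value_losses.append(F.smooth_l1_loss(value.squeeze(), torch.tensor(float(r))))

optimizer.zero_grad()
(torch.stack(policy_losses).sum() + torch.stack(value_losses).sum()).backward()
optimizer.step()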

Example 14: compute_gradient_penalty

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def compute_gradient_penalty(D, real_samples, fake_samples):
    """Calculates the gradient penalty loss for WGAN GP"""
    # Random weight term for interpolation between real and fake samples
    alpha = torch.cuda.FloatTensor(np.random.random((real_samples.size(0), 1)))
    # Get random interpolation between real and fake samples
    interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
    d_interpolates = D(interpolates)
    fake = torch.autograd.Variable(torch.cuda.FloatTensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False)
    # Get gradient w.r.t. interpolates
    gradients = torch.autograd.grad(
        outputs=d_interpolates,  # critic scores on the interpolated samples
        inputs=interpolates,     # the interpolated samples themselves
        grad_outputs=fake,
        create_graph=True,
        retain_graph=True,
        only_inputs=True,
    )[0]
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty 
Author: hwang1996, Project: ACME, Lines: 22, Source: train.py
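
In the critic update of WGAN-GP, the penalty is added to the Wasserstein loss. A sketch of the surrounding training step, where D, G, z, real_samples, and optimizer_D are assumed to exist:

lambda_gp = 10                                   # coefficient from the WGAN-GP paper

optimizer_D.zero_grad()
fake_samples = G(z).detach()                     # do not backprop into G here
gp = compute_gradient_penalty(D, real_samples, fake_samples)
d_loss = D(fake_samples).mean() - D(real_samples).mean() + lambda_gp * gp
d_loss.backward()
optimizer_D.step()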

Example 15: sliced_score_matching

# Required import: import torch [as alias]
# Or: from torch import autograd [as alias]
def sliced_score_matching(energy_net, samples, n_particles=1):
    dup_samples = samples.unsqueeze(0).expand(n_particles, *samples.shape).contiguous().view(-1, *samples.shape[1:])
    dup_samples.requires_grad_(True)
    vectors = torch.randn_like(dup_samples)
    vectors = vectors / torch.norm(vectors, dim=-1, keepdim=True)

    logp = -energy_net(dup_samples).sum()
    grad1 = autograd.grad(logp, dup_samples, create_graph=True)[0]
    gradv = torch.sum(grad1 * vectors)
    loss1 = torch.sum(grad1 * vectors, dim=-1) ** 2 * 0.5
    grad2 = autograd.grad(gradv, dup_samples, create_graph=True)[0]
    loss2 = torch.sum(vectors * grad2, dim=-1)

    loss1 = loss1.view(n_particles, -1).mean(dim=0)
    loss2 = loss2.view(n_particles, -1).mean(dim=0)
    loss = loss1 + loss2
    return loss.mean(), loss1.mean(), loss2.mean() 
Author: ermongroup, Project: ncsn, Lines: 19, Source: sliced_sm.py
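
A minimal usage sketch with a toy energy network; the MLP is an assumption, and any network producing one scalar per sample works:

import torch
import torch.nn as nn

energy_net = nn.Sequential(nn.Linear(2, 64), nn.Softplus(), nn.Linear(64, 1))
samples = torch.randn(128, 2)

loss, loss1, loss2 = sliced_score_matching(energy_net, samples, n_particles=4)
loss.backward()                  # gradients flow into energy_net's parameters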


Note: The torch.autograd examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, so consult each project's license before distributing or using the code. Do not reproduce without permission.