

Python Variable.dim Method Code Examples

This article collects typical usage examples of the torch.autograd.Variable.dim method in Python. If you are wondering what Variable.dim does and how to use it, the curated examples below should help. You can also explore further usage examples of the containing class, torch.autograd.Variable.


Four code examples of the Variable.dim method are shown below, ordered by popularity.
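Before the examples, here is a minimal illustrative sketch of what Variable.dim() itself returns. This snippet is not taken from any of the projects below; note that since PyTorch 0.4, plain tensors expose .dim() directly and Variable is a thin legacy wrapper:

import torch
from torch.autograd import Variable

# Wrap a 3x5 tensor in a Variable (legacy API; in PyTorch >= 0.4 a plain
# tensor supports .dim() and autograd directly).
v = Variable(torch.zeros(3, 5))

print(v.dim())   # 2  -- the number of dimensions (rank) of the tensor
print(v.size())  # torch.Size([3, 5])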

Example 1: probs

# Required import: from torch.autograd import Variable
# Method demonstrated: Variable.dim (i.e. torch.autograd.Variable.dim)
    # Assumed context: torch, torch.nn.functional as F, Variable, and an
    # EPSILON constant are available at module level; self.generative_vocab_size
    # is an attribute of the model.
    def probs(self, generator, outputs, vocab_pointer_switches, context_question_switches,
              context_attention, question_attention,
              context_indices, question_indices,
              oov_to_limited_idx):

        size = list(outputs.size())

        # Project decoder outputs onto the generative vocabulary and softmax
        # over the last dimension.
        size[-1] = self.generative_vocab_size
        scores = generator(outputs.view(-1, outputs.size(-1))).view(size)
        p_vocab = F.softmax(scores, dim=scores.dim() - 1)
        scaled_p_vocab = vocab_pointer_switches.expand_as(p_vocab) * p_vocab

        # Extend the vocabulary distribution with near-zero mass for
        # out-of-vocabulary tokens that can only be copied from the input.
        effective_vocab_size = self.generative_vocab_size + len(oov_to_limited_idx)
        if self.generative_vocab_size < effective_vocab_size:
            size[-1] = effective_vocab_size - self.generative_vocab_size
            buff = Variable(scaled_p_vocab.data.new(*size).fill_(EPSILON))
            scaled_p_vocab = torch.cat([scaled_p_vocab, buff], dim=buff.dim() - 1)

        # Pointer distribution over the context: scatter-add attention weights
        # onto the vocabulary indices of the attended context tokens.
        p_context_ptr = Variable(scaled_p_vocab.data.new(*scaled_p_vocab.size()).fill_(EPSILON))
        p_context_ptr.scatter_add_(p_context_ptr.dim() - 1,
                                   context_indices.unsqueeze(1).expand_as(context_attention),
                                   context_attention)
        scaled_p_context_ptr = (context_question_switches * (1 - vocab_pointer_switches)).expand_as(p_context_ptr) * p_context_ptr

        # Same construction for the question pointer.
        p_question_ptr = Variable(scaled_p_vocab.data.new(*scaled_p_vocab.size()).fill_(EPSILON))
        p_question_ptr.scatter_add_(p_question_ptr.dim() - 1,
                                    question_indices.unsqueeze(1).expand_as(question_attention),
                                    question_attention)
        scaled_p_question_ptr = ((1 - context_question_switches) * (1 - vocab_pointer_switches)).expand_as(p_question_ptr) * p_question_ptr

        # The final distribution is the switch-weighted mixture of the three.
        probs = scaled_p_vocab + scaled_p_context_ptr + scaled_p_question_ptr
        return probs
Author: AhlamMD, Project: decaNLP, Source file: multitask_question_answering_network.py
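The pointer distributions above are built with scatter_add_, which accumulates attention mass onto vocabulary indices. A minimal sketch of that mechanic in isolation, using the modern tensor API with illustrative shapes and values (not taken from decaNLP):

import torch

# Attention over 4 source positions for one decoding step.
attention = torch.tensor([[0.1, 0.4, 0.3, 0.2]])
# Vocabulary index of the token at each source position; positions 0 and 3
# happen to contain the same token (index 2).
indices = torch.tensor([[2, 5, 7, 2]])

# Scatter-add the attention mass into a distribution over a 10-word vocab.
p_ptr = torch.zeros(1, 10)
p_ptr.scatter_add_(p_ptr.dim() - 1, indices, attention)

print(p_ptr)
# index 2 receives 0.1 + 0.2 = 0.3; index 5 gets 0.4; index 7 gets 0.3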

Example 2: replace_masked_values

# Required import: from torch.autograd import Variable
# Method demonstrated: Variable.dim (i.e. torch.autograd.Variable.dim)
def replace_masked_values(tensor: Variable, mask: Variable, replace_with: float) -> Variable:
    """
    Replaces all masked values in ``tensor`` with ``replace_with``.  ``mask`` must be broadcastable
    to the same shape as ``tensor``. We require that ``tensor.dim() == mask.dim()``, as otherwise we
    won't know which dimensions of the mask to unsqueeze.
    """
    # We'll build a tensor of the same shape as `tensor`, zero out masked values, then add back in
    # the `replace_with` value.
    if tensor.dim() != mask.dim():
        # ConfigurationError is assumed imported at module level
        # (in AllenNLP-derived code it lives in allennlp.common.checks).
        raise ConfigurationError("tensor.dim() (%d) != mask.dim() (%d)" % (tensor.dim(), mask.dim()))
    one_minus_mask = 1.0 - mask
    values_to_add = replace_with * one_minus_mask
    return tensor * mask + values_to_add
Author: cyzhangAThit, Project: GLUE-baselines, Source file: util.py
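A quick usage sketch for replace_masked_values, with illustrative values. A typical use is pushing padded positions to a large negative value before a softmax, so padding receives roughly zero probability:

import torch
from torch.autograd import Variable

tensor = Variable(torch.Tensor([[1.0, 2.0, 3.0]]))
mask = Variable(torch.Tensor([[1.0, 1.0, 0.0]]))  # 0 marks padding

# The masked position is overwritten with -1e7; unmasked values pass through.
result = replace_masked_values(tensor, mask, -1e7)
print(result)  # [[1., 2., -10000000.]]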

Example 3: backward

# Required import: from torch.autograd import Variable
# Method demonstrated: Variable.dim (i.e. torch.autograd.Variable.dim)
    def backward(self, gradient, image):
        # Lazy import: torch is only required when the model is actually used.
        # numpy is assumed imported at module level as np.
        import torch
        from torch.autograd import Variable

        assert gradient.ndim == 1

        gradient = torch.from_numpy(gradient)
        if self.cuda:  # pragma: no cover
            gradient = gradient.cuda()
        gradient = Variable(gradient)

        image = self._process_input(image)
        assert image.ndim == 3
        images = image[np.newaxis]
        images = torch.from_numpy(images)
        if self.cuda:  # pragma: no cover
            images = images.cuda()
        images = Variable(images, requires_grad=True)
        predictions = self._model(images)

        # The batch has size 1; take the single prediction vector.
        predictions = predictions[0]

        assert gradient.dim() == 1
        assert predictions.dim() == 1
        assert gradient.size() == predictions.size()

        loss = torch.dot(predictions, gradient)
        loss.backward()
        # should be the same as predictions.backward(gradient=gradient)

        grad = images.grad

        grad = grad.data
        if self.cuda:  # pragma: no cover
            grad = grad.cpu()
        grad = grad.numpy()
        grad = self._process_gradient(grad)
        grad = np.squeeze(grad, axis=0)
        assert grad.shape == image.shape

        return grad
Author: wfleshman, Project: foolbox, Source file: pytorch.py
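The dot-product trick noted in the comment ("should be the same as predictions.backward(gradient=gradient)") can be verified in isolation. A minimal sketch with illustrative values, using the modern autograd API:

import torch

x = torch.randn(4, requires_grad=True)
gradient = torch.tensor([1.0, 0.0, 2.0, -1.0])

# Stand-in for a model forward pass.
predictions = x * 3.0

# Route 1: reduce to a scalar loss via a dot product, then backward().
loss = torch.dot(predictions, gradient)
loss.backward()
grad_via_dot = x.grad.clone()

# Route 2: feed the vector directly as the cotangent of backward().
x.grad = None
predictions = x * 3.0
predictions.backward(gradient=gradient)

# Both routes compute the same vector-Jacobian product.
assert torch.allclose(grad_via_dot, x.grad)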

Example 4: nllloss_double_backwards

# Required import: from torch.autograd import Variable
# Method demonstrated: Variable.dim (i.e. torch.autograd.Variable.dim)
def nllloss_double_backwards(ctx, ggI):
    # Double-backward for NLLLoss (legacy PyTorch autograd internals):
    # ggI is the incoming grad-of-grad; ctx holds state saved in the forward.
    t = ctx.saved_variables
    target = t[1]
    weights = Variable(ctx.additional_args[1])
    size_average = ctx.additional_args[0]
    ignore_index = ctx.additional_args[3]
    reduce = ctx.additional_args[4]

    gI = None

    # can't scatter/gather on indices outside of range, let's just put them in range
    # and 0 out the weights later (so it doesn't matter where in range we put them)
    target_mask = target == ignore_index
    safe_target = target.clone()
    safe_target.masked_fill_(target_mask, 0)

    if weights.dim() == 0:
        # No per-class weights were given: every target contributes weight 1.
        weights_to_scatter = Variable(ggI.data.new(safe_target.size()).fill_(1))
    else:
        # Unsqueeze the per-class weight vector until it matches the target's
        # rank, expand it over the non-class dimensions, then gather each
        # element's class weight.
        weights_maybe_resized = weights
        while weights_maybe_resized.dim() < target.dim():
            weights_maybe_resized = weights_maybe_resized.unsqueeze(1)

        weights_maybe_resized = weights_maybe_resized.expand(weights.size()[0:1] + target.size()[1:])
        weights_to_scatter = weights_maybe_resized.gather(0, safe_target)

    weights_to_scatter.masked_fill_(target_mask, 0)
    divisor = weights_to_scatter.sum() if size_average and reduce else 1
    weights_to_scatter = -1 * weights_to_scatter / divisor
    zeros = Variable(ggI.data.new(ggI.size()).zero_())
    mask = zeros.scatter_(1, safe_target.unsqueeze(1), weights_to_scatter.unsqueeze(1))

    if reduce:
        ggO = (ggI * mask).sum()
    else:
        ggO = (ggI * mask).sum(dim=1)

    return gI, None, ggO, None, None, None
Author: Jsmilemsj, Project: pytorch, Source file: auto_double_backwards.py
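The "move out-of-range targets into range, then zero out their weights" trick used above can be isolated in a short sketch (illustrative values; gather stands in for the scatter used above, but the indexing constraint is the same):

import torch

ignore_index = -100
target = torch.tensor([2, 0, -100, 1])   # -100 marks ignored positions
weights = torch.tensor([0.5, 1.0, 2.0])  # per-class weights

# gather/scatter reject out-of-range indices, so park ignored targets at a
# valid class (0) and remember where they were.
target_mask = target == ignore_index
safe_target = target.clone()
safe_target.masked_fill_(target_mask, 0)

# Pick each element's class weight, then zero the ignored positions, so it
# never matters which in-range class they were parked at.
weights_per_element = weights.gather(0, safe_target)
weights_per_element.masked_fill_(target_mask, 0)

print(weights_per_element)  # tensor([2.0000, 0.5000, 0.0000, 1.0000])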


Note: The torch.autograd.Variable.dim examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their original authors, who retain copyright; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.