

Python Variable.pow Method Code Examples

This article collects typical usage examples of the Python method torch.autograd.Variable.pow. If you are wondering what Variable.pow does, how to call it, or what real uses look like, the curated code examples below should help. You can also browse further usage examples of the enclosing class, torch.autograd.Variable.


The following presents 9 code examples of the Variable.pow method, sorted by popularity by default.

Example 1: vector_grad

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import pow [as alias]
def vector_grad():
    x = Variable(torch.ones(2)*3, requires_grad=True)
    y = Variable(torch.ones(2)*4, requires_grad=True)
    z = x.pow(2) + 3*y.pow(2)
    z.backward(torch.ones(2))
    print(x.grad)
    print(y.grad)
Developer: gonglixue, Project: PRML_Python, Lines: 9, Source: gradient.py
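
Since PyTorch 0.4, Variable has been merged into Tensor, so requires_grad is set on tensors directly. A minimal modern sketch equivalent to vector_grad, with the analytic gradients worked out (dz/dx = 2x, dz/dy = 6y):

import torch

x = (torch.ones(2) * 3).requires_grad_()   # replaces Variable(..., requires_grad=True)
y = (torch.ones(2) * 4).requires_grad_()
z = x.pow(2) + 3 * y.pow(2)
z.backward(torch.ones(2))   # seed gradient of ones, as in the example
print(x.grad)               # tensor([6., 6.])   -- 2x
print(y.grad)               # tensor([24., 24.]) -- 6y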

Example 2: grad

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import pow [as alias]
def grad():
    W = Variable(torch.FloatTensor([[1, 1, 1], [2, 2, 2]]), requires_grad=True)
    x = Variable(torch.FloatTensor([1, 2, 3]), requires_grad=False)
    B = Variable(torch.FloatTensor([2, 2]), requires_grad=True)

    u = Variable(torch.FloatTensor([0, 0, 0]), requires_grad=False)

    y = W.mv(x-u) + B.pow(2)
    z = W.mv(x - u) + B.pow(2)

    # Backpropagating the two branches separately (zeroing the grads in
    # between) would show that y and z each contribute the same gradient:
    # y.backward(torch.ones(2))
    # print(W.grad)
    # print(B.grad)
    # W.grad.data.zero_()
    # B.grad.data.zero_()
    # z.backward(torch.ones(2))
    # print(W.grad)
    # print(B.grad)

    r = y + z
    r.backward(torch.ones(2))   # r has shape (2,), so the seed gradient must match
    print(W.grad)   # [[2., 4., 6.], [2., 4., 6.]] -- both branches contribute (x - u)
    print(B.grad)   # [8., 8.] -- both branches contribute 2 * B
Developer: gonglixue, Project: PRML_Python, Lines: 29, Source: gradient.py
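
The commented-out section above relies on gradient accumulation: successive backward calls add into .grad, which is why the grads must be zeroed between them. A minimal standalone sketch of that behavior:

import torch

w = torch.tensor([1.0], requires_grad=True)
(w * 2).backward()
print(w.grad)   # tensor([2.])
(w * 3).backward()
print(w.grad)   # tensor([5.]) -- the new gradient is added, not overwritten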

Example 3: scalar_grad

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import pow [as alias]
def scalar_grad():
    x = Variable(torch.ones(1)*3, requires_grad=True)   # 3
    y = Variable(torch.ones(1)*4, requires_grad=True)   # 4
    z = x.pow(2) + 3*y.pow(2)

    z.backward()

    # dz/dx = 2x = 6
    # dz/dy = 6y = 24

    print(x.grad)
    print(y.grad)
Developer: gonglixue, Project: PRML_Python, Lines: 14, Source: gradient.py
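
The same derivatives can be obtained without mutating .grad via torch.autograd.grad; a minimal sketch in post-0.4 style:

import torch

x = torch.tensor([3.0], requires_grad=True)
y = torch.tensor([4.0], requires_grad=True)
z = x.pow(2) + 3 * y.pow(2)
dz_dx, dz_dy = torch.autograd.grad(z, (x, y))
print(dz_dx)   # tensor([6.])  == 2 * x
print(dz_dy)   # tensor([24.]) == 6 * y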

Example 4: update

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import pow [as alias]
    def update(self):
        

        next_value = self.actor_critic(Variable(self.rollouts.states[-1], volatile=True))[0].data

        self.rollouts.compute_returns(next_value, self.use_gae, self.gamma, self.tau)

        # values, action_log_probs, dist_entropy = self.actor_critic.evaluate_actions(
        #                                             Variable(self.rollouts.states[:-1].view(-1, *self.obs_shape)), 
        #                                             Variable(self.rollouts.actions.view(-1, self.action_shape)))


        values = torch.cat(self.rollouts.value_preds, 0).view(self.num_steps, self.num_processes, 1) 
        action_log_probs = torch.cat(self.rollouts.action_log_probs).view(self.num_steps, self.num_processes, 1)
        dist_entropy = torch.cat(self.rollouts.dist_entropy).view(self.num_steps, self.num_processes, 1)


        self.rollouts.value_preds = []
        self.rollouts.action_log_probs = []
        self.rollouts.dist_entropy = []

        advantages = Variable(self.rollouts.returns[:-1]) - values
        value_loss = advantages.pow(2).mean()

        action_loss = -(Variable(advantages.data) * action_log_probs).mean()

        self.optimizer.zero_grad()
        cost = action_loss + value_loss*self.value_loss_coef - dist_entropy.mean()*self.entropy_coef
        cost.backward()

        nn.utils.clip_grad_norm(self.actor_critic.parameters(), self.grad_clip)

        self.optimizer.step()
Developer: chriscremer, Project: Other_Code, Lines: 35, Source: a2c_agents.py
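
For reference, the critic term advantages.pow(2).mean() above is an ordinary mean-squared error between rollout returns and value predictions; a small sketch with hypothetical shapes (num_steps=5, num_processes=2):

import torch
import torch.nn.functional as F

returns = torch.randn(5, 2, 1)   # hypothetical rollout returns
values = torch.randn(5, 2, 1)    # hypothetical value predictions
advantages = returns - values
value_loss = advantages.pow(2).mean()
assert torch.allclose(value_loss, F.mse_loss(values, returns))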

Example 5: update

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import pow [as alias]
    def update(self):
        
        next_value = self.actor_critic(Variable(self.rollouts.states[-1], volatile=True))[0].data
        self.rollouts.compute_returns(next_value, self.use_gae, self.gamma, self.tau)
        advantages = self.rollouts.returns[:-1] - self.rollouts.value_preds[:-1]

        for _ in range(self.a2c_epochs):
            sampler = BatchSampler(SubsetRandomSampler(range(self.num_processes * self.num_steps)),
                                   self.batch_size * self.num_processes, drop_last=True)
            for indices in sampler:
                indices = torch.LongTensor(indices)
                if self.cuda:
                    indices = indices.cuda()
                states_batch = self.rollouts.states[:-1].view(-1, *self.obs_shape)[indices]
                actions_batch = self.rollouts.actions.view(-1, self.action_shape)[indices]
                return_batch = self.rollouts.returns[:-1].view(-1, 1)[indices]
                adv_targ = Variable(advantages.view(-1, 1)[indices])

                values, action_log_probs, dist_entropy = self.actor_critic.evaluate_actions(Variable(states_batch), Variable(actions_batch))

                value_loss = adv_targ.pow(2).mean()

                action_loss = -(adv_targ * action_log_probs).mean()

                self.optimizer.zero_grad()
                cost = action_loss + value_loss*self.value_loss_coef - dist_entropy*self.entropy_coef
                cost.backward()
                self.optimizer.step()
Developer: chriscremer, Project: Other_Code, Lines: 29, Source: agent_modular2.py
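
The minibatching above comes from torch.utils.data's samplers: a BatchSampler wrapped around a SubsetRandomSampler yields shuffled index batches. A standalone sketch with hypothetical sizes:

import torch
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler

num_samples = 8   # e.g. num_processes * num_steps
sampler = BatchSampler(SubsetRandomSampler(range(num_samples)), 4, drop_last=True)
for indices in sampler:
    print(torch.LongTensor(indices))   # e.g. tensor([5, 0, 3, 7]) -- usable for fancy indexing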

Example 6: SPECTROGRAM

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import pow [as alias]
class SPECTROGRAM(object):
    """Create a spectrogram from a raw audio signal

    Args:
        sr (int): sample rate of audio signal
        ws (int): window size, often called the fft size as well
        hop (int, optional): length of hop between STFT windows. default: ws // 2
        n_fft (int, optional): number of fft bins. default: ws // 2 + 1
        pad (int): two sided padding of signal
        window (torch windowing function): default: torch.hann_window
        wkwargs (dict, optional): arguments for window function

    """
    def __init__(self, sr=16000, ws=400, hop=None, n_fft=None,
                 pad=0, window=torch.hann_window, wkwargs=None):
        if isinstance(window, Variable):
            self.window = window
        else:
            self.window = window(ws) if wkwargs is None else window(ws, **wkwargs)
            self.window = Variable(self.window, volatile=True)
        self.sr = sr
        self.ws = ws
        self.hop = hop if hop is not None else ws // 2
        self.n_fft = n_fft  # number of fft bins
        self.pad = pad
        self.wkwargs = wkwargs

    def __call__(self, sig):
        """
        Args:
            sig (Tensor or Variable): Tensor of audio of size (c, n)

        Returns:
            spec_f (Tensor or Variable): channels x hops x n_fft (c, l, f), where channels
                is unchanged, hops is the number of hops, and n_fft is the
                number of Fourier bins, which should be the window size divided
                by 2 plus 1.

        """
        sig, is_variable = _check_is_variable(sig)

        assert sig.dim() == 2

        spec_f = torch.stft(sig, self.ws, self.hop, self.n_fft,
                            True, True, self.window, self.pad)  # (c, l, n_fft, 2)
        spec_f /= self.window.pow(2).sum().sqrt()
        spec_f = spec_f.pow(2).sum(-1)  # get power of "complex" tensor (c, l, n_fft)
        return spec_f if is_variable else spec_f.data
Developer: SsnL, Project: audio, Lines: 50, Source: transforms.py
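
A minimal usage sketch of the class under the same legacy (pre-0.4) API; the signal shape (channels, samples) follows the docstring, and the _check_is_variable helper comes from the same transforms module:

import torch

sig = torch.randn(1, 16000)            # 1 channel, 1 s of audio at sr=16000
spec = SPECTROGRAM(sr=16000, ws=400)   # hop defaults to ws // 2 = 200
out = spec(sig)                        # power spectrogram of shape (c, hops, n_fft)
print(out.size())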

Example 7: Variable

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import pow [as alias]
userIdx = trainData.user.values
itemIdx = trainData.item.values
rates = trainData.rate.values

K = 20
lambd = 0.00001
learning_rate = 1e-6
U = Variable(torch.randn([len(set(userIdx)), K]), requires_grad=True)
P = Variable(torch.randn([len(set(itemIdx)), K]), requires_grad=True)
R = torch.mm(U, P.t())
ratesPred = torch.gather(R.view(1, -1)[0], 0,
                         Variable(torch.LongTensor(userIdx * len(set(itemIdx)) + itemIdx)))
diff_op = ratesPred - Variable(torch.FloatTensor(rates))
baseLoss = diff_op.pow(2).sum()

# regularizer = lambd * (U.abs().sum() + P.abs().sum())   # L1 alternative
regularizer = lambd * (U.pow(2).sum() + P.pow(2).sum())
loss = baseLoss + regularizer

# optimizer = torch.optim.Adam([U, P], lr=learning_rate)
optimizer = torch.optim.SGD([U, P], lr=learning_rate, momentum=0.9)
print('Training')
for i in range(250):
    optimizer.zero_grad()   # clear accumulated gradients before each step
    loss.backward()
    optimizer.step()
    # Rebuild the graph with the updated U and P; the original loop never
    # reassigned loss, so a second backward would fail on the freed graph.
    R = torch.mm(U, P.t())
    ratesPred = torch.gather(R.view(1, -1)[0], 0,
                             Variable(torch.LongTensor(userIdx * len(set(itemIdx)) + itemIdx)))
    diff_op = ratesPred - Variable(torch.FloatTensor(rates))
    baseLoss = diff_op.pow(2).sum()   # .sum() to match the initial definition (the original switched to .mean() here)
    regularizer = lambd * (U.pow(2).sum() + P.pow(2).sum())
    loss = baseLoss + regularizer
    if i % 50 == 0:
        print('loss:', loss.data.numpy()[0])
Developer: robotsl, Project: pytorchMF, Lines: 33, Source: pytorchMF.py
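
The gather call above works because R.view(1, -1)[0] flattens the num_users x num_items rating matrix row-major, so entry (u, i) lands at flat index u * num_items + i. A quick standalone check of that indexing:

import torch

R = torch.arange(6.0).view(2, 3)    # 2 users x 3 items
flat = R.view(-1)
u, i = 1, 2
assert flat[u * 3 + i] == R[u, i]   # row-major flattening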

Example 8: len

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import pow [as alias]
n = 100
NN_val = len(y_val)

for t in range(500*50):

    i = np.random.choice(range(0,NN), size=n, replace=False)

    x = Variable(torch.from_numpy(feat_train[i]).type(dtype), 
                      requires_grad=False)   

    y_pred = (x.view(n,11776)).mm(w1)

    y_original = Variable(torch.from_numpy(y_train[i]).type(dtype), 
                          requires_grad=False)

    loss = (lambda1 / 2 * w1.pow(2).sum()
            + (y_original.view(n, 1) * torch.log(1 + torch.exp(-y_pred))
               + (1 - y_original.view(n, 1)) * torch.log(1 + torch.exp(y_pred))).sum() / n)

    # Print the training loss on 2000 randomly chosen data points
    if t % 500 == 0:
        print(t / 500)
        S = 2000
        if S > NN_val:
            S = NN_val
        idx = np.random.choice(range(0, NN), size=S, replace=False)   # renamed from t to avoid shadowing the loop counter
        x_2000 = Variable(torch.from_numpy(feat_train[idx]).type(dtype),
                          requires_grad=False)
        y_2000 = Variable(torch.from_numpy(y_train[idx]).type(dtype),
                          requires_grad=False)

        y_pred_2000 = (x_2000.view(S, 11776)).mm(w1)
        gap_all = y_2000 - y_pred_2000
Developer: EmilyYanW, Project: Machine_Learning, Lines: 33, Source: 8.Computer_Vision_Image_Detection_and_Retrieval.py
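
The data term in the loss above is binary cross-entropy on logits, since log(1 + exp(-z)) = -log(sigmoid(z)); a quick numerical check against the built-in:

import torch
import torch.nn.functional as F

z = torch.randn(4, 1)                     # hypothetical logits
y = torch.randint(0, 2, (4, 1)).float()   # hypothetical binary labels
manual = (y * torch.log(1 + torch.exp(-z))
          + (1 - y) * torch.log(1 + torch.exp(z))).mean()
assert torch.allclose(manual, F.binary_cross_entropy_with_logits(z, y))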

Example 9: test_pow

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import pow [as alias]
    def test_pow(self):
        x = Variable(torch.randn(1, 2, 3, 4), requires_grad=True)
        y = Variable(torch.randn(1, 2, 3, 4), requires_grad=True)
        self.assertONNX(lambda x, y: x.pow(y), (x, y))
Developer: inkawhich, Project: pytorch, Lines: 6, Source: test_operators.py
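
assertONNX is a helper in PyTorch's own test suite that exports the traced function and compares it against a stored expected graph. A minimal standalone sketch of exporting pow through the public torch.onnx API (the output path is illustrative):

import torch

class PowModule(torch.nn.Module):
    def forward(self, x, y):
        return x.pow(y)   # maps to the ONNX Pow operator

x = torch.randn(1, 2, 3, 4)
y = torch.randn(1, 2, 3, 4)
torch.onnx.export(PowModule(), (x, y), "pow.onnx")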


Note: The torch.autograd.Variable.pow examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright of the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code. Do not reproduce without permission.