This article collects typical usage examples of the Variable.mul_ method from Python's torch.autograd module. If you have been wondering what Variable.mul_ does, how to call it, or what real uses look like, the curated examples below may help. You can also browse further usage examples of the containing class, torch.autograd.Variable.
Four code examples of the Variable.mul_ method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
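Before the examples, a minimal sketch of what mul_ does: like every underscore-suffixed tensor method, it multiplies in place and returns the object it modified, which is what makes the chained calls in the examples below work. (The old Variable API is used to match the examples; the tensor values are an assumption for illustration.)
import torch
from torch.autograd import Variable

v = Variable(torch.ones(3))
w = v.mul_(2.0)        # in-place: v itself is now [2, 2, 2]
assert w is v          # mul_ returns the same object, so calls chain
v.mul_(3.0).add_(1.0)  # chained in-place ops: v is now [7, 7, 7]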
Example 1: backward
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul_ [as alias]
def backward(ctx, grad_output):
    input1, input2, y = ctx.saved_variables
    grad_input1 = Variable(input1.data.new(input1.size()).zero_())
    grad_input2 = Variable(input1.data.new(input1.size()).zero_())
    # Margin ranking loss hinge: max(0, -y * (input1 - input2) + margin)
    dist = ((input1 - input2).mul_(-1) * y).add_(ctx.margin)
    mask = dist.ge(0)  # gradient flows only where the hinge is active
    grad_input1.masked_fill_(mask, 1)
    grad_input1 = grad_input1.mul_(-1) * y
    grad_input2.masked_fill_(mask, 1)
    grad_input2 = grad_input2 * y
    if ctx.size_average:
        grad_input1.div_(y.size(0))
        grad_input2.div_(y.size(0))
    return grad_input1 * grad_output, grad_input2 * grad_output, None, None, None
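Example 1 hand-derives the gradient of the margin ranking loss. A quick cross-check against autograd is sketched below; the shapes, the margin value, and the mean() reduction (which mirrors the size_average branch) are assumptions for illustration.
import torch
from torch.autograd import Variable

x1 = Variable(torch.randn(5), requires_grad=True)
x2 = Variable(torch.randn(5), requires_grad=True)
y = Variable(torch.sign(torch.randn(5)))  # labels in {-1, +1}
margin = 0.1

# The same hinge the hand-written backward differentiates:
loss = (margin - y * (x1 - x2)).clamp(min=0).mean()
loss.backward()
# x1.grad and x2.grad now hold the gradients Example 1 computes manually.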
Example 2: fucn
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul_ [as alias]
import torch
import matplotlib.pyplot as plt
from torch.autograd import Variable

x = torch.zeros(3, 10)
# expand() scratch work; ndist is defined elsewhere in the original script,
# so these calls are left commented out (note that expand is a tensor
# method, not a top-level torch function):
# ndist.mu_param.expand(3)
# ndist.mu_param.expand(10)
torch.ones(10).unsqueeze(1).expand(10, 2)  # a 1-d tensor needs a singleton dim before expanding

def fucn(**kwargs):
    # fall back to a zero vector when 'mu' is not supplied (the size is an assumption)
    mu = kwargs.get('mu', torch.zeros(10))

torch.zeros(10).unsqueeze(0)  # shape (1, 10)
x = torch.ones(4, 3)
# chained in-place ops: mul_ zeroes x, then add_ sets it to all ones
x.mul_(torch.zeros(4, 3)).add_(torch.ones(4, 3))

plt.clf()
input_dim = 1
hidden_dim = 10
num_samples = 10000
x = Variable(torch.randn(num_samples, input_dim))
plt.subplot(411)
plt.title('x')
plt.hist(x.data.numpy(), bins=30)

fc1 = torch.nn.Linear(input_dim, hidden_dim)
fc1.bias.data = 0.5 * torch.ones(hidden_dim) + 20.0 * torch.randn(hidden_dim)
fc1.weight.data = 10 * torch.ones(hidden_dim, input_dim)
h = fc1(x)
plt.subplot(412)
plt.title('fc(x)')
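Example 2 leans heavily on expand(), whose broadcasting rules are easy to trip over. A standalone sketch of the rules the calls above rely on (plain torch, no state from the example):
import torch

t = torch.ones(10)                # shape (10,)
a = t.expand(2, 10)               # OK: new leading dimensions may be prepended
b = t.unsqueeze(1).expand(10, 2)  # OK: only singleton dimensions can grow
# t.expand(10, 2)                 # would raise: a trailing dim of 10 cannot become 2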
Example 3: dev
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul_ [as alias]
import torch.nn as nn
from torch.autograd import Variable

def dev(model, dev_loader, loss_fn, decoder, logger, add_cnn=True, USE_CUDA=True):
    '''Evaluation pass over the dev set. Unlike train(), no backward pass is
    needed, but the accuracy has to be computed.
    Args:
        model : the model
        dev_loader : loader object for the dev set
        loss_fn : loss function
        decoder : decoder object that turns network output into text
        logger : logger object
        USE_CUDA : whether to run on the GPU
    Returns:
        acc * 100 : character accuracy, or word accuracy if space is not a label
        average_loss : average loss over the dev set
    '''
    model.eval()
    total_cer = 0
    total_tokens = 0
    total_loss = 0
    i = 0
    for data in dev_loader:
        inputs, targets, input_sizes, input_sizes_list, target_sizes = data
        batch_size = inputs.size(0)
        if not add_cnn:
            inputs = inputs.transpose(0, 1)
        inputs = Variable(inputs, requires_grad=False)
        targets = Variable(targets, requires_grad=False)
        target_sizes = Variable(target_sizes, requires_grad=False)
        if USE_CUDA:
            inputs = inputs.cuda()
        if not add_cnn:
            inputs = nn.utils.rnn.pack_padded_sequence(inputs, input_sizes_list)
        out, probs = model(inputs, dev=True)
        if add_cnn:
            max_length = out.size(0)
            # scale the relative input lengths in place to absolute frame counts
            input_sizes = Variable(input_sizes.mul_(int(max_length)).int(), requires_grad=False)
            input_sizes_list = [int(x * max_length) for x in input_sizes_list]
        else:
            input_sizes = Variable(input_sizes, requires_grad=False)
        loss = loss_fn(out, targets, input_sizes, target_sizes)
        loss /= batch_size
        total_loss += loss.data[0]
        probs = probs.data.cpu()
        targets = targets.data
        target_sizes = target_sizes.data
        if decoder.space_idx == -1:
            total_cer += decoder.phone_word_error(probs, input_sizes_list, targets, target_sizes)[1]
        else:
            total_cer += decoder.phone_word_error(probs, input_sizes_list, targets, target_sizes)[0]
        total_tokens += sum(target_sizes)
        i += 1
    acc = 1 - float(total_cer) / total_tokens
    average_loss = total_loss / i
    return acc * 100, average_loss
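A hedged sketch of how dev() might be called after an epoch; model, dev_loader, loss_fn, decoder, and logger stand in for objects built elsewhere and are assumptions here.
acc, dev_loss = dev(model, dev_loader, loss_fn, decoder, logger,
                    add_cnn=True, USE_CUDA=torch.cuda.is_available())
logger.info('dev acc = %.2f%%, dev loss = %.4f' % (acc, dev_loss))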
Example 4: train
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import mul_ [as alias]
import torch.nn as nn
from torch.autograd import Variable

def train(model, train_loader, loss_fn, optimizer, logger, add_cnn=True, print_every=20, USE_CUDA=True):
    '''Train for one epoch, i.e. one full pass over the training set.
    Args:
        model : the network model
        train_loader : loader object for the training set
        loss_fn : loss function, here CTCLoss
        optimizer : optimizer object
        logger : logger object
        print_every : print the loss every print_every (default 20) batches
        USE_CUDA : whether to run on the GPU
    Returns:
        average_loss : average loss over the epoch
    '''
    model.train()
    total_loss = 0
    print_loss = 0
    i = 0
    for data in train_loader:
        inputs, targets, input_sizes, input_sizes_list, target_sizes = data
        batch_size = inputs.size(0)
        if not add_cnn:
            inputs = inputs.transpose(0, 1)
        inputs = Variable(inputs, requires_grad=False)
        targets = Variable(targets, requires_grad=False)
        target_sizes = Variable(target_sizes, requires_grad=False)
        if USE_CUDA:
            inputs = inputs.cuda()
        if not add_cnn:
            inputs = nn.utils.rnn.pack_padded_sequence(inputs, input_sizes_list)
        out = model(inputs)
        if add_cnn:
            max_length = out.size(0)
            # scale the relative input lengths in place to absolute frame counts
            input_sizes = Variable(input_sizes.mul_(int(max_length)).int(), requires_grad=False)
            input_sizes_list = [int(x * max_length) for x in input_sizes_list]
        else:
            input_sizes = Variable(input_sizes, requires_grad=False)
        loss = loss_fn(out, targets, input_sizes, target_sizes)
        loss /= batch_size
        print_loss += loss.data[0]
        if (i + 1) % print_every == 0:
            print('batch = %d, loss = %.4f' % (i + 1, print_loss / print_every))
            logger.debug('batch = %d, loss = %.4f' % (i + 1, print_loss / print_every))
            print_loss = 0
        total_loss += loss.data[0]
        optimizer.zero_grad()
        loss.backward()
        nn.utils.clip_grad_norm(model.parameters(), 400)
        optimizer.step()
        i += 1
    average_loss = total_loss / i
    print("Epoch done, average loss: %.4f" % average_loss)
    logger.info("Epoch done, average loss: %.4f" % average_loss)
    return average_loss
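Examples 3 and 4 pair naturally into a full training schedule. A hedged sketch of the outer loop; num_epochs and every object passed in are assumptions standing in for things built elsewhere.
for epoch in range(num_epochs):
    train_loss = train(model, train_loader, loss_fn, optimizer, logger,
                       add_cnn=True, print_every=20, USE_CUDA=True)
    acc, dev_loss = dev(model, dev_loader, loss_fn, decoder, logger,
                        add_cnn=True, USE_CUDA=True)
    logger.info('epoch %d: train %.4f, dev %.4f, acc %.2f%%'
                % (epoch, train_loss, dev_loss, acc))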