This page collects typical usage examples of the Variable.repeat method from Python's torch.autograd module. If you are wondering what Variable.repeat does, how to call it, or what it looks like in real code, the curated examples below should help. You can also browse further usage examples of the containing class, torch.autograd.Variable.
The four code examples of Variable.repeat shown below are sorted by popularity by default.
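Before diving into the examples, a minimal sketch of repeat itself may help (a sketch under the legacy pre-0.4 API, where Variable still wraps a Tensor; on modern PyTorch, Tensor.repeat behaves identically):

import torch
from torch.autograd import Variable

x = Variable(torch.arange(3).view(1, 3).float())  # shape (1, 3)

# repeat(a, b) tiles the data a times along dim 0 and b times along dim 1.
y = x.repeat(2, 3)
print(y.size())  # torch.Size([2, 9])

# With more repeat counts than dimensions, leading singleton dims are added first.
z = x.repeat(4, 2, 1)
print(z.size())  # torch.Size([4, 2, 3])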
Example 1: forward
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import repeat [as alias]
def forward(self, input, pad_frame_size, patch_shape):
    n_patch_h = patch_shape[0][0]
    n_patch_w = patch_shape[0][1]
    # Zero-pad the input frame up to the padded frame size.
    input_padded = Variable(torch.zeros((input.size(0), input.size(1), input.size(2),
                                         pad_frame_size[0][0], pad_frame_size[0][1]))).cuda()
    input_padded[:, :, :, 0:input.size(3), 0:input.size(4)] = input
    # save dimensions
    self.input_padded_size = input_padded.size()
    self.input_padded_numel = input_padded.numel()
    # This is the compressed frame: tile the learned weights over batch and channel dims.
    weight = self.weight.repeat(input.size(0), input.size(1), 1,
                                n_patch_h + 1, n_patch_w + 1)
    output = torch.mul(input_padded, weight).sum(2)
    if self.noise is not None:
        output = self.add_noise(output, input.size(), self.noise)
    # Create patches from the compressed frame.
    output_patches = output.unfold(2, self.spatial_size, self.step).unfold(3, self.spatial_size, self.step)
    self.patches_size = (output_patches.size(1), output_patches.size(2), output_patches.size(3))
    output_patches = output_patches.permute(0, 1, 2, 3, 5, 4).contiguous().view(
        (output_patches.size(0), -1, self.spatial_size ** 2))
    if self.mean is not None:
        mean_var = Variable(torch.from_numpy(self.mean)).float().cuda()
        std_var = Variable(torch.from_numpy(self.std)).float().cuda()
        # Tile the per-patch mean/std over the batch and patch dimensions.
        mean = mean_var.repeat(output_patches.size(0), output_patches.size(1), 1)
        std = std_var.repeat(output_patches.size(0), output_patches.size(1), 1)
        output_patches = (output_patches - mean) / std
    return output_patches, output[:, :, 0:input.size(3), 0:input.size(4)]
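A side note on the normalization above: the two repeat calls materialize full (batch, n_patches, spatial_size**2) copies of the mean and std vectors, but since PyTorch supports NumPy-style broadcasting, subtracting the vectors directly gives the same result without the copies. A minimal sketch under made-up shapes (none of these names come from the original module):

import torch

batch, n_patches, patch_dim = 2, 6, 9
patches = torch.randn(batch, n_patches, patch_dim)
mean = torch.randn(patch_dim)
std = torch.rand(patch_dim) + 0.5

# Explicit tiling, as in Example 1 ...
a = (patches - mean.repeat(batch, n_patches, 1)) / std.repeat(batch, n_patches, 1)
# ... versus letting broadcasting expand over the leading dimensions.
b = (patches - mean) / std
assert torch.allclose(a, b)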
Example 2: Variable
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import repeat [as alias]
# Excerpt: fix, fix_length, label_num, fix_onehot_list, fill_list, opt,
# total_fix_length, nz, G and SND are defined earlier in the script.
fix_onehot = torch.FloatTensor(fix_length, label_num)
fix_onehot.zero_()
fix_onehot.scatter_(1, fix, 1)
fix_onehot = fix_onehot.view(-1, label_num, 1, 1)
fix_onehot = Variable(fix_onehot).cuda()
fix_onehot_list.append(fix_onehot)
fill = torch.zeros([label_num, label_num, 64, 64])
for i in range(label_num):
    fill[i, i, :, :] = 1
fill_list.append(fill)
for i in range(len(fix_onehot_list)):
    fix_onehot = fix_onehot_list[i]
    # Integer division: repeat counts must be ints (plain / yields a float in Python 3).
    repeat_time = total_fix_length // (opt.test_num_per_label * fix_onehot.shape[1])
    fix_onehot_list[i] = fix_onehot.repeat(repeat_time, 1, 1, 1)
fix_onehot_concat = torch.cat(fix_onehot_list, 1)
fixed_noise = torch.FloatTensor(total_fix_length, nz, 1, 1).normal_(0, 1)
#fixed_input = torch.cat([fixed_noise, fix_onehot],1)
fixed_noise = Variable(fixed_noise).cuda()
criterion = nn.BCELoss()
if opt.cuda:
    G.cuda()
    SND.cuda()
    criterion.cuda()
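The loop above uses repeat to tile each block of one-hot label maps along the batch dimension until it fills the fixed evaluation batch. A standalone sketch of the shape arithmetic (the sizes here are invented, not taken from the original script):

import torch

label_num, test_num_per_label, total_fix_length = 10, 1, 40

onehot = torch.eye(label_num).view(-1, label_num, 1, 1)  # (10, 10, 1, 1)
repeat_time = total_fix_length // (test_num_per_label * onehot.shape[1])  # 40 // 10 = 4
tiled = onehot.repeat(repeat_time, 1, 1, 1)
print(tiled.shape)  # torch.Size([40, 10, 1, 1])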
Example 3: test_repeat_dim_overflow
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import repeat [as alias]
def test_repeat_dim_overflow(self):
    x = Variable(torch.randn(1, 2), requires_grad=True)
    self.assertONNX(lambda x: x.repeat(1, 2, 3, 4), x)
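assertONNX is part of PyTorch's internal operator-export test suite; the point of the test is that repeat may receive more repeat counts (four) than the input has dimensions (two), in which case leading singleton dimensions are implied. A hedged standalone equivalent using the public torch.onnx.export API (the in-memory buffer is just for illustration):

import io
import torch

class RepeatOverflow(torch.nn.Module):
    def forward(self, x):
        # 2-D input, four repeat counts: x is treated as (1, 1, 1, 2).
        return x.repeat(1, 2, 3, 4)

x = torch.randn(1, 2)
print(RepeatOverflow()(x).shape)  # torch.Size([1, 2, 3, 8])
torch.onnx.export(RepeatOverflow(), (x,), io.BytesIO())  # repeat lowers to an ONNX Tile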
Example 4: fit
# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import repeat [as alias]
def fit():
    epochs = 50000
    hidden_size = 128
    emb_size = 128
    resample = False
    gamma = 0.99
    lr = 1e-4
    batch_size = 64
    use_cuda = True
    random_state = 42
    num_layers = 1
    # data (excerpt: load_db, Vectorizer, grammar, shuffle, _resample,
    # RnnModel, _weights_init, Adam and np are defined/imported elsewhere)
    db = load_db()
    # keep only successfully finished jobs
    jobs = db.jobs_with(state='success')
    #jobs = db.all_jobs()
    jobs = list(jobs)
    #jobs = [j for j in jobs if j['content']['info']['max_depth'] == 5]
    X = [j['content']['info']['architecture'] for j in jobs]
    R = [max(j['stats']['valid']['accuracy']) if j['state'] == 'success' else -0.1 for j in jobs]
    #threshold = 0.8
    #X = [x for x, r in zip(X, R) if r > threshold]
    #R = [1 for r in R if r > threshold]
    R = np.array(R)
    vect = Vectorizer(grammar, pad=True)
    X = vect.transform(X)
    X = [[0] + x for x in X]
    X = np.array(X).astype('int32')
    print(X.shape)
    X, R = shuffle(X, R, random_state=random_state)
    n_train = int(len(X) * 0.8)
    X_train = X[0:n_train]
    R_train = R[0:n_train]
    X_test = X[n_train:]
    R_test = R[n_train:]
    if resample:
        X_train, R_train = _resample(X_train, R_train, nb=10)
    print('Number of training data : {}'.format(len(X_train)))
    # model
    vocab_size = len(vect.tok_to_id)
    model = RnnModel(
        vocab_size=vocab_size,
        emb_size=emb_size,
        hidden_size=hidden_size,
        num_layers=num_layers,
        use_cuda=use_cuda,
    )
    model.vect = vect
    model.grammar = grammar
    model.apply(_weights_init)
    if use_cuda:
        model = model.cuda()
    optim = Adam(model.parameters(), lr=lr)
    # Training: inputs are the sequences shifted by one token relative to targets.
    I_train = X_train[:, 0:-1]
    O_train = X_train[:, 1:]
    I_test = X_test[:, 0:-1]
    O_test = X_test[:, 1:]
    avg_loss = 0.
    avg_precision = 0.
    nupdates = 0
    best_loss = float('inf')
    last_epoch_annealing = 0
    last_epoch_improving = 0
    for i in range(epochs):
        model.train()
        for j in range(0, len(I_train), batch_size):
            inp = I_train[j:j+batch_size]
            out = O_train[j:j+batch_size]
            r = R_train[j:j+batch_size]
            out = out.flatten()
            inp = torch.from_numpy(inp).long()
            inp = Variable(inp)
            out = torch.from_numpy(out).long()
            out = Variable(out)
            r = torch.from_numpy(r).float()
            # Tile each per-sequence reward across the timestep dimension so it
            # lines up, row-major, with the flattened (batch * seq_len) targets.
            r = r.view(-1, 1).repeat(1, O_train.shape[1])
            r = r.view(-1, 1)
            r = Variable(r)
            if use_cuda:
                r = r.cuda()
                inp = inp.cuda()
#......... remainder of this code omitted .........
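To make the reward-tiling step concrete, a small self-contained sketch (shapes invented for illustration) of why each per-sequence reward is expanded to one value per timestep before flattening:

import torch

seq_len = 3
rewards = torch.tensor([1.0, 2.0, 3.0, 4.0])  # one scalar reward per sequence

# Column vector -> tiled across timesteps -> flattened row-major,
# matching the order of out.flatten() in Example 4.
per_token = rewards.view(-1, 1).repeat(1, seq_len).view(-1, 1)
print(per_token.squeeze().tolist())
# [1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0]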