This article collects and summarizes typical usage examples of the torch.randperm function in Python. If you have been wondering what exactly randperm does, how to call it, or what real-world uses look like, the curated code examples below may help.
The following presents 15 code examples of the randperm function, ordered by popularity by default.
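As a quick orientation before the examples: torch.randperm(n) returns a random permutation of the integers 0 to n-1 as a LongTensor, and indexing a tensor with that permutation is the standard idiom for shuffling along the first dimension. A minimal sketch:
import torch

torch.manual_seed(0)          # seed the RNG for reproducibility
perm = torch.randperm(5)      # a random permutation of 0..4, e.g. tensor([2, 0, 1, 4, 3])
x = torch.arange(10, 15)
shuffled = x[perm]            # shuffle x along dim 0 with the permutation
print(perm)
print(shuffled)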
Example 1: random
def random(nin, nout, nto):
nker = nto * nout
tbl = torch.Tensor(nker, 2)
fi = torch.randperm(nin)
frcntr = 0
nfi = math.floor(nin / nto) # number of distinct nto chunks
totbl = tbl.select(1, 1)
frtbl = tbl.select(1, 0)
fitbl = fi.narrow(0, 0, (nfi * nto)) # part of fi that covers distinct chunks
ufrtbl = frtbl.unfold(0, nto, nto)
utotbl = totbl.unfold(0, nto, nto)
ufitbl = fitbl.unfold(0, nto, nto)
    # start filling frtbl
    for i in range(nout):  # for each unit in the target map
ufrtbl.select(0, i).copy_(ufitbl.select(0, frcntr))
frcntr += 1
if frcntr - 1 == nfi: # reset fi
fi.copy_(torch.randperm(nin))
frcntr = 1
for tocntr in range(utotbl.size(0)):
utotbl.select(0, tocntr).fill_(tocntr)
return tbl
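This is a random connection-table generator in the style of Torch's nn.tables.random (used with SpatialConvolutionMap): column 0 of the returned table holds the source map index and column 1 the target map index. A hedged usage sketch:
import math
import torch

tbl = random(8, 4, 2)    # 4 output maps, each connected to 2 of the 8 input maps
print(tbl.size())        # torch.Size([8, 2]): nto * nout rows of (from, to) pairs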
Example 2: main
def main(args):
pyro.clear_param_store()
data = build_linear_dataset(N, p)
if args.cuda:
# make tensors and modules CUDA
data = data.cuda()
softplus.cuda()
regression_model.cuda()
for j in range(args.num_epochs):
if args.batch_size == N:
# use the entire data set
epoch_loss = svi.step(data)
else:
# mini batch
epoch_loss = 0.0
perm = torch.randperm(N) if not args.cuda else torch.randperm(N).cuda()
# shuffle data
data = data[perm]
# get indices of each batch
all_batches = get_batch_indices(N, args.batch_size)
for ix, batch_start in enumerate(all_batches[:-1]):
batch_end = all_batches[ix + 1]
batch_data = data[batch_start: batch_end]
epoch_loss += svi.step(batch_data)
if j % 100 == 0:
print("epoch avg loss {}".format(epoch_loss/float(N)))
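get_batch_indices is defined elsewhere in the Pyro regression example this is taken from; the sketch below is only an assumption consistent with how it is used above (it must return batch start offsets ending with N, since the loop pairs all_batches[ix] with all_batches[ix + 1]):
import numpy as np

def get_batch_indices(N, batch_size):
    # start offsets 0, batch_size, 2 * batch_size, ..., with N appended as the final endpoint
    all_batches = np.arange(0, N, batch_size)
    if all_batches[-1] != N:
        all_batches = list(all_batches) + [N]
    return all_batches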
Example 3: train_scene_discriminator
def train_scene_discriminator(x):
netC.zero_grad()
if has_cuda:
target = torch.cuda.FloatTensor(opt.batch_size, 1)
else:
target = torch.FloatTensor(opt.batch_size, 1)
x1 = x[0]
x2 = x[1]
h_p1 = netEP(x1).detach()
h_p2 = netEP(x2).detach()
half = int(opt.batch_size/2)
if has_cuda:
rp = torch.randperm(half).cuda()
else:
rp = torch.randperm(half).cpu()
h_p2[:half] = h_p2[rp]
target[:half] = 1
target[half:] = 0
out = netC([h_p1, h_p2])
bce = bce_criterion(out, Variable(target))
bce.backward()
optimizerC.step()
    acc = out[:half].gt(0.5).sum() + out[half:].le(0.5).sum()
    return bce.data.cpu().numpy(), acc.data.cpu().numpy() / opt.batch_size
Example 4: main
def main():
parser = argparse.ArgumentParser(description="parse args")
parser.add_argument('-n', '--num-epochs', default=1000, type=int)
parser.add_argument('-b', '--batch-size', default=N, type=int)
parser.add_argument('--cuda', action='store_true')
args = parser.parse_args()
data = build_linear_dataset(N, p)
if args.cuda:
# make tensors and modules CUDA
data = data.cuda()
softplus.cuda()
regression_model.cuda()
for j in range(args.num_epochs):
if args.batch_size == N:
# use the entire data set
epoch_loss = svi.step(data)
else:
# mini batch
epoch_loss = 0.0
perm = torch.randperm(N) if not args.cuda else torch.randperm(N).cuda()
# shuffle data
data = data[perm]
# get indices of each batch
all_batches = get_batch_indices(N, args.batch_size)
for ix, batch_start in enumerate(all_batches[:-1]):
batch_end = all_batches[ix + 1]
batch_data = data[batch_start: batch_end]
epoch_loss += svi.step(batch_data)
if j % 100 == 0:
print("epoch avg loss {}".format(epoch_loss/float(N)))
Example 5: mixup_data
def mixup_data(x, y, alpha=1.0, use_cuda=True):
    # draw the mixing coefficient lam from Beta(alpha, alpha)
    if alpha > 0.:
        lam = np.random.beta(alpha, alpha)
    else:
        lam = 1.
    batch_size = x.size()[0]
    if use_cuda:
        index = torch.randperm(batch_size).cuda()
    else:
        index = torch.randperm(batch_size)
    # mix each sample with a randomly paired sample from the same batch
    mixed_x = lam * x + (1 - lam) * x[index, :]
    y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam
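In the mixup recipe, the two returned label tensors are combined in the loss with the same coefficient. A minimal usage sketch, assuming a classifier model and a batch (x, y) from the surrounding training loop:
import torch.nn.functional as F

mixed_x, y_a, y_b, lam = mixup_data(x, y, alpha=1.0, use_cuda=False)
pred = model(mixed_x)
# the mixup loss applies the same convex combination to the criterion
loss = lam * F.cross_entropy(pred, y_a) + (1 - lam) * F.cross_entropy(pred, y_b)
loss.backward()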
Example 6: pretrain
def pretrain(self, train_data, corrupter, tester):
src, rel, dst = train_data
n_train = len(src)
optimizer = Adam(self.mdl.parameters())
    # optimizer = SGD(self.mdl.parameters(), lr=1e-4)
n_epoch = self.config.n_epoch
n_batch = self.config.n_batch
best_perf = 0
for epoch in range(n_epoch):
epoch_loss = 0
rand_idx = t.randperm(n_train)
src = src[rand_idx]
rel = rel[rand_idx]
dst = dst[rand_idx]
src_corrupted, dst_corrupted = corrupter.corrupt(src, rel, dst)
src_cuda = src.cuda()
rel_cuda = rel.cuda()
dst_cuda = dst.cuda()
src_corrupted = src_corrupted.cuda()
dst_corrupted = dst_corrupted.cuda()
for s0, r, t0, s1, t1 in batch_by_num(n_batch, src_cuda, rel_cuda, dst_cuda, src_corrupted, dst_corrupted,
n_sample=n_train):
self.mdl.zero_grad()
loss = t.sum(self.mdl.pair_loss(Variable(s0), Variable(r), Variable(t0), Variable(s1), Variable(t1)))
loss.backward()
optimizer.step()
self.mdl.constraint()
epoch_loss += loss.data[0]
logging.info('Epoch %d/%d, Loss=%f', epoch + 1, n_epoch, epoch_loss / n_train)
if (epoch + 1) % self.config.epoch_per_test == 0:
test_perf = tester()
if test_perf > best_perf:
self.save(os.path.join(config().task.dir, self.config.model_file))
best_perf = test_perf
return best_perf
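batch_by_num is a helper from the surrounding project and is not shown in this excerpt; the following is only a plausible sketch, consistent with how it is called above (splitting all the passed tensors into n_batch aligned chunks):
def batch_by_num(n_batch, *tensors, n_sample=None):
    # yield n_batch tuples of parallel slices, one slice per input tensor
    if n_sample is None:
        n_sample = tensors[0].size(0)
    for i in range(n_batch):
        start = n_sample * i // n_batch
        end = n_sample * (i + 1) // n_batch
        yield tuple(t[start:end] for t in tensors)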
Example 7: val
def val(spatial_size, Scale, precomputeStride):
d = pickle.load(open('pickle/test.pickle', 'rb'))
d = torchnet.dataset.ListDataset(d)
randperm = torch.randperm(len(d))
def perm(idx, size):
return randperm[idx]
def merge(tbl):
inp = scn.InputBatch(2, spatial_size)
center = spatial_size.float().view(1, 2) / 2
p = torch.LongTensor(2)
v = torch.FloatTensor([1, 0, 0])
for char in tbl['input']:
inp.addSample()
for stroke in char:
stroke = stroke.float() * (Scale - 0.01) / 255 - 0.5 * (Scale - 0.01)
stroke += center.expand_as(stroke)
                scn.dim_fn(2, 'drawCurve')(inp.metadata.ffi, inp.features, stroke)
inp.precomputeMetadata(precomputeStride)
return {'input': inp, 'target': torch.LongTensor(tbl['target']) - 1}
bd = torchnet.dataset.BatchDataset(d, 183, perm=perm, merge=merge)
tdi = scn.threadDatasetIterator(bd)
    def iter():
        # reshuffle in place so the outer `randperm` captured by perm() changes each epoch;
        # rebinding a local name here would leave the closure's permutation unchanged
        randperm.copy_(torch.randperm(len(d)))
        return tdi()
return iter
Example 8: train_valid_splitter
def train_valid_splitter(x, y, split, shuffle=True):
''' Generate training and validation tensors from whole dataset data and label tensors
:param x: Data tensor for whole dataset
:type x: torch.Tensor
:param y: Label tensor for whole dataset
:type y: torch.Tensor
:param split: Fraction of dataset to be used for validation
:type split: float
:param shuffle: If True randomize tensor order before splitting else do not randomize
:type shuffle: bool
:return: Training and validation tensors (training data, training labels, validation data, validation labels)
:rtype: tuple
'''
num_samples_x = x.size()[0]
num_valid_samples = math.floor(num_samples_x * split)
    if shuffle:
        indices = torch.randperm(num_samples_x)
        x, y = x[indices], y[indices]
x_val, y_val = x[:num_valid_samples], y[:num_valid_samples]
x, y = x[num_valid_samples:], y[num_valid_samples:]
return x, y, x_val, y_val
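A quick usage check of the splitter (the shapes below are illustrative only):
import math
import torch

x = torch.randn(100, 10)
y = torch.randint(0, 2, (100,))
x_train, y_train, x_val, y_val = train_valid_splitter(x, y, split=0.2)
print(x_train.size(0), x_val.size(0))  # 80 20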
Example 9: sparse_
def sparse_(tensor, sparsity, std=0.01):
r"""Fills the 2D input `Tensor` as a sparse matrix, where the
non-zero elements will be drawn from the normal distribution
:math:`\mathcal{N}(0, 0.01)`, as described in "Deep learning via
Hessian-free optimization" - Martens, J. (2010).
Args:
tensor: an n-dimensional `torch.Tensor`
sparsity: The fraction of elements in each column to be set to zero
std: the standard deviation of the normal distribution used to generate
the non-zero values
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.sparse_(w, sparsity=0.1)
"""
if tensor.ndimension() != 2:
raise ValueError("Only tensors with 2 dimensions are supported")
rows, cols = tensor.shape
num_zeros = int(math.ceil(sparsity * rows))
with torch.no_grad():
tensor.normal_(0, std)
for col_idx in range(cols):
row_indices = torch.randperm(rows)
zero_indices = row_indices[:num_zeros]
tensor[zero_indices, col_idx] = 0
return tensor
Example 10: __call__
def __call__(self, *inputs):
    # one shared permutation of the indices along dim 0, applied to every input;
    # note: permute over size(0) (number of slices), not dim() (number of dimensions)
    order = th.randperm(inputs[0].size(0))
    outputs = []
    for _input in inputs:
        outputs.append(_input.index_select(0, order))
    return outputs if len(outputs) > 1 else outputs[0]
Example 11: collapse_exp_1
def collapse_exp_1(r_feat_val, r_feat, c_feat, pred):
# emd, mmd, acc_t, acc_f
n_mode = c_feat.size(0)
c_feat_repeat = c_feat[pred]
scores = np.zeros((n_mode, 4))
t_feat = r_feat.clone()
    index = torch.arange(0, 2000).long()  # hardcoded sample count; the boolean indexing below assumes pred also has 2,000 entries
collapsed_order = torch.randperm(n_mode).long()
Mxx = distance(r_feat_val, r_feat_val, sqrt=False)
for i in range(n_mode):
# Compute Score
Mxy = distance(r_feat_val, t_feat, sqrt=False)
Myy = distance(t_feat, t_feat, sqrt=False)
scores[i, 0] = wasserstein(Mxy, True)
scores[i, 1] = mmd(Mxx, Mxy, Myy, 1)
s = knn(Mxx, Mxy, Myy, 1, True)
scores[i, 2], scores[i, 3] = s.acc_t, s.acc_f
# Do collapse
c = collapsed_order[i]
cidx = index[pred.eq(c)]
t_feat[cidx] = c_feat_repeat[cidx]
return scores
Example 12: pretrain
def pretrain(self, train_data, corrupter, tester):
src, rel, dst = train_data
n_train = len(src)
n_epoch = self.config.n_epoch
n_batch = self.config.n_batch
optimizer = Adam(self.mdl.parameters(), weight_decay=self.weight_decay)
best_perf = 0
for epoch in range(n_epoch):
epoch_loss = 0
if epoch % self.config.sample_freq == 0:
rand_idx = t.randperm(n_train)
src = src[rand_idx]
rel = rel[rand_idx]
dst = dst[rand_idx]
src_corrupted, rel_corrupted, dst_corrupted = corrupter.corrupt(src, rel, dst)
src_corrupted = src_corrupted.cuda()
rel_corrupted = rel_corrupted.cuda()
dst_corrupted = dst_corrupted.cuda()
for ss, rs, ts in batch_by_num(n_batch, src_corrupted, rel_corrupted, dst_corrupted, n_sample=n_train):
self.mdl.zero_grad()
label = t.zeros(len(ss)).type(t.LongTensor).cuda()
loss = t.sum(self.mdl.softmax_loss(Variable(ss), Variable(rs), Variable(ts), label))
loss.backward()
optimizer.step()
epoch_loss += loss.data[0]
logging.info('Epoch %d/%d, Loss=%f', epoch + 1, n_epoch, epoch_loss / n_train)
if (epoch + 1) % self.config.epoch_per_test == 0:
test_perf = tester()
if test_perf > best_perf:
self.save(os.path.join(config().task.dir, self.config.model_file))
best_perf = test_perf
return best_perf
Example 13: _generate_perms_and_inverses
def _generate_perms_and_inverses(feature_size, num_perms):
perms = [torch.randperm(feature_size)
for _ in range(num_perms)]
inv_perms = [torch.cat([(perm == i).nonzero()
for i in range(feature_size)], 0).squeeze()
for perm in perms]
return perms, inv_perms
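Each inverse here is built by locating every value i inside perm, which is O(n^2). A quick sanity check, plus a cheaper argsort-based inverse (the argsort variant is an alternative I am adding, not part of the original):
import torch

perms, inv_perms = _generate_perms_and_inverses(8, 2)
p, ip = perms[0], inv_perms[0]
assert torch.equal(p[ip], torch.arange(8))  # applying the inverse recovers the identity
assert torch.equal(torch.argsort(p), ip)    # argsort of a permutation is its inverse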
Example 14: drop_exp_1
def drop_exp_1(r_feat_val, r_feat_train, pred):
# emd, mmd, acc_t, acc_f
n_mode = len(Counter(pred))
scores = np.zeros((n_mode, 4))
t_feat = r_feat_train.clone()
collapsed_order = torch.randperm(n_mode).long()
index = torch.arange(0, r_feat_train.size(0)).long()
collapsed = torch.zeros(r_feat_train.size(0)).byte()
Mxx = distance(r_feat_val, r_feat_val, sqrt=True)
for i in range(n_mode):
# Compute Score
Mxy = distance(r_feat_val, t_feat, sqrt=True)
Myy = distance(t_feat, t_feat, sqrt=True)
scores[i, 0] = wasserstein(Mxy, False)
scores[i, 1] = mmd(Mxx, Mxy, Myy, 1)
s = knn(Mxx, Mxy, Myy, 1, True)
scores[i, 2], scores[i, 3] = s.acc_t, s.acc_f
# Do drop -- fill dropped slots with remaining samples
c = collapsed_order[i]
collapsed[pred.eq(c)] = 1
cidx = index[collapsed.eq(1)]
ncidx = index[collapsed.ne(1)]
if ncidx.dim() == 0 or cidx.dim() == 0 or ncidx.size(0) == 0:
continue
for j in cidx:
copy_idx = np.random.randint(0, ncidx.size(0))
t_feat[j] = t_feat[ncidx[copy_idx]]
return scores
Example 15: test
def test(self):
if opt['model'] == 'CharCNN':
X_train = self.dataset.df_train['text_parsed'].values
X_test = self.dataset.df_test['text_parsed'].values
else:
X_train = self.dataset.df_train['ids'].values
X_test = self.dataset.df_test['ids'].values
Y_train = self.dataset.df_train['label'].values
Y_test = self.dataset.df_test['label'].values
m_train = len(X_train)
permutation = torch.randperm(m_train)
accuracies = []
for start_idx in range(0, m_train, opt['batch_size']):
indices = permutation[start_idx:start_idx + opt['batch_size']]
if opt['model'] == 'CharCNN':
X_train_batch, X_train_mask_batch, Y_train_batch = self.create_batch_char(X_train, Y_train, indices)
else:
X_train_batch, X_train_mask_batch, Y_train_batch = self.create_batch(X_train, Y_train, indices)
Y_predict = self.model(X_train_batch, X_train_mask_batch)
loss = self.loss(Y_predict, Y_train_batch)
accuracy, _ = self.calculate_accuracy(Y_train_batch, Y_predict)
accuracies.append(accuracy)
print(loss.cpu().data.numpy(), accuracy)
del X_train_batch, X_train_mask_batch, Y_train_batch, Y_predict
print(sum(accuracies)/len(accuracies))