This article collects typical usage examples of the torch.randint method in Python. If you are wondering what torch.randint does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples from the torch module that provides this method.
The following shows 15 code examples of torch.randint, sorted by popularity by default.
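Before the project examples, here is a minimal sketch of the basic call signature; the tensor names and shapes are illustrative only and not taken from any project below. torch.randint(low, high, size) draws integers uniformly from the half-open interval [low, high), and low defaults to 0 when omitted.

import torch

# Eight random class labels drawn uniformly from [0, 10)
labels = torch.randint(0, 10, (8,), dtype=torch.long)

# A 4x4 tensor of integers in [0, 100), using keyword arguments
idx = torch.randint(low=0, high=100, size=(4, 4))

# Reproducible draws via an explicit generator
g = torch.Generator().manual_seed(0)
coin_flips = torch.randint(0, 2, (5,), generator=g)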
Example 1: get_batch
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def get_batch(source, i, train):
    if train:
        i = torch.randint(low=0, high=(len(source) - args.bptt), size=(1,)).long().item()
        seq_len = args.bptt
        target = source[i + 1:i + 1 + seq_len].t()
    else:
        seq_len = min(args.bptt, len(source) - 1 - i)
        target = source[i + seq_len, :]
    data = source[i:i + seq_len].t()
    data_mask = (data != pad).unsqueeze(-2)
    target_mask = make_std_mask(data.long())
    # reshape target to match what cross_entropy expects
    target = target.contiguous().view(-1)
    return data, target, data_mask, target_mask
Developer: nadavbh12, Project: Character-Level-Language-Modeling-with-Deeper-Self-Attention-pytorch, Lines: 20, Source file: main.py
Example 2: test_adam_poincare
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def test_adam_poincare(params):
    torch.manual_seed(44)
    manifold = geoopt.PoincareBall()
    ideal = manifold.random(10, 2)
    start = manifold.random(10, 2)
    start = geoopt.ManifoldParameter(start, manifold=manifold)

    def closure():
        idx = torch.randint(10, size=(3,))
        start_select = torch.nn.functional.embedding(idx, start, sparse=True)
        ideal_select = torch.nn.functional.embedding(idx, ideal, sparse=True)
        optim.zero_grad()
        loss = manifold.dist2(start_select, ideal_select).sum()
        loss.backward()
        assert start.grad.is_sparse
        return loss.item()

    optim = geoopt.optim.SparseRiemannianSGD([start], **params)
    for _ in range(2000):
        optim.step(closure)

    np.testing.assert_allclose(start.data, ideal, atol=1e-5, rtol=1e-5)
Example 3: __init__
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def __init__(self, thresh=1e-8, projDim=8192, input_dim=512):
    super(CBP, self).__init__()
    self.thresh = thresh
    self.projDim = projDim
    self.input_dim = input_dim
    self.output_dim = projDim
    torch.manual_seed(1)
    self.h_ = [
        torch.randint(0, self.output_dim, (self.input_dim,), dtype=torch.long),
        torch.randint(0, self.output_dim, (self.input_dim,), dtype=torch.long)
    ]
    self.weights_ = [
        (2 * torch.randint(0, 2, (self.input_dim,)) - 1).float(),
        (2 * torch.randint(0, 2, (self.input_dim,)) - 1).float()
    ]
    indices1 = torch.cat((torch.arange(input_dim, dtype=torch.long).reshape(1, -1),
                          self.h_[0].reshape(1, -1)), dim=0)
    indices2 = torch.cat((torch.arange(input_dim, dtype=torch.long).reshape(1, -1),
                          self.h_[1].reshape(1, -1)), dim=0)
    self.sparseM = [
        torch.sparse.FloatTensor(indices1, self.weights_[0], torch.Size([self.input_dim, self.output_dim])).to_dense(),
        torch.sparse.FloatTensor(indices2, self.weights_[1], torch.Size([self.input_dim, self.output_dim])).to_dense(),
    ]
Example 4: test_adam_poincare
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def test_adam_poincare(params):
    torch.manual_seed(44)
    manifold = geoopt.PoincareBall()
    ideal = manifold.random(10, 2)
    start = manifold.random(10, 2)
    start = geoopt.ManifoldParameter(start, manifold=manifold)

    def closure():
        idx = torch.randint(10, size=(3,))
        start_select = torch.nn.functional.embedding(idx, start, sparse=True)
        ideal_select = torch.nn.functional.embedding(idx, ideal, sparse=True)
        optim.zero_grad()
        loss = manifold.dist2(start_select, ideal_select).sum()
        loss.backward()
        assert start.grad.is_sparse
        return loss.item()

    optim = geoopt.optim.SparseRiemannianAdam([start], **params)
    for _ in range(2000):
        optim.step(closure)

    np.testing.assert_allclose(start.data, ideal, atol=1e-5, rtol=1e-5)
Example 5: neg_sample
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def neg_sample(self, batch):
    batch = batch.repeat(self.walks_per_node * self.num_negative_samples)

    rws = [batch]
    for i in range(self.walk_length):
        keys = self.metapath[i % len(self.metapath)]
        batch = torch.randint(0, self.num_nodes_dict[keys[-1]],
                              (batch.size(0), ), dtype=torch.long)
        rws.append(batch)

    rw = torch.stack(rws, dim=-1)
    rw.add_(self.offset.view(1, -1))

    walks = []
    num_walks_per_rw = 1 + self.walk_length + 1 - self.context_size
    for j in range(num_walks_per_rw):
        walks.append(rw[:, j:j + self.context_size])
    return torch.cat(walks, dim=0)
Example 6: test_gnn_explainer
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def test_gnn_explainer():
    model = Net()
    explainer = GNNExplainer(model, log=False)
    assert explainer.__repr__() == 'GNNExplainer()'

    x = torch.randn(8, 3)
    edge_index = torch.tensor([[0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7],
                               [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6]])
    y = torch.randint(0, 6, (8, ), dtype=torch.long)

    node_feat_mask, edge_mask = explainer.explain_node(2, x, edge_index)
    assert node_feat_mask.size() == (x.size(1), )
    assert node_feat_mask.min() >= 0 and node_feat_mask.max() <= 1
    assert edge_mask.size() == (edge_index.size(1), )
    assert edge_mask.min() >= 0 and edge_mask.max() <= 1

    explainer.visualize_subgraph(2, edge_index, edge_mask, threshold=None)
    explainer.visualize_subgraph(2, edge_index, edge_mask, threshold=0.5)
    explainer.visualize_subgraph(2, edge_index, edge_mask, y=y, threshold=None)
    explainer.visualize_subgraph(2, edge_index, edge_mask, y=y, threshold=0.5)
Example 7: test_deep_graph_infomax
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def test_deep_graph_infomax():
    def corruption(z):
        return z + 1

    model = DeepGraphInfomax(
        hidden_channels=16,
        encoder=lambda x: x,
        summary=lambda z, *args: z.mean(dim=0),
        corruption=lambda x: x + 1)
    assert model.__repr__() == 'DeepGraphInfomax(16)'

    x = torch.ones(20, 16)

    pos_z, neg_z, summary = model(x)
    assert pos_z.size() == (20, 16) and neg_z.size() == (20, 16)
    assert summary.size() == (16, )

    loss = model.loss(pos_z, neg_z, summary)
    assert 0 <= loss.item()

    acc = model.test(
        torch.ones(20, 16), torch.randint(10, (20, )), torch.ones(20, 16),
        torch.randint(10, (20, )))
    assert 0 <= acc and acc <= 1
Example 8: test_node2vec
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def test_node2vec():
    edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])

    model = Node2Vec(edge_index, embedding_dim=16, walk_length=2,
                     context_size=2)
    assert model.__repr__() == 'Node2Vec(3, 16)'

    z = model(torch.arange(3))
    assert z.size() == (3, 16)

    pos_rw, neg_rw = model.sample(torch.arange(3))
    loss = model.loss(pos_rw, neg_rw)
    assert 0 <= loss.item()

    acc = model.test(torch.ones(20, 16), torch.randint(10, (20, )),
                     torch.ones(20, 16), torch.randint(10, (20, )))
    assert 0 <= acc and acc <= 1
Example 9: farthest_point_sample
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def farthest_point_sample(xyz, npoint):
    """
    Input:
        xyz: pointcloud data, [B, N, 3]
        npoint: number of samples
    Return:
        centroids: sampled pointcloud index, [B, npoint]
    """
    device = xyz.device
    B, N, C = xyz.shape
    centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
    distance = torch.ones(B, N).to(device) * 1e10
    farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
    batch_indices = torch.arange(B, dtype=torch.long).to(device)
    for i in range(npoint):
        centroids[:, i] = farthest
        centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
        dist = torch.sum((xyz - centroid) ** 2, -1)
        mask = dist < distance
        distance[mask] = dist[mask]
        farthest = torch.max(distance, -1)[1]
    return centroids
Example 10: forward_attr
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def forward_attr(self, e, mode='left'):
    assert mode == 'left' or mode == 'right'

    e_emb = self.emb_e(e.view(-1))

    # Sample one numerical literal for each entity
    e_attr = self.numerical_literals[e.view(-1)]
    m = len(e_attr)
    idxs = torch.randint(self.n_num_lit, size=(m,)).cuda()
    attr_emb = self.emb_attr(idxs)

    inputs = torch.cat([e_emb, attr_emb], dim=1)
    pred = self.attr_net_left(inputs) if mode == 'left' else self.attr_net_right(inputs)

    target = e_attr[range(m), idxs]

    return pred, target
Example 11: forward
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def forward(self, pos):
    r"""Memory allocation and sampling

    Parameters
    ----------
    pos : tensor
        The positional tensor of shape (B, N, C)

    Returns
    -------
    tensor of shape (B, self.npoints)
        The sampled indices in each batch.
    """
    device = pos.device
    B, N, C = pos.shape
    pos = pos.reshape(-1, C)
    dist = th.zeros((B * N), dtype=pos.dtype, device=device)
    start_idx = th.randint(0, N - 1, (B, ), dtype=th.long, device=device)
    result = th.zeros((self.npoints * B), dtype=th.long, device=device)
    farthest_point_sampler(pos, B, self.npoints, dist, start_idx, result)
    return result.reshape(B, self.npoints)
Example 12: add_insertion_noise
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def add_insertion_noise(self, tokens, p):
    if p == 0.0:
        return tokens

    num_tokens = len(tokens)
    n = int(math.ceil(num_tokens * p))

    noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1
    noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)
    noise_mask[noise_indices] = 1
    result = torch.LongTensor(n + len(tokens)).fill_(-1)

    num_random = int(math.ceil(n * self.random_ratio))
    result[noise_indices[num_random:]] = self.mask_idx
    result[noise_indices[:num_random]] = torch.randint(low=1, high=len(self.vocab), size=(num_random,))

    result[~noise_mask] = tokens

    assert (result >= 0).all()
    return result
Example 13: test_cutmix
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def test_cutmix(self):
    random_image = torch.rand(5, 3, 100, 100)
    state = {torchbearer.X: random_image, torchbearer.Y_TRUE: torch.randint(10, (5,)).long(), torchbearer.DEVICE: 'cpu'}
    torch.manual_seed(7)
    co = CutMix(0.25, classes=10)
    co.on_sample(state)
    reg_img = state[torchbearer.X].view(-1)

    x = [72, 83, 18, 96, 40]
    y = [8, 17, 62, 30, 66]
    perm = [0, 4, 3, 2, 1]
    sz = 3

    rnd = random_image.clone().numpy()
    known_cut = random_image.clone().numpy()
    known_cut[0, :, y[0]-sz//2:y[0]+sz//2, x[0]-sz//2:x[0]+sz//2] = rnd[perm[0], :, y[0]-sz//2:y[0]+sz//2, x[0]-sz//2:x[0]+sz//2]
    known_cut[1, :, y[1]-sz//2:y[1]+sz//2, x[1]-sz//2:x[1]+sz//2] = rnd[perm[1], :, y[1]-sz//2:y[1]+sz//2, x[1]-sz//2:x[1]+sz//2]
    known_cut[2, :, y[2]-sz//2:y[2]+sz//2, x[2]-sz//2:x[2]+sz//2] = rnd[perm[2], :, y[2]-sz//2:y[2]+sz//2, x[2]-sz//2:x[2]+sz//2]
    known_cut[3, :, y[3]-sz//2:y[3]+sz//2, x[3]-sz//2:x[3]+sz//2] = rnd[perm[3], :, y[3]-sz//2:y[3]+sz//2, x[3]-sz//2:x[3]+sz//2]
    known_cut[4, :, y[4]-sz//2:y[4]+sz//2, x[4]-sz//2:x[4]+sz//2] = rnd[perm[4], :, y[4]-sz//2:y[4]+sz//2, x[4]-sz//2:x[4]+sz//2]
    known_cut = torch.from_numpy(known_cut)
    known_cut = known_cut.view(-1)

    diff = (torch.abs(known_cut-reg_img) > 1e-4).any()
    self.assertTrue(diff.item() == 0)
Example 14: example_mdpooling
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def example_mdpooling():
    input = torch.randn(2, 32, 64, 64).cuda()
    input.requires_grad = True
    batch_inds = torch.randint(2, (20, 1)).cuda().float()
    x = torch.randint(256, (20, 1)).cuda().float()
    y = torch.randint(256, (20, 1)).cuda().float()
    w = torch.randint(64, (20, 1)).cuda().float()
    h = torch.randint(64, (20, 1)).cuda().float()
    rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)

    # modulated deformable pooling (V2)
    dpooling = DCNPooling(spatial_scale=1.0 / 4,
                          pooled_size=7,
                          output_dim=32,
                          no_trans=False,
                          group_size=1,
                          trans_std=0.1,
                          deform_fc_dim=1024).cuda()

    dout = dpooling(input, rois)
    target = dout.new(*dout.size())
    target.data.uniform_(-0.1, 0.1)
    error = (target - dout).mean()
    error.backward()
    print(dout.shape)
Example 15: build_fss_keys
# Required imports: import torch [as alias]
# Or: from torch import randint [as alias]
def build_fss_keys(self, type_op):
    """
    The builder to generate functional keys for Function Secret Sharing (FSS)
    """
    if type_op == "eq":
        fss_class = sy.frameworks.torch.mpc.fss.DPF
    elif type_op == "comp":
        fss_class = sy.frameworks.torch.mpc.fss.DIF
    else:
        raise ValueError(f"type_op {type_op} not valid")

    n = sy.frameworks.torch.mpc.fss.n

    def build_separate_fss_keys(n_party, n_instances=100):
        assert (
            n_party == 2
        ), f"The FSS protocol only works for 2 workers, {n_party} were provided."
        alpha, s_00, s_01, *CW = fss_class.keygen(n_values=n_instances)
        # simulate sharing TODO clean this
        mask = th.randint(0, 2 ** n, alpha.shape)
        return [((alpha - mask) % 2 ** n, s_00, *CW), (mask, s_01, *CW)]

    return build_separate_fss_keys