This article collects typical usage examples of the torch.zeros function in Python. If you are wondering what torch.zeros does, how to call it, or how it is used in real code, the curated examples here may help. You can also explore further usage examples of the torch module that the function belongs to.
A total of 15 code examples of torch.zeros are shown below, sorted by popularity by default.
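As a quick orientation before the collected examples: torch.zeros takes a shape (either separate ints or a sequence) plus optional dtype and device keyword arguments, and returns a tensor of that shape filled with zeros. The snippet below is a minimal, self-contained sketch; the variable names are illustrative and not taken from any of the examples that follow.

import torch

x = torch.zeros(3, 4)                                   # 3x4 float32 tensor of zeros
y = torch.zeros(2, 5, dtype=torch.long)                 # int64 zeros, e.g. for token ids or labels
z = torch.zeros((8, 128), device='cuda' if torch.cuda.is_available() else 'cpu')
m = torch.zeros_like(x)                                 # zeros with the same shape and dtype as x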
Example 1: forward
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def forward(self, features, rois):
    batch_size, num_channels, data_height, data_width = features.size()
    num_rois = rois.size()[0]
    # Allocate the pooled output and the argmax index buffer up front.
    output = torch.zeros(num_rois, num_channels, self.pooled_height, self.pooled_width)
    argmax = torch.IntTensor(num_rois, num_channels, self.pooled_height, self.pooled_width).zero_()
    if not features.is_cuda:
        # CPU path: the extension expects channels-last features.
        _features = features.permute(0, 2, 3, 1)
        roi_pooling.roi_pooling_forward(self.pooled_height, self.pooled_width, self.spatial_scale,
                                        _features, rois, output)
        # output = output.cuda()
    else:
        output = output.cuda()
        argmax = argmax.cuda()
        roi_pooling.roi_pooling_forward_cuda(self.pooled_height, self.pooled_width, self.spatial_scale,
                                             features, rois, output, argmax)
    # Cache the tensors needed by the backward pass.
    self.output = output
    self.argmax = argmax
    self.rois = rois
    self.feature_size = features.size()
    return output
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines of code: 24, Source: roi_pool.py
Example 2: set_conceptnet_inputs
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def set_conceptnet_inputs(input_event, relation, text_encoder, max_e1, max_r, force):
    abort = False
    e1_tokens, rel_tokens, _ = data.conceptnet_data.do_example(text_encoder, input_event, relation, None)
    if len(e1_tokens) > max_e1:
        if force:
            # Oversized entity: grow the buffer instead of truncating.
            XMB = torch.zeros(1, len(e1_tokens) + max_r).long().to(cfg.device)
        else:
            XMB = torch.zeros(1, max_e1 + max_r).long().to(cfg.device)
            return {}, True
    else:
        XMB = torch.zeros(1, max_e1 + max_r).long().to(cfg.device)
    # The zero-initialized buffer acts as padding; copy the real tokens into their slots.
    XMB[:, :len(e1_tokens)] = torch.LongTensor(e1_tokens)
    XMB[:, max_e1:max_e1 + len(rel_tokens)] = torch.LongTensor(rel_tokens)
    batch = {}
    batch["sequences"] = XMB
    batch["attention_mask"] = data.conceptnet_data.make_attention_mask(XMB)
    return batch, abort
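The zero-then-fill pattern in Example 2 (allocate a padded integer buffer with torch.zeros, then write token ids into slices of it) is a common way to batch variable-length sequences. The snippet below is a stripped-down sketch of that pattern with made-up lengths and token ids, independent of the data and cfg modules used above.

import torch

max_e1, max_r = 10, 5
e1_tokens = [42, 17, 99]          # hypothetical token ids
rel_tokens = [7, 8]

XMB = torch.zeros(1, max_e1 + max_r, dtype=torch.long)    # zeros serve as padding
XMB[:, :len(e1_tokens)] = torch.tensor(e1_tokens)
XMB[:, max_e1:max_e1 + len(rel_tokens)] = torch.tensor(rel_tokens)
attention_mask = (XMB != 0).float()                        # one simple way to mark non-padding positions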
Example 3: __init__
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def __init__(self, smiles_pairs, cuda=True):
    cls = list(zip(*smiles_pairs))[0]
    self.hvocab = sorted( list(set(cls)) )
    self.hmap = {x: i for i, x in enumerate(self.hvocab)}
    self.vocab = [tuple(x) for x in smiles_pairs]  # copy
    self.inter_size = [count_inters(x[1]) for x in self.vocab]
    self.vmap = {x: i for i, x in enumerate(self.vocab)}
    # The mask starts at zero everywhere, gets 1000 at valid (h, s) pairs,
    # and after the final subtraction holds 0 for valid entries and -1000 for invalid ones.
    self.mask = torch.zeros(len(self.hvocab), len(self.vocab))
    for h, s in smiles_pairs:
        hid = self.hmap[h]
        idx = self.vmap[(h, s)]
        self.mask[hid, idx] = 1000.0
    if cuda: self.mask = self.mask.cuda()
    self.mask = self.mask - 1000.0
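The final mask in Example 3 holds 0 for allowed vocabulary entries and -1000 for disallowed ones, which is the usual additive-mask trick: adding it to a row of logits before a softmax pushes the forbidden entries toward zero probability. Below is a minimal sketch of how such a mask is typically consumed; the scores and mask values here are illustrative, not taken from the repository above.

import torch
import torch.nn.functional as F

scores = torch.randn(4)                          # hypothetical logits over 4 vocabulary entries
mask = torch.tensor([0.0, -1000.0, 0.0, -1000.0])
probs = F.softmax(scores + mask, dim=-1)         # masked entries get (numerically) zero probability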
Example 4: embed_sub_tree
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def embed_sub_tree(self, tree_tensors, hinput, subtree, is_inter_layer):
    subnode, submess = subtree
    num_nodes = tree_tensors[0].size(0)
    fnode, fmess, agraph, bgraph, cgraph, _ = self.get_sub_tensor(tree_tensors, subtree)
    if is_inter_layer:
        finput = self.E_i(fnode[:, 1])
        hinput = index_select_ND(hinput, 0, cgraph).sum(dim=1)
        hnode = self.W_i( torch.cat([finput, hinput], dim=-1) )
    else:
        finput = self.E_c(fnode[:, 0])
        hinput = hinput.index_select(0, subnode)
        hnode = self.W_c( torch.cat([finput, hinput], dim=-1) )
    if len(submess) == 0:
        hmess = fmess
    else:
        # Scatter the node states into a zero buffer indexed by node id,
        # then gather the source-node state for every message.
        node_buf = torch.zeros(num_nodes, self.hidden_size, device=fmess.device)
        node_buf = index_scatter(hnode, node_buf, subnode)
        hmess = node_buf.index_select(index=fmess[:, 0], dim=0)
        pos_vecs = self.E_pos.index_select(0, fmess[:, 2])
        hmess = torch.cat( [hmess, pos_vecs], dim=-1 )
    return hnode, hmess, agraph, bgraph
Example 5: tensorize
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def tensorize(mol_batch, vocab, avocab):
    mol_batch = [MolGraph(x) for x in mol_batch]
    tree_tensors, tree_batchG = MolGraph.tensorize_graph([x.mol_tree for x in mol_batch], vocab)
    graph_tensors, graph_batchG = MolGraph.tensorize_graph([x.mol_graph for x in mol_batch], avocab)
    tree_scope = tree_tensors[-1]
    graph_scope = graph_tensors[-1]
    # cgraph maps every tree node to the (zero-padded) list of atoms in its cluster.
    max_cls_size = max([len(c) for x in mol_batch for c in x.clusters])
    cgraph = torch.zeros(len(tree_batchG) + 1, max_cls_size).int()
    for v, attr in tree_batchG.nodes(data=True):
        bid = attr['batch_id']
        offset = graph_scope[bid][0]
        tree_batchG.nodes[v]['inter_label'] = inter_label = [(x + offset, y) for x, y in attr['inter_label']]
        tree_batchG.nodes[v]['cluster'] = cls = [x + offset for x in attr['cluster']]
        tree_batchG.nodes[v]['assm_cands'] = [add(x, offset) for x in attr['assm_cands']]
        cgraph[v, :len(cls)] = torch.IntTensor(cls)
    all_orders = []
    for i, hmol in enumerate(mol_batch):
        offset = tree_scope[i][0]
        order = [(x + offset, y + offset, z) for x, y, z in hmol.order[:-1]] + [(hmol.order[-1][0] + offset, None, 0)]
        all_orders.append(order)
    tree_tensors = tree_tensors[:4] + (cgraph, tree_scope)
    return (tree_batchG, graph_batchG), (tree_tensors, graph_tensors), all_orders
Example 6: __init__
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             stride=1,
             padding=0,
             dilation=1,
             groups=1,
             bias=True):
    super().__init__(
        in_channels,
        out_channels,
        kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias)
    # Per-output-channel scale and shift buffers, initialized to the identity
    # (gamma = 1, beta = 0); registered as buffers rather than trainable parameters.
    self.register_buffer('weight_gamma',
                         torch.ones(self.out_channels, 1, 1, 1))
    self.register_buffer('weight_beta',
                         torch.zeros(self.out_channels, 1, 1, 1))
Example 7: m_ggnn
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def m_ggnn(self, h_v, h_w, e_vw, opt={}):
    # Messages start as zeros; slots stay zero wherever no edge label matches.
    m = Variable(torch.zeros(h_w.size(0), h_w.size(1), self.args['out']).type_as(h_w.data))
    for w in range(h_w.size(1)):
        if torch.nonzero(e_vw[:, w, :].data).size():
            for i, el in enumerate(self.args['e_label']):
                ind = (el == e_vw[:, w, :]).type_as(self.learn_args[0][i])
                parameter_mat = self.learn_args[0][i][None, ...].expand(h_w.size(0), self.learn_args[0][i].size(0),
                                                                        self.learn_args[0][i].size(1))
                m_w = torch.transpose(torch.bmm(torch.transpose(parameter_mat, 1, 2),
                                                torch.transpose(torch.unsqueeze(h_w[:, w, :], 1),
                                                                1, 2)), 1, 2)
                m_w = torch.squeeze(m_w)
                m[:, w, :] = ind.expand_as(m_w) * m_w
    return m
Example 8: colorize
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def colorize(x):
    ''' Converts a one-channel grayscale image to a color heatmap image '''
    if x.dim() == 2:
        torch.unsqueeze(x, 0, out=x)
    if x.dim() == 3:
        cl = torch.zeros([3, x.size(1), x.size(2)])
        cl[0] = gauss(x, .5, .6, .2) + gauss(x, 1, .8, .3)
        cl[1] = gauss(x, 1, .5, .3)
        cl[2] = gauss(x, 1, .2, .3)
        cl[cl.gt(1)] = 1
    elif x.dim() == 4:
        cl = torch.zeros([x.size(0), 3, x.size(2), x.size(3)])
        cl[:, 0, :, :] = gauss(x, .5, .6, .2) + gauss(x, 1, .8, .3)
        cl[:, 1, :, :] = gauss(x, 1, .5, .3)
        cl[:, 2, :, :] = gauss(x, 1, .2, .3)
    return cl
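The gauss helper used by colorize is not part of this snippet. In visualization code of this kind it is usually a scaled Gaussian bump evaluated element-wise on the tensor; the definition below is an assumption along those lines, not code from the example above.

import torch

def gauss(x, a, b, c, d=0.0):
    # Assumed helper: amplitude a, center b, width c, optional offset d.
    return a * torch.exp(-((x - b) ** 2) / (2 * c ** 2)) + d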
Example 9: __init__
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def __init__(self, **kwargs):
    """
    kwargs:
        target_size: int, target size
        device: str, device
    """
    super(CRF, self).__init__()
    for k in kwargs:
        self.__setattr__(k, kwargs[k])
    device = self.device
    # Init transitions: two extra rows/columns for the virtual START and STOP tags.
    self.START_TAG, self.STOP_TAG = -2, -1
    init_transitions = torch.zeros(self.target_size + 2, self.target_size + 2, device=device)
    # Large negative scores forbid transitions into START and out of STOP.
    init_transitions[:, self.START_TAG] = -10000.0
    init_transitions[self.STOP_TAG, :] = -10000.0
    self.transitions = nn.Parameter(init_transitions)
Example 10: get_params
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def get_params():
    def _one(shape):
        ts = torch.tensor(np.random.normal(0, 0.01, size=shape), device=device, dtype=torch.float32)
        return torch.nn.Parameter(ts, requires_grad=True)

    def _three():
        return (_one((num_inputs, num_hiddens)),
                _one((num_hiddens, num_hiddens)),
                torch.nn.Parameter(torch.zeros(num_hiddens, device=device, dtype=torch.float32), requires_grad=True))

    W_xz, W_hz, b_z = _three()  # update gate parameters
    W_xr, W_hr, b_r = _three()  # reset gate parameters
    W_xh, W_hh, b_h = _three()  # candidate hidden state parameters
    # output layer parameters
    W_hq = _one((num_hiddens, num_outputs))
    b_q = torch.nn.Parameter(torch.zeros(num_outputs, device=device, dtype=torch.float32), requires_grad=True)
    return nn.ParameterList([W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q])
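Example 10 only builds the parameter list; its layout matches a from-scratch GRU (weights drawn from N(0, 0.01), all biases starting as torch.zeros). The step function below is a sketch of the standard GRU recurrence such parameters are typically fed into, not code from the same file; it assumes inputs is a sequence of (batch, num_inputs) tensors and state is a one-element tuple holding the hidden state.

import torch

def gru(inputs, state, params):
    W_xz, W_hz, b_z, W_xr, W_hr, b_r, W_xh, W_hh, b_h, W_hq, b_q = params
    H, = state
    outputs = []
    for X in inputs:
        Z = torch.sigmoid(X @ W_xz + H @ W_hz + b_z)             # update gate
        R = torch.sigmoid(X @ W_xr + H @ W_hr + b_r)             # reset gate
        H_tilda = torch.tanh(X @ W_xh + (R * H) @ W_hh + b_h)    # candidate hidden state
        H = Z * H + (1 - Z) * H_tilda
        outputs.append(H @ W_hq + b_q)                           # output layer
    return outputs, (H,)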
Example 11: initialize_queue
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def initialize_queue(model_k, device, train_loader):
    # Start from an empty (0 x 128) queue of key embeddings.
    queue = torch.zeros((0, 128), dtype=torch.float)
    queue = queue.to(device)
    for batch_idx, (data, target) in enumerate(train_loader):
        x_k = data[1]
        x_k = x_k.to(device)
        k = model_k(x_k)
        k = k.detach()
        queue = queue_data(queue, k)
        queue = dequeue_data(queue, K=10)
        break  # a single batch is enough to seed the queue
    return queue
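Examples 11 and 12 rely on queue_data and dequeue_data, which are not shown here. In a momentum-contrast setup these are usually a concatenation onto the queue and a trim back to at most K entries; the definitions below are an assumption along those lines rather than the original helpers.

import torch

def queue_data(data, k):
    # Append the new keys to the end of the queue.
    return torch.cat([data, k], dim=0)

def dequeue_data(data, K=4096):
    # Keep only the most recent K entries.
    return data[-K:] if len(data) > K else data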
Example 12: train
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def train(model_q, model_k, device, train_loader, queue, optimizer, epoch, temp=0.07):
    model_q.train()
    total_loss = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        x_q = data[0]
        x_k = data[1]
        x_q, x_k = x_q.to(device), x_k.to(device)
        q = model_q(x_q)
        k = model_k(x_k)
        k = k.detach()
        N = data[0].shape[0]
        K = queue.shape[0]
        # Positive logits: each query against its own key; negative logits: query against the queue.
        l_pos = torch.bmm(q.view(N, 1, -1), k.view(N, -1, 1))
        l_neg = torch.mm(q.view(N, -1), queue.T.view(-1, K))
        logits = torch.cat([l_pos.view(N, 1), l_neg], dim=1)
        # The positive is always in column 0, so every label is zero.
        labels = torch.zeros(N, dtype=torch.long)
        labels = labels.to(device)
        cross_entropy_loss = nn.CrossEntropyLoss()
        loss = cross_entropy_loss(logits / temp, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
        momentum_update(model_q, model_k)
        queue = queue_data(queue, k)
        queue = dequeue_data(queue)
    total_loss /= len(train_loader.dataset)
    print('Train Epoch: {} \tLoss: {:.6f}'.format(epoch, total_loss))
Example 13: __getitem__
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def __getitem__(self, index):
    # images: bs x chan x T x H x W
    x = torch.zeros(3, self.opt.max_timesteps, 50, 100)
    # load video using read_data() and copy it into the zero-padded x
    d = self.dataset[index]
    # targets: bs-length tensor of targets (each one is the length of the target seq)
    frames, y, sub = read_data(d, self.opt, self.vocab_mapping)
    x[:, :frames.size(1), :, :] = frames
    # input lengths: bs-length tensor of integers, representing
    # the number of input timesteps/frames for the given batch element
    length = frames.size(1)
    return x, y, length, index
Example 14: __init__
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def __init__(self, hidden_size, eps=1e-12):
    """Construct a layernorm module in the TF style (epsilon inside the square root).
    """
    super(BertLayerNorm, self).__init__()
    self.weight = nn.Parameter(torch.ones(hidden_size))   # scale, initialized to 1
    self.bias = nn.Parameter(torch.zeros(hidden_size))    # shift, initialized to 0
    self.variance_epsilon = eps
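For context, the constructor in Example 14 is usually paired with a forward pass that normalizes over the last dimension with the epsilon inside the square root, as the docstring notes. The sketch below reproduces that widely used BertLayerNorm forward; treat it as an illustration rather than the exact code from this file.

def forward(self, x):
    u = x.mean(-1, keepdim=True)                            # per-position mean
    s = (x - u).pow(2).mean(-1, keepdim=True)               # per-position variance
    x = (x - u) / torch.sqrt(s + self.variance_epsilon)     # epsilon inside the sqrt (TF style)
    return self.weight * x + self.bias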
Example 15: backward
# Required module import: import torch [as alias]
# Or: from torch import zeros [as alias]
def backward(self, grad_output):
    assert(self.feature_size is not None and grad_output.is_cuda)
    batch_size, num_channels, data_height, data_width = self.feature_size
    # Gradient w.r.t. the input features starts as zeros and is filled by the CUDA kernel.
    grad_input = torch.zeros(batch_size, num_channels, data_height, data_width).cuda()
    roi_pooling.roi_pooling_backward_cuda(self.pooled_height, self.pooled_width, self.spatial_scale,
                                          grad_output, self.rois, grad_input, self.argmax)
    # print grad_input
    return grad_input, None
Author: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines of code: 14, Source: roi_pool.py