This page collects typical usage examples of the Python function torch.ones. If you are wondering what torch.ones does, how to call it, or what real-world usage looks like, the curated code samples below may help. You can also explore further usage examples from the torch module to which this function belongs.
The following presents 15 code examples of torch.ones, ordered by popularity by default.
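As a quick reference before the longer examples, torch.ones takes a shape (as separate integers or as a tuple) plus the usual dtype / device / requires_grad keyword arguments:

import torch

x = torch.ones(2, 3)                        # 2x3 float32 tensor filled with 1.0
m = torch.ones(4, 4, dtype=torch.bool)      # boolean mask of all True values
y = torch.ones((5,), requires_grad=True)    # shape given as a tuple; gradients tracked
print(x.shape, m.dtype, y.requires_grad)    # torch.Size([2, 3]) torch.bool True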
Example 1: __init__
# Required import: import torch
# Or: from torch import ones
def __init__(self, input_size, hidden_size, correlation_func=1, do_similarity=False):
    super(AttentionScore, self).__init__()
    self.correlation_func = correlation_func
    self.hidden_size = hidden_size

    if correlation_func == 2 or correlation_func == 3:
        self.linear = nn.Linear(input_size, hidden_size, bias=False)
        if do_similarity:
            self.diagonal = Parameter(torch.ones(1, 1, 1) / (hidden_size ** 0.5), requires_grad=False)
        else:
            self.diagonal = Parameter(torch.ones(1, 1, hidden_size), requires_grad=True)

    if correlation_func == 4:
        self.linear = nn.Linear(input_size, input_size, bias=False)

    if correlation_func == 5:
        self.linear = nn.Linear(input_size, hidden_size, bias=False)
Example 2: greedy_decode
# Required import: import torch
# Or: from torch import ones
def greedy_decode(self, latent, max_len, start_id):
    '''
    latent: (batch_size, max_src_seq, d_model)
    src_mask: (batch_size, 1, max_src_len)
    '''
    batch_size = latent.size(0)
    ys = get_cuda(torch.ones(batch_size, 1).fill_(start_id).long())  # (batch_size, 1)
    for i in range(max_len - 1):
        # ys: (batch_size, cur_len), tgt_mask: (1, cur_len, cur_len)
        out = self.decode(latent.unsqueeze(1), to_var(ys), to_var(subsequent_mask(ys.size(1)).long()))
        prob = self.generator(out[:, -1])  # (batch_size, vocab_size)
        _, next_word = torch.max(prob, dim=1)  # (batch_size,)
        ys = torch.cat([ys, next_word.unsqueeze(1)], dim=1)  # append the predicted token
    return ys[:, 1:]
Example 3: __init__
# Required import: import torch
# Or: from torch import ones
def __init__(self, nx, n_ctx, cfg, scale=False):
    super(Attention, self).__init__()
    n_state = nx  # in Attention: n_state=768 (nx=n_embd)
    assert n_state % cfg.nH == 0
    self.register_buffer('b', torch.tril(torch.ones(
        n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
    self.n_head = cfg.nH
    self.split_size = n_state
    self.scale = scale
    self.c_attn = Conv1D(n_state * 3, 1, nx)
    self.c_proj = Conv1D(n_state, 1, nx)
    self.attn_dropout = nn.Dropout(cfg.adpt)
    self.resid_dropout = nn.Dropout(cfg.rdpt)
    # dimensions of w: (batch_size x num_heads x seq_length x seq_length)
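The buffer registered above, torch.tril(torch.ones(n_ctx, n_ctx)), is the standard lower-triangular causal attention mask. A standalone illustration with a context length of 4 (not part of the class above):

import torch

mask = torch.tril(torch.ones(4, 4)).view(1, 1, 4, 4)
print(mask[0, 0])
# tensor([[1., 0., 0., 0.],
#         [1., 1., 0., 0.],
#         [1., 1., 1., 0.],
#         [1., 1., 1., 1.]])
# A 1 at row i, column j means position i may attend to position j (only j <= i).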
Example 4: __init__
# Required import: import torch
# Or: from torch import ones
def __init__(self,
             in_channels,
             out_channels,
             kernel_size,
             stride=1,
             padding=0,
             dilation=1,
             groups=1,
             bias=True):
    super().__init__(
        in_channels,
        out_channels,
        kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=bias)
    self.register_buffer('weight_gamma',
                         torch.ones(self.out_channels, 1, 1, 1))
    self.register_buffer('weight_beta',
                         torch.zeros(self.out_channels, 1, 1, 1))
Example 5: batch_loss
# Required import: import torch
# Or: from torch import ones
def batch_loss(encoder, decoder, X, Y, loss):
    batch_size = X.shape[0]
    enc_state = encoder.begin_state()
    enc_outputs, enc_state = encoder(X, enc_state)
    # initialize the decoder's hidden state
    dec_state = decoder.begin_state(enc_state)
    # the decoder's input at the first time step is BOS
    dec_input = torch.tensor([out_vocab.stoi[BOS]] * batch_size)
    # use the mask variable to ignore the loss at positions whose label is the padding token PAD
    mask, num_not_pad_tokens = torch.ones(batch_size,), 0
    l = torch.tensor([0.0])
    for y in Y.permute(1, 0):  # Y shape: (batch, seq_len)
        dec_output, dec_state = decoder(dec_input, dec_state, enc_outputs)
        l = l + (mask * loss(dec_output, y)).sum()
        dec_input = y  # teacher forcing
        num_not_pad_tokens += mask.sum().item()
        # set the mask to 0 at PAD positions; the original text uses y != out_vocab.stoi[EOS] here, which looks wrong
        mask = mask * (y != out_vocab.stoi[PAD]).float()
    return l / num_not_pad_tokens
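To make the masking concrete, here is a tiny hand-worked sketch with a hypothetical PAD id of 0. Because the mask is multiplied into the loss before it is updated, the step at which a sequence first hits PAD still contributes, and every later step of that sequence is zeroed out:

import torch

PAD = 0  # hypothetical padding id, for illustration only
# targets for a batch of 3 sequences, one row per time step (already permuted to (seq_len, batch))
Y_steps = torch.tensor([[5, 7, 2],
                        [6, PAD, 3],
                        [PAD, PAD, 4]])

mask = torch.ones(3)
for y in Y_steps:
    # ... here the original accumulates (mask * loss(dec_output, y)).sum() ...
    mask = mask * (y != PAD).float()
    print(mask)
# tensor([1., 1., 1.])
# tensor([1., 0., 1.])   sequence 2 reached PAD at step 2
# tensor([0., 0., 1.])   sequence 1 reached PAD at step 3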
Example 6: __init__
# Required import: import torch
# Or: from torch import ones
def __init__(self, T, opts):
    super(LOOLoss, self).__init__()
    self.gpu = opts.gpu
    self.loo = opts.loo if 'LOO' in opts.method else 0.
    self.label_smooth = opts.label_smooth
    self.kld_u_const = math.log(len(T['wnids']))
    self.relevant = [torch.from_numpy(rel) for rel in T['relevant']]
    self.labels_relevant = torch.from_numpy(T['labels_relevant'].astype(np.uint8))
    ch_slice = T['ch_slice']
    if opts.class_wise:
        num_children = T['num_children']
        num_supers = len(num_children)
        self.class_weight = torch.zeros(ch_slice[-1])
        for m, num_ch in enumerate(num_children):
            self.class_weight[ch_slice[m]:ch_slice[m + 1]] = 1. / (num_ch * num_supers)
    else:
        self.class_weight = torch.ones(ch_slice[-1]) / ch_slice[-1]
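In the class_wise branch each super-class gets a total weight of 1 / num_supers, split evenly among its children, so both branches produce class weights that sum to 1. A quick check with made-up toy values for ch_slice and num_children (hypothetical, only to illustrate the indexing):

import torch

num_children = [2, 3]          # hypothetical: two super-classes with 2 and 3 children
ch_slice = [0, 2, 5]           # cumulative child offsets
num_supers = len(num_children)

class_weight = torch.zeros(ch_slice[-1])
for m, num_ch in enumerate(num_children):
    class_weight[ch_slice[m]:ch_slice[m + 1]] = 1. / (num_ch * num_supers)
print(class_weight)            # tensor([0.2500, 0.2500, 0.1667, 0.1667, 0.1667])
print(class_weight.sum())      # sums to 1 (up to float rounding)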
Example 7: forward
# Required import: import torch
# Or: from torch import ones
def forward(self, x):
    # x is of shape: batchSize x dimInFeatures x numberNodesIn
    B = x.shape[0]
    F = x.shape[1]
    Nin = x.shape[2]
    # If we have fewer filter coefficients than required, we need
    # to use the copying scheme
    if self.M == self.N:
        self.h = self.weight
    else:
        self.h = torch.index_select(self.weight, 4, self.copyNodes)
    # And now we add the zero padding
    if Nin < self.N:
        zeroPad = torch.zeros(B, F, self.N - Nin).type(x.dtype).to(x.device)
        x = torch.cat((x, zeroPad), dim=2)
    # Compute the filter output
    u = NVGF(self.h, self.S, x, self.bias)
    # So far, u is of shape batchSize x dimOutFeatures x numberNodes
    # And we want to return a tensor of shape
    # batchSize x dimOutFeatures x numberNodesIn
    # since the nodes between numberNodesIn and numberNodes are not required
    if Nin < self.N:
        u = torch.index_select(u, 2, torch.arange(Nin).to(u.device))
    return u
Example 8: __init__
# Required import: import torch
# Or: from torch import ones
def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
             dilation=1, groups=1, condense_factor=None, dropout_rate=0.):
    super().__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.condense_factor = condense_factor
    self.groups = groups
    self.dropout_rate = dropout_rate
    # Check if given configs are valid
    assert self.in_channels % self.groups == 0, "group value is not divisible by input channels"
    assert self.in_channels % self.condense_factor == 0, "condensation factor is not divisible by input channels"
    assert self.out_channels % self.groups == 0, "group value is not divisible by output channels"
    self.batch_norm = nn.BatchNorm2d(in_channels)
    self.relu = nn.ReLU(inplace=True)
    if self.dropout_rate > 0:
        self.dropout = nn.Dropout(self.dropout_rate, inplace=False)
    self.conv = nn.Conv2d(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=kernel_size,
                          stride=stride, padding=padding, dilation=dilation, groups=1, bias=False)
    # register conv buffers
    self.register_buffer('_count', torch.zeros(1))
    self.register_buffer('_stage', torch.zeros(1))
    self.register_buffer('_mask', torch.ones(self.conv.weight.size()))
Example 9: CORAL
# Required import: import torch
# Or: from torch import ones
def CORAL(source, target):
    d = source.size(1)
    ns, nt = source.size(0), target.size(0)
    # source covariance
    tmp_s = torch.ones((1, ns)).to(DEVICE) @ source
    cs = (source.t() @ source - (tmp_s.t() @ tmp_s) / ns) / (ns - 1)
    # target covariance
    tmp_t = torch.ones((1, nt)).to(DEVICE) @ target
    ct = (target.t() @ target - (tmp_t.t() @ tmp_t) / nt) / (nt - 1)
    # frobenius norm
    loss = (cs - ct).pow(2).sum().sqrt()
    loss = loss / (4 * d * d)
    return loss
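The torch.ones((1, ns)) @ source product above is just a matrix-multiplication way of taking column sums, so cs and ct are the usual unbiased covariance matrices. A small standalone check (DEVICE from the snippet is dropped and plain CPU tensors are used):

import torch

torch.manual_seed(0)
X = torch.randn(8, 4)                       # ns = 8 samples, d = 4 features
ones_row = torch.ones((1, 8))

# ones_row @ X equals the column-wise sum of X
assert torch.allclose(ones_row @ X, X.sum(dim=0, keepdim=True))

tmp = ones_row @ X
cov_ones = (X.t() @ X - (tmp.t() @ tmp) / 8) / (8 - 1)

Xc = X - X.mean(dim=0, keepdim=True)        # explicit mean-centering gives the same covariance
cov_centered = Xc.t() @ Xc / (8 - 1)
assert torch.allclose(cov_ones, cov_centered, atol=1e-6)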
Example 10: __init__
# Required import: import torch
# Or: from torch import ones
def __init__(self, num_features, eps=1e-5, momentum=0.9, affine=True):
    super(_SwitchNorm, self).__init__()
    self.num_features = num_features
    self.eps = eps
    self.momentum = momentum
    self.affine = affine
    if self.affine:
        self.weight = nn.Parameter(torch.Tensor(num_features))
        self.bias = nn.Parameter(torch.Tensor(num_features))
    else:
        self.register_parameter('weight', None)
        self.register_parameter('bias', None)
    self.mean_weight = nn.Parameter(torch.ones(3))
    self.var_weight = nn.Parameter(torch.ones(3))
    self.register_buffer('running_mean', torch.zeros(num_features))
    self.register_buffer('running_var', torch.ones(num_features))
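The two torch.ones(3) parameters are, in switchable normalization, the mixing weights over three kinds of statistics (instance, layer, and batch); the forward pass, which is not shown here, typically softmax-normalizes them, so initializing with ones starts the mixture uniform. A quick illustration of that initial state (this is an assumption about the surrounding class, stated only for context):

import torch
import torch.nn.functional as F

mean_weight = torch.ones(3)
print(F.softmax(mean_weight, dim=0))   # tensor([0.3333, 0.3333, 0.3333]) -- equal weight per statistic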
Example 11: _feature_window_function
# Required import: import torch
# Or: from torch import ones
def _feature_window_function(window_type: str,
                             window_size: int,
                             blackman_coeff: float,
                             device: torch.device,
                             dtype: int,
                             ) -> Tensor:
    r"""Returns a window function with the given type and size
    """
    if window_type == HANNING:
        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype)
    elif window_type == HAMMING:
        return torch.hamming_window(window_size, periodic=False, alpha=0.54, beta=0.46, device=device, dtype=dtype)
    elif window_type == POVEY:
        # like hanning but goes to zero at edges
        return torch.hann_window(window_size, periodic=False, device=device, dtype=dtype).pow(0.85)
    elif window_type == RECTANGULAR:
        return torch.ones(window_size, device=device, dtype=dtype)
    elif window_type == BLACKMAN:
        a = 2 * math.pi / (window_size - 1)
        window_function = torch.arange(window_size, device=device, dtype=dtype)
        # can't use torch.blackman_window as they use different coefficients
        return (blackman_coeff - 0.5 * torch.cos(a * window_function) +
                (0.5 - blackman_coeff) * torch.cos(2 * a * window_function)).to(device=device, dtype=dtype)
    else:
        raise Exception('Invalid window type ' + window_type)
Example 12: _fade_in
# Required import: import torch
# Or: from torch import ones
def _fade_in(self, waveform_length: int) -> Tensor:
    fade = torch.linspace(0, 1, self.fade_in_len)
    ones = torch.ones(waveform_length - self.fade_in_len)

    if self.fade_shape == "linear":
        fade = fade

    if self.fade_shape == "exponential":
        fade = torch.pow(2, (fade - 1)) * fade

    if self.fade_shape == "logarithmic":
        fade = torch.log10(.1 + fade) + 1

    if self.fade_shape == "quarter_sine":
        fade = torch.sin(fade * math.pi / 2)

    if self.fade_shape == "half_sine":
        fade = torch.sin(fade * math.pi - math.pi / 2) / 2 + 0.5

    return torch.cat((fade, ones)).clamp_(0, 1)
Example 13: _fade_out
# Required import: import torch
# Or: from torch import ones
def _fade_out(self, waveform_length: int) -> Tensor:
    fade = torch.linspace(0, 1, self.fade_out_len)
    ones = torch.ones(waveform_length - self.fade_out_len)

    if self.fade_shape == "linear":
        fade = - fade + 1

    if self.fade_shape == "exponential":
        fade = torch.pow(2, - fade) * (1 - fade)

    if self.fade_shape == "logarithmic":
        fade = torch.log10(1.1 - fade) + 1

    if self.fade_shape == "quarter_sine":
        fade = torch.sin(fade * math.pi / 2 + math.pi / 2)

    if self.fade_shape == "half_sine":
        fade = torch.sin(fade * math.pi + math.pi / 2) / 2 + 0.5

    return torch.cat((ones, fade)).clamp_(0, 1)
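Both helpers build a per-sample gain curve and pad it with torch.ones over the region that should stay at full volume; multiplying a waveform by the fade-in and fade-out masks applies the effect. A minimal standalone sketch of the same idea for the linear shape (the lengths are made up here, and the class context above is omitted):

import torch

waveform = torch.randn(1, 16000)                      # dummy mono signal
fade_in_len, fade_out_len = 4000, 2000
n = waveform.size(-1)

fade_in = torch.cat((torch.linspace(0, 1, fade_in_len),
                     torch.ones(n - fade_in_len)))    # ramp up, then unity gain
fade_out = torch.cat((torch.ones(n - fade_out_len),
                      torch.linspace(1, 0, fade_out_len)))  # unity gain, then ramp down

faded = waveform * fade_in * fade_out                 # broadcasts over the channel dimension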
Example 14: __init__
# Required import: import torch
# Or: from torch import ones
def __init__(self, hidden_size, eps=1e-12):
    """Construct a layernorm module in the TF style (epsilon inside the square root).
    """
    super(BertLayerNorm, self).__init__()
    self.weight = nn.Parameter(torch.ones(hidden_size))
    self.bias = nn.Parameter(torch.zeros(hidden_size))
    self.variance_epsilon = eps
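Only the constructor is shown: the torch.ones weight is the per-feature scale and the zeros bias is the shift. For reference, the matching forward pass in the original BERT port normalizes over the last dimension with the epsilon inside the square root, roughly as sketched below (reconstructed here, not part of the snippet above):

def forward(self, x):
    u = x.mean(-1, keepdim=True)                          # per-token mean over the hidden dimension
    s = (x - u).pow(2).mean(-1, keepdim=True)             # biased variance
    x = (x - u) / torch.sqrt(s + self.variance_epsilon)   # epsilon inside the square root (TF style)
    return self.weight * x + self.bias                    # learned scale and shift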
Example 15: generate_pseudo_gtbox
# Required import: import torch
# Or: from torch import ones
def generate_pseudo_gtbox(boxes, cls_prob, im_labels):
    """Get proposals from fuse_matrix
    inputs are all variables"""
    pre_nms_topN = 50
    nms_Thresh = 0.1
    num_images, num_classes = im_labels.size()
    boxes = boxes[:, 1:]
    assert num_images == 1, 'batch size should be equal to 1'
    im_labels_tmp = im_labels[0, :]
    labelList = im_labels_tmp.data.nonzero().view(-1)
    gt_boxes = []
    gt_classes = []
    gt_scores = []
    for i in labelList:
        scores, order = cls_prob[:, i].contiguous().view(-1).sort(descending=True)
        if pre_nms_topN > 0:
            order = order[:pre_nms_topN]
            scores = scores[:pre_nms_topN].view(-1, 1)
        proposals = boxes[order.data, :]
        keep = nms(torch.cat((proposals, scores), 1).data, nms_Thresh)
        proposals = proposals[keep, :]
        scores = scores[keep, ]
        gt_boxes.append(proposals)
        gt_classes.append(torch.ones(keep.size(0), 1) * (i + 1))  # class index is i + 1, reserving 0 for the background
        gt_scores.append(scores.view(-1, 1))
    gt_boxes = torch.cat(gt_boxes)
    gt_classes = torch.cat(gt_classes)
    gt_scores = torch.cat(gt_scores)
    proposals = {'gt_boxes': gt_boxes,
                 'gt_classes': gt_classes,
                 'gt_scores': gt_scores}
    return torch.cat([gt_boxes, gt_classes], 1), proposals
Author: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines of code: 43 | Source file: generate_pseudo_gtbox.py