This article collects typical usage examples of the torch.full_like method in Python. If you have been wondering what torch.full_like does, how to call it, or what it looks like in real code, the curated examples below should help; you can also explore further usage examples from the torch module that the method belongs to.
The following shows 15 code examples of the torch.full_like method, ordered by popularity.
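Before the examples, a one-line refresher: torch.full_like(input, fill_value) returns a tensor with the same shape, dtype, device, and layout as input, filled with fill_value. A minimal standalone demo:

import torch

x = torch.randn(2, 3)
y = torch.full_like(x, 7.0)  # same shape/dtype/device as x, every element 7.0
assert y.shape == x.shape and torch.all(y == 7.0)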
Example 1: forward
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def forward(self, x):
"""
Normalize activations.
:param x: input activations
:return: normalized activations
"""
if self.training: # compute the pooled moments for the context and save off the moments and context size
        alpha = self.sigmoid(self.a * x.size(0) + self.b)  # compute alpha with context size
batch_mean, batch_var = self._compute_batch_moments(x)
pooled_mean, pooled_var = self._compute_pooled_moments(x, alpha, batch_mean, batch_var,
self._get_augment_moment_fn())
self.context_batch_mean = batch_mean
self.context_batch_var = batch_var
        self.context_size = torch.full_like(self.context_size, x.size(0))
else: # compute the pooled moments for the target
alpha = self.sigmoid(self.a * self.context_size + self.b) # compute alpha with saved context size
pooled_mean, pooled_var = self._compute_pooled_moments(x, alpha, self.context_batch_mean,
self.context_batch_var,
self._get_augment_moment_fn())
return self._normalize(x, pooled_mean, pooled_var) # normalize
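In this example, full_like overwrites the saved context_size buffer while preserving its dtype and device. A minimal sketch of that one step, assuming context_size is a one-element float buffer as the surrounding class suggests:

import torch

context_size = torch.zeros(1)   # assumed: a registered one-element buffer
x = torch.randn(16, 64)         # a hypothetical batch of activations
# full_like keeps the buffer's dtype/device, so the later
# alpha = sigmoid(a * context_size + b) computes on consistent tensors
context_size = torch.full_like(context_size, x.size(0))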
Example 2: __init__
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = dictionary.add_symbol('<mask>')
dictionary.pad_to_multiple_(8) # often faster if divisible by 8
mask_idx = 0
pad_idx = 1
seq = torch.arange(args.tokens_per_sample) + pad_idx + 1
mask = torch.arange(2, args.tokens_per_sample, 7) # ~15%
src = seq.clone()
src[mask] = mask_idx
tgt = torch.full_like(seq, pad_idx)
tgt[mask] = seq[mask]
self.dummy_src = src
self.dummy_tgt = tgt
Example 3: drop_word
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def drop_word(self, words):
r"""
按照设定随机将words设置为unknown_index。
:param torch.LongTensor words: batch_size x max_len
:return:
"""
if self.word_dropout > 0 and self.training:
with torch.no_grad():
mask = torch.full_like(words, fill_value=self.word_dropout, dtype=torch.float, device=words.device)
            mask = torch.bernoulli(mask).eq(1)  # the larger word_dropout is, the more positions become 1
            pad_mask = words.ne(self._word_pad_index)
            mask = pad_mask & mask  # never replace padding with unk
            if self._word_sep_index != -100:
                not_sep_mask = words.ne(self._word_sep_index)
                mask = mask & not_sep_mask
            if self._word_cls_index != -100:
                not_cls_mask = words.ne(self._word_cls_index)
                mask = mask & not_cls_mask
words = words.masked_fill(mask, self._word_unk_index)
return words
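The core pattern here, full_like to broadcast a dropout probability and bernoulli to sample the mask, can be tested in isolation. A minimal sketch with made-up indices:

import torch

words = torch.tensor([[5, 9, 2, 0, 0]])         # hypothetical token ids, 0 = pad
word_dropout, pad_index, unk_index = 0.5, 0, 3  # assumed special indices
mask = torch.full_like(words, word_dropout, dtype=torch.float)
mask = torch.bernoulli(mask).eq(1)              # True where a word may be dropped
mask = mask & words.ne(pad_index)               # never replace padding
dropped = words.masked_fill(mask, unk_index)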
Example 4: batch_preprocess
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def batch_preprocess(batch, pad_idx, eos_idx, reverse=False):
batch_pos, batch_neg = batch
diff = batch_pos.size(1) - batch_neg.size(1)
if diff < 0:
pad = torch.full_like(batch_neg[:, :-diff], pad_idx)
batch_pos = torch.cat((batch_pos, pad), 1)
elif diff > 0:
pad = torch.full_like(batch_pos[:, :diff], pad_idx)
batch_neg = torch.cat((batch_neg, pad), 1)
pos_styles = torch.ones_like(batch_pos[:, 0])
neg_styles = torch.zeros_like(batch_neg[:, 0])
if reverse:
batch_pos, batch_neg = batch_neg, batch_pos
pos_styles, neg_styles = neg_styles, pos_styles
tokens = torch.cat((batch_pos, batch_neg), 0)
lengths = get_lengths(tokens, eos_idx)
styles = torch.cat((pos_styles, neg_styles), 0)
return tokens, lengths, styles
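The padding trick slices an existing batch purely to get a correctly shaped, correctly typed tensor for full_like. A toy reproduction:

import torch

pad_idx = 1
batch_pos = torch.randint(2, 10, (4, 7))      # 4 sequences of length 7
batch_neg = torch.randint(2, 10, (4, 5))      # 4 sequences of length 5
diff = batch_pos.size(1) - batch_neg.size(1)  # 2, so batch_neg needs padding
pad = torch.full_like(batch_pos[:, :diff], pad_idx)
batch_neg = torch.cat((batch_neg, pad), 1)
assert batch_neg.size(1) == batch_pos.size(1)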
Example 5: test_out
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def test_out(test, reduce, dtype, device):
src = tensor(test['src'], dtype, device)
index = tensor(test['index'], torch.long, device)
dim = test['dim']
expected = tensor(test[reduce], dtype, device)
out = torch.full_like(expected, -2)
getattr(torch_scatter, 'scatter_' + reduce)(src, index, dim, out)
if reduce == 'sum' or reduce == 'add':
expected = expected - 2
elif reduce == 'mean':
        expected = out  # We cannot really test this here.
elif reduce == 'min':
expected = expected.fill_(-2)
elif reduce == 'max':
expected[expected == 0] = -2
else:
raise ValueError
assert torch.all(out == expected)
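Pre-filling the output with a sentinel value lets the test distinguish slots the scatter wrote from slots it left alone. A minimal version of the same idea using plain torch.Tensor.scatter_add_ instead of torch_scatter:

import torch

src = torch.tensor([1.0, 2.0, 3.0])
index = torch.tensor([0, 0, 2])
out = torch.full_like(src, -2.0)  # sentinel marks untouched slots
out.scatter_add_(0, index, src)   # out becomes [-2+1+2, -2, -2+3] = [1.0, -2.0, 1.0]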
Example 6: test_smoothed_box_prior_log_prob
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def test_smoothed_box_prior_log_prob(self, cuda=False):
device = torch.device("cuda") if cuda else torch.device("cpu")
a, b = torch.zeros(2, device=device), torch.ones(2, device=device)
sigma = 0.1
prior = SmoothedBoxPrior(a, b, sigma)
self.assertTrue(torch.equal(prior.a, a))
self.assertTrue(torch.equal(prior.b, b))
self.assertTrue(torch.equal(prior.sigma, torch.full_like(prior.a, sigma)))
self.assertTrue(torch.all(approx_equal(prior._M, torch.full_like(prior.a, 1.6073))))
t = torch.tensor([0.5, 1.1], device=device)
self.assertAlmostEqual(prior.log_prob(t).item(), -0.9473, places=4)
t = torch.tensor([[0.5, 1.1], [0.1, 0.25]], device=device)
log_prob_expected = torch.tensor([-0.947347, -0.447347], device=t.device)
self.assertTrue(torch.all(approx_equal(prior.log_prob(t), log_prob_expected)))
with self.assertRaises(RuntimeError):
prior.log_prob(torch.zeros(3, device=device))
Example 7: sample_action
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def sample_action(self, scores: torch.Tensor) -> rlt.ActorOutput:
assert scores.dim() == 2, (
"scores dim is %d" % scores.dim()
) # batch_size x num_actions
batch_size, num_actions = scores.shape
# pyre-fixme[16]: `Tensor` has no attribute `argmax`.
argmax = F.one_hot(scores.argmax(dim=1), num_actions).bool()
rand_prob = self.epsilon / num_actions
    p = torch.full_like(scores, rand_prob)  # a scores-shaped tensor of uniform exploration probabilities
greedy_prob = 1 - self.epsilon + rand_prob
p[argmax] = greedy_prob
m = torch.distributions.Categorical(probs=p)
raw_action = m.sample()
action = F.one_hot(raw_action, num_actions)
assert action.shape == (batch_size, num_actions)
log_prob = m.log_prob(raw_action)
assert log_prob.shape == (batch_size,)
return rlt.ActorOutput(action=action, log_prob=log_prob)
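With the arguments in the right order (full_like takes the reference tensor first, then the fill value), the epsilon-greedy distribution can be verified standalone:

import torch
import torch.nn.functional as F

epsilon = 0.1
scores = torch.tensor([[0.2, 0.9, 0.5]])  # batch_size=1, num_actions=3
num_actions = scores.size(1)
rand_prob = epsilon / num_actions
p = torch.full_like(scores, rand_prob)    # uniform exploration mass
argmax = F.one_hot(scores.argmax(dim=1), num_actions).bool()
p[argmax] = 1 - epsilon + rand_prob       # the greedy action gets the rest
assert torch.allclose(p.sum(dim=1), torch.ones(1))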
Example 8: next_inputs
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def next_inputs(self, embedding_fn: EmbeddingFn,
time: int, outputs: torch.Tensor,
sample_ids: torch.LongTensor) -> NextInputTuple:
del outputs # unused by next_inputs_fn
if self._use_finish:
hard_ids = torch.argmax(sample_ids, dim=-1)
finished = (hard_ids == self._end_token)
else:
finished = torch.zeros_like(self._start_tokens, dtype=torch_bool)
if self._stop_gradient:
sample_ids = sample_ids.detach()
indices = torch.arange(sample_ids.size(-1), device=sample_ids.device)
times = torch.full_like(indices, time + 1)
embeddings = embedding_fn(indices, times)
next_inputs = torch.matmul(sample_ids, embeddings)
return (finished, next_inputs)
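Here full_like builds a position vector with the same length and dtype as the vocabulary-index vector, so embedding_fn can look up (token, position) pairs. A sketch with a hypothetical table-based embedding_fn:

import torch

vocab_size, time = 5, 3
indices = torch.arange(vocab_size)          # all vocabulary ids
times = torch.full_like(indices, time + 1)  # the same position for every id
tok_table = torch.randn(vocab_size, 8)      # hypothetical token embeddings
pos_table = torch.randn(100, 8)             # hypothetical position embeddings
embeddings = tok_table[indices] + pos_table[times]  # shape (vocab_size, 8)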
Example 9: drop_word
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def drop_word(self, words):
"""
按照设定随机将words设置为unknown_index。
:param torch.LongTensor words: batch_size x max_len
:return:
"""
if self.word_dropout > 0 and self.training:
with torch.no_grad():
            if self._word_sep_index:  # the sep token must never be dropped
                sep_mask = words.eq(self._word_sep_index)
            mask = torch.full_like(words, fill_value=self.word_dropout, dtype=torch.float, device=words.device)
            mask = torch.bernoulli(mask).eq(1)  # the larger word_dropout is, the more positions become 1
            pad_mask = words.ne(0)
            mask = pad_mask & mask  # never replace padding with unk
words = words.masked_fill(mask, self._word_unk_index)
if self._word_sep_index:
words.masked_fill_(sep_mask, self._word_sep_index)
return words
Example 10: _tensorize_baseline
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def _tensorize_baseline(
inputs: Tuple[Tensor, ...], baselines: Tuple[Union[int, float, Tensor], ...]
) -> Tuple[Tensor, ...]:
def _tensorize_single_baseline(baseline, input):
if isinstance(baseline, (int, float)):
return torch.full_like(input, baseline)
if input.shape[0] > baseline.shape[0] and baseline.shape[0] == 1:
return torch.cat([baseline] * input.shape[0])
return baseline
    assert isinstance(inputs, tuple) and isinstance(baselines, tuple), (
        "inputs and baselines must "
        "have tuple type but found baselines: {} and inputs: {}".format(
            type(baselines), type(inputs)
        )
    )
return tuple(
_tensorize_single_baseline(baseline, input)
for baseline, input in zip(baselines, inputs)
)
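A quick check of the scalar branch, which is where full_like does the work of broadcasting a number into a baseline tensor:

import torch

inputs = (torch.randn(4, 3),)
baselines = (0.0,)  # scalar baseline
baseline = torch.full_like(inputs[0], baselines[0])
assert baseline.shape == inputs[0].shape and torch.all(baseline == 0.0)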
Example 11: __init__
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = dictionary.add_symbol('<mask>')
assert len(dictionary) % 8 == 0
mask_idx = 0
pad_idx = 1
seq = torch.arange(args.tokens_per_sample) + pad_idx + 1
mask = torch.arange(2, args.tokens_per_sample, 7) # ~15%
src = seq.clone()
src[mask] = mask_idx
tgt = torch.full_like(seq, pad_idx)
tgt[mask] = seq[mask]
self.dummy_src = src
self.dummy_tgt = tgt
Example 12: _get_model
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def _get_model(self, dtype=torch.float):
state_dict = {
"mean_module.constant": torch.tensor([-0.0066]),
"covar_module.raw_outputscale": torch.tensor(1.0143),
"covar_module.base_kernel.raw_lengthscale": torch.tensor([[-0.99]]),
"covar_module.base_kernel.lengthscale_prior.concentration": torch.tensor(
3.0
),
"covar_module.base_kernel.lengthscale_prior.rate": torch.tensor(6.0),
"covar_module.outputscale_prior.concentration": torch.tensor(2.0),
"covar_module.outputscale_prior.rate": torch.tensor(0.1500),
}
train_x = torch.linspace(0, 1, 10, device=self.device, dtype=dtype).unsqueeze(
-1
)
train_y = torch.sin(train_x * (2 * math.pi))
noise = torch.tensor(NEI_NOISE, device=self.device, dtype=dtype)
train_y += noise
train_yvar = torch.full_like(train_y, 0.25 ** 2)
model = FixedNoiseGP(train_X=train_x, train_Y=train_y, train_Yvar=train_yvar)
model.load_state_dict(state_dict)
model.to(train_x)
model.eval()
return model
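The full_like line is what gives FixedNoiseGP a homoskedastic noise model: every training observation is assigned the same variance. In isolation, with a made-up noise level:

import torch

train_y = torch.randn(10, 1)
noise_sd = 0.25  # assumed observation-noise standard deviation
train_yvar = torch.full_like(train_y, noise_sd ** 2)  # one variance for all points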
Example 13: __init__
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def __init__(self, probs=None, logits=None, validate_args=None):
if probs is not None:
new_probs = torch.zeros_like(probs, dtype=torch.float)
new_probs[probs == probs.max(dim=-1, keepdim=True)[0]] = 1.0
probs = new_probs
elif logits is not None:
new_logits = torch.full_like(logits, -1e8, dtype=torch.float)
new_logits[logits == logits.max(dim=-1, keepdim=True)[0]] = 1.0
logits = new_logits
super().__init__(probs=probs, logits=logits, validate_args=validate_args)
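A standalone check of the logits branch: filling with -1e8 and raising only the argmax makes the resulting Categorical effectively deterministic:

import torch

logits = torch.tensor([0.3, 2.0, -1.0])
new_logits = torch.full_like(logits, -1e8)
new_logits[logits == logits.max(dim=-1, keepdim=True)[0]] = 1.0
dist = torch.distributions.Categorical(logits=new_logits)
assert dist.sample().item() == 1  # always the argmax index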
Example 14: _filter_boxes
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
# This example also uses numpy: import numpy as np
def _filter_boxes(self, bbox, last, gt):
"""Only keep boxes with positive height and width, and not-gt.
"""
last_bbox = last.bbox
gt_bbox = gt.bbox
ws = bbox[:, 2] - bbox[:, 0] + 1
hs = bbox[:, 3] - bbox[:, 1] + 1
for i in range(gt_bbox.shape[0]):
last_bbox = torch.where(last_bbox == gt_bbox[i], torch.full_like(last_bbox, -1), last_bbox)
s = sum([last_bbox[:, 0], last_bbox[:, 1], last_bbox[:, 2], last_bbox[:, 3]])
keep = np.where((ws.cpu() > 0) & (hs.cpu() > 0) & (s.cpu() > 0))[0]
return keep
Example 15: _match_to_lbl
# Required import: import torch [as alias]
# Or: from torch import full_like [as alias]
def _match_to_lbl(anchors, bbx, match):
pos, neg = match >= 0, match == -1
# Objectness labels from matching tensor
obj_lbl = torch.full_like(match, -1)
obj_lbl[neg] = 0
obj_lbl[pos] = 1
# Bounding box regression labels from matching tensor
bbx_lbl = anchors.new_zeros(len(bbx), anchors.size(0), anchors.size(1))
for i, (pos_i, bbx_i, match_i) in enumerate(zip(pos, bbx, match)):
if pos_i.any():
bbx_lbl[i, pos_i] = calculate_shift(anchors[pos_i], bbx_i[match_i[pos_i]])
return obj_lbl, bbx_lbl
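The objectness-label encoding can be checked on its own: match >= 0 marks a positive anchor, -1 a negative, and anything below -1 stays ignored thanks to the full_like(-1) default:

import torch

match = torch.tensor([3, -1, -2, 0])  # hypothetical matching results
obj_lbl = torch.full_like(match, -1)  # default: ignore
obj_lbl[match == -1] = 0              # negatives
obj_lbl[match >= 0] = 1               # positives
# obj_lbl is now tensor([1, 0, -1, 1])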