This article collects typical usage examples of the Python method torch.nn.functional.one_hot. If you have been wondering what functional.one_hot does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from its containing module, torch.nn.functional.
The sections below show 15 code examples of functional.one_hot, ordered by popularity by default.
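Before the examples, a minimal self-contained sketch of the call itself, for orientation (the indices and num_classes below are illustrative values, not taken from any example): F.one_hot takes a LongTensor of class indices and returns an int64 tensor with a trailing one-hot dimension.

import torch
import torch.nn.functional as F

labels = torch.tensor([0, 2, 1, 3])         # int64 class indices
onehot = F.one_hot(labels, num_classes=4)   # shape [4, 4], dtype int64
# tensor([[1, 0, 0, 0],
#         [0, 0, 1, 0],
#         [0, 1, 0, 0],
#         [0, 0, 0, 1]])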
Example 1: _get_body
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def _get_body(self, x, target):
    cos_t = torch.gather(x, 1, target.unsqueeze(1))  # cos(theta_yi)
    if self.easy_margin:
        cond = torch.relu(cos_t)
    else:
        cond_v = cos_t - self.threshold
        cond = torch.relu(cond_v)
    cond = cond.bool()

    # Apex would convert FP16 to FP32 here
    # cos(theta_yi + m)
    new_zy = torch.cos(torch.acos(cos_t) + self.m).type(cos_t.dtype)
    if self.easy_margin:
        zy_keep = cos_t
    else:
        zy_keep = cos_t - self.mm  # (cos(theta_yi) - sin(pi - m)*m)
    new_zy = torch.where(cond, new_zy, zy_keep)
    diff = new_zy - cos_t  # cos(theta_yi + m) - cos(theta_yi)

    gt_one_hot = F.one_hot(target, num_classes=self.classes)
    body = gt_one_hot * diff
    return body
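The role of F.one_hot here is to turn the margin shift into a sparse update that touches only each sample's target column. A hedged, standalone sketch with toy values (the margin m and the logits below are illustrative, not from the original class):

import torch
import torch.nn.functional as F

m = 0.5                                          # illustrative margin
x = torch.tensor([[0.9, 0.1], [0.2, 0.8]])       # cos(theta) logits, [N, classes]
target = torch.tensor([0, 1])
cos_t = torch.gather(x, 1, target.unsqueeze(1))        # [N, 1]
diff = torch.cos(torch.acos(cos_t) + m) - cos_t        # margin shift per sample
body = F.one_hot(target, num_classes=2) * diff         # nonzero only at targets
out = x + body                                         # only target logits change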
Example 2: intersection_and_union
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def intersection_and_union(pred, target, num_classes, batch=None):
    r"""Computes intersection and union of predictions.

    Args:
        pred (LongTensor): The predictions.
        target (LongTensor): The targets.
        num_classes (int): The number of classes.
        batch (LongTensor): The assignment vector which maps each pred-target
            pair to an example.

    :rtype: (:class:`LongTensor`, :class:`LongTensor`)
    """
    pred, target = F.one_hot(pred, num_classes), F.one_hot(target, num_classes)

    if batch is None:
        i = (pred & target).sum(dim=0)
        u = (pred | target).sum(dim=0)
    else:
        i = scatter_add(pred & target, batch, dim=0)
        u = scatter_add(pred | target, batch, dim=0)

    return i, u
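A hedged usage sketch with batch=None, so the torch_scatter dependency (scatter_add) is not needed; the tensors are toy values:

import torch
import torch.nn.functional as F

pred = torch.tensor([0, 1, 1, 2])
target = torch.tensor([0, 1, 2, 2])
p, t = F.one_hot(pred, 3), F.one_hot(target, 3)
i = (p & t).sum(dim=0)         # per-class intersection: tensor([1, 1, 1])
u = (p | t).sum(dim=0)         # per-class union:        tensor([1, 2, 2])
iou = i.float() / u.float()    # tensor([1.0, 0.5, 0.5])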
Example 3: parse_sdf
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def parse_sdf(src):
    src = src.split('\n')[3:]
    num_atoms, num_bonds = [int(item) for item in src[0].split()[:2]]

    atom_block = src[1:num_atoms + 1]
    pos = parse_txt_array(atom_block, end=3)
    x = torch.tensor([elems[item.split()[3]] for item in atom_block])
    x = F.one_hot(x, num_classes=len(elems))

    bond_block = src[1 + num_atoms:1 + num_atoms + num_bonds]
    row, col = parse_txt_array(bond_block, end=2, dtype=torch.long).t() - 1
    row, col = torch.cat([row, col], dim=0), torch.cat([col, row], dim=0)
    edge_index = torch.stack([row, col], dim=0)
    edge_attr = parse_txt_array(bond_block, start=2, end=3) - 1
    edge_attr = torch.cat([edge_attr, edge_attr], dim=0)
    edge_index, edge_attr = coalesce(edge_index, edge_attr, num_atoms,
                                     num_atoms)

    data = Data(x=x, edge_index=edge_index, edge_attr=edge_attr, pos=pos)
    return data
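The one_hot call above builds categorical atom features. A hedged sketch of just that step in isolation, assuming elems is a dict mapping element symbols to integer indices (the dict and symbols below are illustrative):

import torch
import torch.nn.functional as F

elems = {'H': 0, 'C': 1, 'N': 2, 'O': 3, 'F': 4}   # assumed mapping
symbols = ['C', 'O', 'H', 'H']                     # as parsed from an atom block
x = torch.tensor([elems[s] for s in symbols])
x = F.one_hot(x, num_classes=len(elems))           # [num_atoms, num_elements]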
Example 4: focal_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def focal_loss(self, x, y):
    '''Focal loss.

    Args:
      x: (tensor) predictions, sized [N, D].
      y: (tensor) target labels, sized [N,].

    Return:
      (tensor) focal loss.
    '''
    alpha = 0.25
    gamma = 2

    t = F.one_hot(y.data, 1 + self.num_classes)  # [N, 21]
    t = t[:, 1:]  # exclude background
    t = Variable(t).float()  # one_hot returns int64; the BCE below needs float

    p = x.sigmoid()
    pt = p * t + (1 - p) * (1 - t)         # pt = p if t > 0 else 1-p
    w = alpha * t + (1 - alpha) * (1 - t)  # w = alpha if t > 0 else 1-alpha
    w = w * (1 - pt).pow(gamma)
    return F.binary_cross_entropy_with_logits(x, t, w, reduction='sum')
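A hedged, standalone rerun of the weighting above with toy shapes; label 0 is background and is dropped after one-hot encoding (num_classes and the tensors are illustrative, not from the original class):

import torch
import torch.nn.functional as F

num_classes = 3                                   # foreground classes
x = torch.randn(4, num_classes)                   # logits [N, D]
y = torch.tensor([0, 1, 2, 3])                    # 0 = background
t = F.one_hot(y, 1 + num_classes)[:, 1:].float()  # [N, D], background removed
p = x.sigmoid()
pt = p * t + (1 - p) * (1 - t)
w = (0.25 * t + 0.75 * (1 - t)) * (1 - pt).pow(2)
loss = F.binary_cross_entropy_with_logits(x, t, w, reduction='sum')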
Example 5: focal_loss_alt
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def focal_loss_alt(self, x, y, alpha=0.25, gamma=1.5):
    '''Alternative focal loss.

    Args:
      x: (tensor) predictions, sized [N, D].
      y: (tensor) target labels, sized [N,].

    Return:
      (tensor) focal loss.
    '''
    t = F.one_hot(y, self.num_classes + 1)
    t = t[:, 1:]  # exclude background

    xt = x * (2 * t - 1)  # xt = x if t > 0 else -x
    pt = (2 * xt + 1).sigmoid()
    pt = pt.clamp(1e-7, 1.0)

    w = alpha * t + (1 - alpha) * (1 - t)  # w = alpha if t > 0 else 1-alpha
    loss = -w * pt.log() / gamma
    return loss.sum()
Example 6: forward
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def forward(self, input, target):
    """
    Args:
        input: [B * T, V]
        target: [B * T]

    Returns:
        cross entropy: [1]
    """
    mask = (target == self.ignore_index).unsqueeze(-1)
    q = F.one_hot(target.long(), self.vocab_size).type(torch.float32)
    u = 1.0 / self.vocab_size
    q_prime = (1.0 - self.label_smoothing) * q + self.label_smoothing * u
    q_prime = q_prime.masked_fill(mask, 0)

    ce = self.cross_entropy_with_logits(q_prime, input)
    if self.reduction == 'mean':
        lengths = torch.sum(target != self.ignore_index)
        return ce.sum() / lengths
    elif self.reduction == 'sum':
        return ce.sum()
    else:
        raise NotImplementedError
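A hedged, standalone sketch of the smoothed-target construction above; vocab_size, the smoothing factor, and the token ids are illustrative, with 0 used as the padding id here:

import torch
import torch.nn.functional as F

vocab_size, eps, pad_id = 5, 0.1, 0
target = torch.tensor([2, 4, 0])              # 0 marks padding here
mask = (target == pad_id).unsqueeze(-1)
q = F.one_hot(target, vocab_size).float()
q_prime = (1.0 - eps) * q + eps / vocab_size  # q' = (1-eps)*q + eps*u, u = 1/V
q_prime = q_prime.masked_fill(mask, 0)        # padded rows contribute nothing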
Example 7: _sqrt_hessian_sampled
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def _sqrt_hessian_sampled(self, module, g_inp, g_out, mc_samples=1):
    self._check_2nd_order_parameters(module)

    M = mc_samples
    C = module.input0.shape[1]

    probs = self._get_probs(module)
    V_dim = 0
    probs_unsqueezed = probs.unsqueeze(V_dim).repeat(M, 1, 1)

    multi = multinomial(probs, M, replacement=True)
    classes = one_hot(multi, num_classes=C)
    classes = einsum("nvc->vnc", classes).float()

    sqrt_mc_h = (probs_unsqueezed - classes) / sqrt(M)

    if module.reduction == "mean":
        N = module.input0.shape[0]
        sqrt_mc_h /= sqrt(N)

    return sqrt_mc_h
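A hedged shape-check of the sampling step in isolation (N, C, M are toy sizes; the surrounding BackPACK module context is omitted):

import torch
from torch import einsum

N, C, M = 4, 3, 2
probs = torch.softmax(torch.randn(N, C), dim=1)
samples = torch.multinomial(probs, M, replacement=True)        # [N, M]
classes = torch.nn.functional.one_hot(samples, num_classes=C)  # [N, M, C]
classes = einsum("nvc->vnc", classes).float()                  # [M, N, C]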
Example 8: test_multi_class_seg_2d
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def test_multi_class_seg_2d(self):
    num_classes = 6  # labels 0 to 5
    # define 2d examples
    target = torch.tensor([[0, 0, 0, 0], [0, 1, 2, 0], [0, 3, 4, 0], [0, 0, 0, 0]])
    # add another dimension corresponding to the batch (batch size = 1 here)
    target = target.unsqueeze(0)  # shape (1, H, W)
    pred_very_good = 1000 * F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2).float()

    # initialize the focal loss
    loss = FocalLoss()

    # focal loss for pred_very_good should be close to 0
    target_one_hot = F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2)  # test one-hot target
    target = target.unsqueeze(1)  # shape (1, 1, H, W)

    focal_loss_good = float(loss(pred_very_good, target).cpu())
    self.assertAlmostEqual(focal_loss_good, 0.0, places=3)

    focal_loss_good = float(loss(pred_very_good, target_one_hot).cpu())
    self.assertAlmostEqual(focal_loss_good, 0.0, places=3)
Example 9: test_compute_policy_gradient_loss
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def test_compute_policy_gradient_loss(self):
    T, B, N = self.logits.shape

    # Calculate the cross-entropy loss with the formula:
    #   loss = -sum_over_j(y_j * log(p_j))
    # where:
    # - `y_j` indicates whether the action corresponding to index j was taken
    #   (hence y is a one-hot array of size == number of actions).
    # - `p_j` is the value of the softmax logit corresponding to the jth action.
    # In our implementation, we also multiply by the advantages.
    labels = F.one_hot(torch.from_numpy(self.actions), num_classes=N).numpy()
    cross_entropy_loss = -labels * np.log(_softmax(self.logits))
    ground_truth_value = np.sum(
        cross_entropy_loss * self.advantages.reshape(T, B, 1)
    )

    calculated_value = polybeast.compute_policy_gradient_loss(
        torch.from_numpy(self.logits),
        torch.from_numpy(self.actions),
        torch.from_numpy(self.advantages),
    )
    assert_allclose(ground_truth_value, calculated_value.item())
Example 10: cross_entropy
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def cross_entropy(pred,
                  label,
                  weight=None,
                  reduction='mean',
                  avg_factor=None,
                  label_smooth=None):
    # element-wise losses
    if label_smooth is None:
        loss = F.cross_entropy(pred, label, reduction='none')
    else:
        num_classes = pred.size(1)
        target = F.one_hot(label, num_classes).type_as(pred)
        # the target class keeps 1 - label_smooth, and every class (including
        # the target) receives an extra label_smooth / num_classes
        target = target.sub_(label_smooth).clamp_(0).add_(label_smooth / num_classes)
        loss = F.kl_div(pred.log_softmax(1), target, reduction='none').sum(1)

    # apply weights and do the reduction
    if weight is not None:
        weight = weight.float()
    loss = weight_reduce_loss(
        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)

    return loss
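A hedged usage sketch with toy tensors, assuming weight_reduce_loss from the same (mmdetection-style) codebase is importable alongside the function above:

import torch

pred = torch.randn(4, 10)  # [N, num_classes] logits
label = torch.randint(0, 10, (4,))
plain = cross_entropy(pred, label)                       # standard CE
smoothed = cross_entropy(pred, label, label_smooth=0.1)  # smoothed, via KL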
Example 11: train_step
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def train_step(model, state_transitions, tgt, num_actions):
    if len(state_transitions) <= 0:
        print("empty state transitions")
        return

    cur_states = torch.stack([torch.Tensor(s.state) for s in state_transitions]).to(model.device)
    rewards = torch.stack([torch.Tensor([s.reward]) for s in state_transitions]).to(model.device)
    Qs = torch.stack([torch.Tensor([s.qval]) for s in state_transitions]).to(model.device)
    mask = torch.stack([torch.Tensor([0]) if s.done else torch.Tensor([1]) for s in state_transitions]).to(model.device)
    next_states = torch.stack([torch.Tensor(s.next_state) for s in state_transitions]).to(model.device)
    actions = [s.action for s in state_transitions]

    with torch.no_grad():
        pred_qvals_next = model(next_states).max(-1)[0]

    model.opt.zero_grad()
    pred_qvals = model(cur_states)
    one_hot_actions = F.one_hot(torch.LongTensor(actions), num_actions).to(model.device)

    # one-step TD target: r + 0.99 * max_a' Q(s', a'), zeroed at episode ends
    loss = F.smooth_l1_loss(
        torch.sum(pred_qvals * one_hot_actions, -1),
        rewards.view(-1) + 0.99 * mask[:, 0] * pred_qvals_next.view(-1),
    )
    loss.backward()
    model.opt.step()
    return loss
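The F.one_hot call above implements action masking: multiplying the predicted Q-values by the one-hot actions and summing over the last dimension picks out Q(s, a) for the action actually taken. A hedged, standalone sketch with toy sizes:

import torch
import torch.nn.functional as F

num_actions = 3
pred_qvals = torch.randn(4, num_actions)               # [batch, num_actions]
actions = torch.LongTensor([0, 2, 1, 1])
one_hot_actions = F.one_hot(actions, num_actions)      # [batch, num_actions]
q_taken = torch.sum(pred_qvals * one_hot_actions, -1)  # [batch]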
Example 12: train_step
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def train_step(model, state_transitions, tgt, num_actions, gamma):
    if len(state_transitions) <= 0:
        print("empty state transitions")
        return

    cur_states = torch.stack([torch.Tensor(s.state) for s in state_transitions]).to(model.device)
    rewards = torch.stack([torch.Tensor([s.reward]) for s in state_transitions]).to(model.device)
    Qs = torch.stack([torch.Tensor([s.qval]) for s in state_transitions]).to(model.device)
    mask = torch.stack([torch.Tensor([0]) if s.done else torch.Tensor([1]) for s in state_transitions]).to(model.device)
    next_states = torch.stack([torch.Tensor(s.next_state) for s in state_transitions]).to(model.device)
    actions = [s.action for s in state_transitions]

    with torch.no_grad():
        actual_Q_values = Qs
        pred_qvals_next = model(next_states.view(len(state_transitions), 3, 160, 140 * 3)).max(-1)[0]

    model.opt.zero_grad()
    pred_qvals = model(cur_states.view(len(state_transitions), 3, 160, 140 * 3))
    one_hot_actions = F.one_hot(torch.LongTensor(actions), num_actions).to(model.device)

    # regress Q(s, a) of the taken actions toward the stored Q-value targets;
    # a TD-target alternative would use rewards + gamma * mask * pred_qvals_next
    loss = F.smooth_l1_loss(torch.sum(pred_qvals * one_hot_actions, -1), actual_Q_values.view(-1))
    loss.backward()
    model.opt.step()
    return loss
Example 13: _sample_layer_choice
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def _sample_layer_choice(self, mutable):
    self._lstm_next_step()
    logit = self.soft(self._h[-1])
    if self.temperature is not None:
        logit /= self.temperature
    if self.tanh_constant is not None:
        logit = self.tanh_constant * torch.tanh(logit)
    if mutable.key in self.bias_dict:
        logit += self.bias_dict[mutable.key]
    branch_id = torch.multinomial(F.softmax(logit, dim=-1), 1).view(-1)
    log_prob = self.cross_entropy_loss(logit, branch_id)
    self.sample_log_prob += self.entropy_reduction(log_prob)
    entropy = (log_prob * torch.exp(-log_prob)).detach()  # pylint: disable=invalid-unary-operand-type
    self.sample_entropy += self.entropy_reduction(entropy)
    self._inputs = self.embedding(branch_id)
    return F.one_hot(branch_id, num_classes=self.max_layer_choice).bool().view(-1)
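A hedged sketch of the returned boolean mask in isolation (max_layer_choice and the uniform sampling weights below are illustrative):

import torch
import torch.nn.functional as F

max_layer_choice = 4
branch_id = torch.multinomial(torch.ones(max_layer_choice), 1).view(-1)
mask = F.one_hot(branch_id, num_classes=max_layer_choice).bool().view(-1)
# e.g. tensor([False, False,  True, False])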
Example 14: sample_final
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def sample_final(self):
    """
    Generate the final chosen architecture.

    Returns
    -------
    dict
        the choice of each mutable, i.e., LayerChoice
    """
    result = dict()
    for mutable in self.undedup_mutables:
        assert isinstance(mutable, LayerChoice)
        index, _ = mutable.registered_module.chosen_index
        # pylint: disable=not-callable
        result[mutable.key] = F.one_hot(torch.tensor(index), num_classes=len(mutable)).view(-1).bool()
    return result
Example 15: sample_search
# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import one_hot [as alias]
def sample_search(self):
    """
    Sample a random candidate.
    """
    result = dict()
    for mutable in self.mutables:
        if isinstance(mutable, LayerChoice):
            gen_index = torch.randint(high=len(mutable), size=(1, ))
            result[mutable.key] = F.one_hot(gen_index, num_classes=len(mutable)).view(-1).bool()
        elif isinstance(mutable, InputChoice):
            if mutable.n_chosen is None:
                result[mutable.key] = torch.randint(high=2, size=(mutable.n_candidates,)).view(-1).bool()
            else:
                perm = torch.randperm(mutable.n_candidates)
                mask = [i in perm[:mutable.n_chosen] for i in range(mutable.n_candidates)]
                result[mutable.key] = torch.tensor(mask, dtype=torch.bool)  # pylint: disable=not-callable
    return result