This article collects typical usage examples of the softmax function from Python's nnabla.functions module. If you are wondering how to use softmax, or what it is good for, the hand-picked code examples below may help.
The following presents 9 code examples of the softmax function, ordered by popularity by default. You can vote for the examples you like or find useful; your feedback helps the site recommend better Python code examples.
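All of the snippets below rely on the usual nnabla imports (import nnabla as nn, import nnabla.functions as F, import numpy as np), which the original page omits. As a minimal, self-contained sketch of F.softmax itself (the shapes and values here are illustrative assumptions, not taken from any of the examples):

import numpy as np
import nnabla as nn
import nnabla.functions as F

# A batch of 4 logit vectors with 10 classes each (illustrative shape).
x = nn.Variable((4, 10))
x.d = np.random.randn(4, 10)

# Softmax over the class axis; each row of y.d sums to 1.
y = F.softmax(x, axis=1)
y.forward()
print(y.d.sum(axis=1))  # approximately [1. 1. 1. 1.]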
Example 1: sr_loss_with_uncertainty
def sr_loss_with_uncertainty(ctx, pred0, pred1, log_var0, log_var1):
    # TODO: squared error/absolute error
    s0 = F.exp(log_var0)
    s1 = F.exp(log_var1)
    squared_error = F.squared_error(F.softmax(pred0), F.softmax(pred1))
    with nn.context_scope(ctx):
        loss_sr = F.mean(squared_error * (1 / s0 + 1 / s1) + (s0 / s1 + s1 / s0)) * 0.5
    return loss_sr
Example 2: er_loss
def er_loss(ctx, pred):
    with nn.context_scope(ctx):
        bs = pred.shape[0]
        d = np.prod(pred.shape[1:])
        denominator = bs * d
        pred_normalized = F.softmax(pred)
        pred_log_normalized = F.log(F.softmax(pred))
        loss_er = - F.sum(pred_normalized * pred_log_normalized) / denominator
    return loss_er
Example 3: kl_divergence
def kl_divergence(ctx, pred, label, log_var):
    with nn.context_scope(ctx):
        s = F.pow_scalar(F.exp(log_var), 0.5)
        elms = softmax_with_temperature(ctx, label, s) \
            * F.log(F.softmax(pred, axis=1))
        loss = -F.mean(F.sum(elms, axis=1))
    return loss
Example 4: attention
def attention(k, q, v, div_dim=True, softmax=True):
    v_shape = v.shape
    k = F.identity(k)
    q = F.identity(q)
    k = F.reshape(k, (k.shape[0], np.prod(k.shape[1:])))
    q = F.reshape(q, (q.shape[0], np.prod(q.shape[1:])))
    v = q  # F.reshape is inplace
    cf = F.affine(q, F.transpose(k, (1, 0)))
    if div_dim:
        dim = np.prod(v_shape[1:])
        cf /= np.sqrt(dim)
    h = cf
    if softmax:
        h = F.softmax(h)
    h = F.affine(h, v)
    h = F.reshape(h, v_shape)
    return h
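As a rough sketch of how this attention helper might be called, assuming the imports shown above and purely hypothetical feature-map shapes (none of this appears in the original project):

# Hypothetical query/key/value feature maps; attention() flattens all
# non-batch axes, so only the batch size and total element count matter.
k = nn.Variable((8, 16, 4, 4))
q = nn.Variable((8, 16, 4, 4))
v = nn.Variable((8, 16, 4, 4))

h = attention(k, q, v, div_dim=True, softmax=True)
print(h.shape)  # (8, 16, 4, 4), restored to v_shape by the final reshape

Note that, as written, the snippet reuses the reshaped q as the value tensor (v = q), so the v argument only contributes its shape to the final reshape.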
Example 5: kl_divergence
def kl_divergence(ctx, pred, label):
    with nn.context_scope(ctx):
        elms = F.softmax(label, axis=1) * F.log(F.softmax(pred, axis=1))
        loss = -F.mean(F.sum(elms, axis=1))
    return loss
Example 6: distance
def distance(y0, y1):
    """
    Distance function is the Kullback-Leibler divergence for categorical distributions
    """
    return F.kl_multinomial(F.softmax(y0), F.softmax(y1))
Example 7: ce_loss_soft
def ce_loss_soft(ctx, pred, target):
    with nn.context_scope(ctx):
        # TODO: divide or not
        loss = - F.mean(F.sum(F.softmax(target) * F.log(F.softmax(pred)), axis=1))
    return loss
Example 8: sr_loss
def sr_loss(ctx, pred0, pred1):
    with nn.context_scope(ctx):
        pred_x_u0 = F.softmax(pred0)
        pred_x_u1 = F.softmax(pred1)
        loss_sr = F.mean(F.squared_error(pred_x_u0, pred_x_u1))
    return loss_sr
Example 9: softmax_with_temperature
def softmax_with_temperature(ctx, x, t):
    with nn.context_scope(ctx):
        h = x / t
        h = F.softmax(h, axis=1)
    return h
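For completeness, a hedged sketch of calling this temperature-scaled softmax with an explicitly created context, again assuming the imports shown earlier (the context setup and the temperature value are assumptions, not part of the original snippet):

from nnabla.ext_utils import get_extension_context

ctx = get_extension_context("cpu")  # or "cudnn" when a GPU build is available

logits = nn.Variable((4, 10))
logits.d = np.random.randn(4, 10)

# t > 1 flattens the distribution, t < 1 sharpens it.
probs = softmax_with_temperature(ctx, logits, t=2.0)
probs.forward()
print(probs.d.sum(axis=1))  # each row still sums to ~1 regardless of t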