This article collects typical usage examples of the Python method chainer.functions.absolute. If you have been wondering how exactly functions.absolute is used, what it does, or where to find examples of it, the curated code samples below may help. You can also explore further usage of the module the method belongs to, chainer.functions.
The following shows 13 code examples of functions.absolute, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
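As a quick orientation before the examples, here is a minimal, self-contained sketch (not taken from the examples on this page) showing the basic behavior of chainer.functions.absolute: it computes the elementwise absolute value of a Variable and is differentiable, so it can appear inside a loss.

import numpy as np
import chainer
from chainer import functions as F

x = chainer.Variable(np.array([-2.0, 0.5, 3.0], dtype=np.float32))
y = F.absolute(x)          # elementwise |x| -> [2.0, 0.5, 3.0]
loss = F.sum(y)
loss.backward()            # gradient of |x| is sign(x): [-1, 1, 1]
print(y.array, x.grad)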
Example 1: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import absolute [as alias]
def __call__(self, x):
    if self.dr:
        with chainer.using_config('train', True):
            x = F.dropout(x, self.dr)
    if self.gap:
        x = F.sum(x, axis=(2, 3))
    N = x.shape[0]
    # Code below copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
    feature = F.reshape(F.leaky_relu(x), (N, -1))
    m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
    m0 = F.broadcast_to(m, (N, self.B * self.C, N))
    m1 = F.transpose(m0, (2, 1, 0))
    d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
    d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
    h = F.concat([feature, d])
    h = self.l(h)
    return h
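The block after the copied comment implements minibatch discrimination: self.md projects each sample's feature vector to B * C values, pairwise L1 distances between samples are taken per row via F.absolute, and a negative-exponential similarity summed over the batch is concatenated back onto the features. Below is a minimal standalone sketch of just that distance computation; the shapes and the random input are illustrative, not from the original repository.

import numpy as np
from chainer import functions as F

N, B, C = 4, 8, 3
m = np.random.randn(N, B * C, 1).astype(np.float32)   # projected features
m0 = F.broadcast_to(m, (N, B * C, N))
m1 = F.transpose(m0, (2, 1, 0))
# per-row L1 distance between every pair of samples in the batch
d = F.absolute(F.reshape(m0 - m1, (N, B, C, N)))
o = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1        # subtract self-similarity
print(o.shape)                                          # (4, 8)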
Example 2: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import absolute [as alias]
def __call__(self, x):
    N = x.data.shape[0]
    h = F.leaky_relu(self.c0_0(x))
    h = F.leaky_relu(self.bn0_1(self.c0_1(h)))
    h = F.leaky_relu(self.bn1_0(self.c1_0(h)))
    h = F.leaky_relu(self.bn1_1(self.c1_1(h)))
    h = F.leaky_relu(self.bn2_0(self.c2_0(h)))
    h = F.leaky_relu(self.bn2_1(self.c2_1(h)))
    feature = F.reshape(F.leaky_relu(self.c3_0(h)), (N, 8192))
    m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
    m0 = F.broadcast_to(m, (N, self.B * self.C, N))
    m1 = F.transpose(m0, (2, 1, 0))
    d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
    d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
    h = F.concat([feature, d])
    return self.l4(h)
Example 3: calc_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import absolute [as alias]
def calc_loss(self, grids, image_size, **kwargs):
    normalize = kwargs.get('normalize', True)
    corner_coordinates = self.get_corners(grids, image_size, scale_to_image_size=False)
    # determine whether a point lies outside the image; the image range is [-1, 1]
    # everything outside of this range increases the loss!
    bbox = F.concat(corner_coordinates, axis=0)
    top_loss = bbox + 1.5
    bottom_loss = bbox - 1.5

    # do not penalize anything inside the image
    top_loss = F.absolute(F.minimum(top_loss, self.xp.zeros_like(top_loss.array)))
    top_loss = F.reshape(top_loss, (len(corner_coordinates), -1))
    bottom_loss = F.maximum(bottom_loss, self.xp.zeros_like(bottom_loss.array))
    bottom_loss = F.reshape(bottom_loss, (len(corner_coordinates), -1))

    loss = F.sum(F.concat([top_loss, bottom_loss], axis=0), axis=0)
    if normalize:
        loss = F.sum(loss)
    return loss
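The pattern here is a hinge-style out-of-bounds penalty: coordinates are shifted by the margin 1.5, clamped against zero with F.minimum / F.maximum so that in-range points contribute nothing, and F.absolute turns the remaining negative overshoot into a positive penalty. A minimal sketch of the one-sided lower bound, with made-up coordinate values:

import numpy as np
from chainer import functions as F

coords = np.array([-2.0, -1.0, 0.0, 2.0], dtype=np.float32)
shifted = coords + 1.5                       # distance above the lower bound -1.5
overshoot = F.minimum(shifted, np.zeros_like(shifted))
penalty = F.absolute(overshoot)              # [0.5, 0, 0, 0]: only -2.0 is penalized
print(penalty.array)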
Example 4: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import absolute [as alias]
def __call__(self, x):
    N = x.shape[0]
    # Code below copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
    feature = F.reshape(x, (N, -1))
    m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
    m0 = F.broadcast_to(m, (N, self.B * self.C, N))
    m1 = F.transpose(m0, (2, 1, 0))
    d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
    d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
    h = F.concat([feature, d])
    h = self.l(h)
    return h
Example 5: _compute_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import absolute [as alias]
def _compute_loss(self, exp_batch, errors_out=None):
    """Compute the Q-learning loss for a batch of experiences

    Args:
        exp_batch (dict): A dict of batched arrays of transitions
    Returns:
        Computed loss from the minibatch of experiences
    """
    y, t = self._compute_y_and_t(exp_batch)

    if errors_out is not None:
        del errors_out[:]
        delta = F.absolute(y - t)
        if delta.ndim == 2:
            delta = F.sum(delta, axis=1)
        delta = cuda.to_cpu(delta.array)
        for e in delta:
            errors_out.append(e)

    if 'weights' in exp_batch:
        return compute_weighted_value_loss(
            y, t, exp_batch['weights'],
            clip_delta=self.clip_delta,
            batch_accumulator=self.batch_accumulator)
    else:
        return compute_value_loss(y, t, clip_delta=self.clip_delta,
                                  batch_accumulator=self.batch_accumulator)
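Here errors_out collects the per-transition TD-error magnitudes |y - t|, which prioritized experience replay typically uses as sampling priorities. A minimal sketch of just that step; y, t, and the two-column shape are placeholder values for illustration:

import numpy as np
from chainer import functions as F

y = np.array([[1.0, 2.0], [0.5, 0.5]], dtype=np.float32)   # predicted Q-values
t = np.array([[0.0, 2.5], [0.5, 1.5]], dtype=np.float32)   # targets
delta = F.absolute(y - t)          # elementwise TD-error magnitude
delta = F.sum(delta, axis=1)       # one priority per transition: [1.5, 1.0]
print(delta.array)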
Example 6: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import absolute [as alias]
def forward(self, x):
    y1 = F.absolute(x)
    return y1
Example 7: _smooth_l1_loss_base

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import absolute [as alias]
def _smooth_l1_loss_base(x, t, in_weight, sigma):
    sigma2 = sigma ** 2
    diff = in_weight * (x - t)
    abs_diff = F.absolute(diff)
    flag = (abs_diff.array < (1. / sigma2)).astype(np.float32)
    y = (flag * (sigma2 / 2.) * F.square(diff) +
         (1 - flag) * (abs_diff - 0.5 / sigma2))
    return F.sum(y, axis=1)
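This is the standard smooth L1 (Huber-style) loss used in Faster R-CNN: quadratic where |diff| < 1/sigma^2 and linear beyond, so F.absolute both selects the branch and supplies the linear part. A small standalone sketch checking both branches at sigma = 1 (input values are illustrative):

import numpy as np
from chainer import functions as F

def smooth_l1(diff, sigma=1.0):
    sigma2 = sigma ** 2
    abs_diff = F.absolute(diff)
    flag = (abs_diff.array < (1. / sigma2)).astype(np.float32)
    return (flag * (sigma2 / 2.) * F.square(diff)
            + (1 - flag) * (abs_diff - 0.5 / sigma2))

d = np.array([0.5, 2.0], dtype=np.float32)
print(smooth_l1(d).array)   # [0.125, 1.5]: 0.5**2 / 2 inside, |2.0| - 0.5 outside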
Example 8: _smooth_l1_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import absolute [as alias]
def _smooth_l1_loss(x, t, in_weight, sigma):
    sigma2 = sigma ** 2
    diff = in_weight * (x - t)
    abs_diff = F.absolute(diff)
    flag = (abs_diff.array < (1. / sigma2)).astype(np.float32)
    y = (flag * (sigma2 / 2.) * F.square(diff) +
         (1 - flag) * (abs_diff - 0.5 / sigma2))
    return F.sum(y)
Example 9: _smooth_l1_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import absolute [as alias]
def _smooth_l1_loss(x, t, in_weight, sigma):
    sigma2 = sigma ** 2
    diff = in_weight * (x - t)
    abs_diff = F.absolute(diff)
    flag = (abs_diff.data < (1. / sigma2)).astype(np.float32)
    y = (flag * (sigma2 / 2.) * F.square(diff) +
         (1 - flag) * (abs_diff - 0.5 / sigma2))
    return F.sum(y)
Example 10: megnet_softplus

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import absolute [as alias]
def megnet_softplus(x):
    """Modified softplus function used by MEGNet

    The original implementation is below.
    https://github.com/materialsvirtuallab/megnet/blob/f91773f0f3fa8402b494638af9ef2ed2807fcba7/megnet/activations.py#L6

    Args:
        x (Variable): Input variable
    Returns:
        output (Variable): Output variable whose shape is the same as `x`
    """
    return functions.relu(x) + \
        functions.log(0.5 * functions.exp(-functions.absolute(x)) + 0.5)
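Writing the activation as relu(x) + log(0.5 * exp(-|x|) + 0.5) is a numerically stable form: it equals softplus(x) - log(2), so the function is zero at x = 0, and it never exponentiates a large positive value because F.absolute keeps the exponent non-positive. A quick check against the naive formula at moderate inputs:

import numpy as np
from chainer import functions as F

x = np.array([-5.0, 0.0, 5.0], dtype=np.float32)
stable = F.relu(x) + F.log(0.5 * F.exp(-F.absolute(x)) + 0.5)
naive = np.log(1.0 + np.exp(x)) - np.log(2.0)          # softplus(x) - log(2)
print(np.allclose(stable.array, naive, atol=1e-5))     # True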
Example 11: __call__

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import absolute [as alias]
def __call__(self, adj):
    masked_adj = adj[:, :, self.mask]
    log_s, t = self._s_t_functions(masked_adj)
    t = F.broadcast_to(t, adj.shape)
    s = F.sigmoid(log_s + 2)
    s = F.broadcast_to(s, adj.shape)
    adj = adj * self.mask + adj * (s * ~self.mask) + t * (~self.mask)
    log_det_jacobian = F.sum(F.log(F.absolute(s)), axis=(1, 2, 3))
    return adj, log_det_jacobian
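This is an affine coupling layer in the style of normalizing flows such as Real NVP: unmasked entries are scaled by s and shifted by t, and because the Jacobian of an elementwise scaling is diagonal, its log-determinant is simply the sum of log|s|, hence F.log(F.absolute(s)). A minimal illustration of that identity with a made-up diagonal scaling:

import numpy as np
from chainer import functions as F

s = np.array([0.5, 2.0, 1.5], dtype=np.float32)     # elementwise scale factors
log_det = F.sum(F.log(F.absolute(s)))               # sum of log|s_i|
reference = np.log(np.abs(np.linalg.det(np.diag(s))))
print(float(log_det.array), reference)              # both ~0.405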
Example 12: _compute_ddqn_losses

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import absolute [as alias]
def _compute_ddqn_losses(self, exp_batch, errors_out=None):
    """Compute the Q-learning losses for a batch of experiences

    Args:
        exp_batch (dict): A dict of batched arrays of transitions
    Returns:
        Computed loss from the minibatch of experiences
    """
    y, t = self._compute_y_and_ts(exp_batch)
    n_branches = exp_batch['action'].shape[1]

    # Calculate errors_out for priorities using the 1-step error
    del errors_out[:]
    delta = F.absolute(y - t)
    if delta.ndim == 2:
        delta = F.sum(delta, axis=1)
    delta = cuda.to_cpu(delta.array)
    for e in delta:
        errors_out.append(e)

    is_1_step = self.xp.abs(1. - exp_batch["is_n_step"]).reshape(-1, 1)
    is_1_step = self.xp.tile(is_1_step, (1, n_branches)).reshape(-1)
    is_n_step = exp_batch['is_n_step'].reshape(-1, 1)
    is_n_step = self.xp.tile(is_n_step, (1, n_branches)).reshape(-1)
    weights = exp_batch['weights'].reshape(-1, 1)
    weights = F.tile(weights, (1, n_branches)).reshape(-1)

    loss_1step = compute_weighted_value_loss(
        y, t, weights,
        mask=is_1_step,
        clip_delta=self.clip_delta,
        batch_accumulator=self.batch_accumulator)
    loss_nstep = compute_weighted_value_loss(
        y, t, weights,
        mask=is_n_step,
        clip_delta=self.clip_delta,
        batch_accumulator=self.batch_accumulator)
    return loss_nstep, loss_1step
Example 13: compute_tv_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import absolute [as alias]
def compute_tv_loss(images, masks):
    # Anisotropic (L1) variant, kept for reference:
    # s1 = cf.absolute(images[:, :, 1:, :-1] - images[:, :, :-1, :-1])
    # s2 = cf.absolute(images[:, :, :-1, 1:] - images[:, :, :-1, :-1])
    s1 = cf.square(images[:, :, 1:, :-1] - images[:, :, :-1, :-1])
    s2 = cf.square(images[:, :, :-1, 1:] - images[:, :, :-1, :-1])
    masks = cf.broadcast_to(masks[:, None, :-1, :-1], s1.shape)
    masks = masks.data == 1
    return cf.sum(masks * (s1 + s2))
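The commented-out lines are the anisotropic total-variation penalty, which applies cf.absolute to neighboring-pixel differences instead of squaring them; the active code uses the squared, smoother variant. A minimal sketch of the absolute-value form on a single-channel batch (the random input and shapes are illustrative):

import numpy as np
from chainer import functions as F

images = np.random.rand(1, 1, 4, 4).astype(np.float32)
# vertical and horizontal neighbor differences, L1-penalized
s1 = F.absolute(images[:, :, 1:, :-1] - images[:, :, :-1, :-1])
s2 = F.absolute(images[:, :, :-1, 1:] - images[:, :, :-1, :-1])
tv = F.sum(s1 + s2)
print(float(tv.array))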