This page collects typical usage examples of the Python method chainer.functions.normalize, drawn from open-source projects. If you are wondering what functions.normalize does or how to call it, the curated code samples below may help. You can also explore the other methods of the chainer.functions module.
The following shows 15 code examples of the functions.normalize method, sorted by popularity by default.
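Before the examples, a quick orientation: F.normalize performs L2 normalization, dividing each slice of the input along the given axis (axis=1 by default) by its Euclidean norm plus a small eps. A minimal sketch of the bare call, with made-up data:

import numpy as np
import chainer.functions as F

x = np.random.rand(4, 10).astype(np.float32)
y = F.normalize(x, eps=1e-5, axis=1)  # each row divided by its L2 norm
print(np.linalg.norm(y.array, axis=1))  # approximately [1. 1. 1. 1.]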
Example 1: forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def forward(self, xs, hs=None, activation=None):
    if hs is not None:
        hx1, cx1, hx_emb, cx_emb = hs
    else:
        hx1 = cx1 = hx_emb = cx_emb = None
    # forward to LSTM layers
    hy_emb, cy_emb, ems = self.bi_lstm_emb(hx_emb, cx_emb, xs)
    hy1, cy1, ys = self.bi_lstm1(hx1, cx1, ems)
    # main branch
    ys_stack = F.vstack(ys)
    ys = self.linear1(ys_stack)
    if activation:
        ys = activation(ys)
    ilens = [x.shape[0] for x in xs]
    ys = F.split_axis(ys, np.cumsum(ilens[:-1]), axis=0)
    # embedding branch
    ems_stack = F.vstack(ems)
    ems = F.normalize(F.tanh(self.linear2(ems_stack)))
    ems = F.split_axis(ems, np.cumsum(ilens[:-1]), axis=0)
    if not isinstance(ys, tuple):
        ys = [ys]
        ems = [ems]
    return [hy1, cy1, hy_emb, cy_emb], ys, ems
Example 2: check_forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def check_forward(self, x_data, axis):
    eps = self.eps
    x = chainer.Variable(x_data)
    y = functions.normalize(x, eps=eps, axis=axis)
    self.assertEqual(y.data.dtype, self.dtype)
    y_data = cuda.to_cpu(y.data)

    y_expect = numpy.empty_like(self.x)
    shape = self.x.shape
    indices = []
    axis_tuple = axis if isinstance(axis, tuple) else (axis,)
    for i in six.moves.range(len(shape)):
        if i not in axis_tuple:
            indices.append(six.moves.range(shape[i]))
        else:
            indices.append([slice(None)])
    indices_tuple = list(itertools.product(*indices))
    for index in indices_tuple:
        # Note: Casting back the result of `numpy.linalg.norm` to `x.dtype`
        # because old NumPy casts it to float32 when a float16 value is
        # given.
        numerator = numpy.linalg.norm(self.x[index]).astype(x.dtype) + eps
        y_expect[index] = self.x[index] / numerator

    testing.assert_allclose(y_expect, y_data, **self.check_forward_options)
Example 3: check_backward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def check_backward(self, x_data, axis, y_grad):
    def f(x):
        return functions.normalize(x, eps=self.eps, axis=axis)

    gradient_check.check_backward(
        f, x_data, y_grad, **self.check_backward_options)
Example 4: check_double_backward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def check_double_backward(self, x_data, axis, y_grad, x_grad_grad):
    def f(x):
        return functions.normalize(x, eps=self.eps, axis=axis)

    gradient_check.check_double_backward(
        f, x_data, y_grad, x_grad_grad,
        **self.check_double_backward_options)
Example 5: check_eps
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def check_eps(self, x_data):
    x = chainer.Variable(x_data)
    y = functions.normalize(x, axis=self.axis)
    self.assertEqual(y.data.dtype, self.dtype)

    y_data = cuda.to_cpu(y.data)
    y_expect = numpy.zeros_like(self.x)
    testing.assert_allclose(y_expect, y_data)
Example 6: output_and_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def output_and_loss(self, h, t):
    logit = self(h)
    return F.softmax_cross_entropy(
        logit, t, normalize=False, reduce='mean')
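Note that here normalize is a keyword argument of F.softmax_cross_entropy rather than the chainer.functions.normalize function: with reduce='mean' it selects the denominator of the mean. A small sketch with made-up data:

import numpy as np
import chainer.functions as F

logit = np.random.randn(4, 10).astype(np.float32)  # (batch, classes)
t = np.array([1, 2, 0, 9], np.int32)
# normalize=False divides the summed loss by the batch size; normalize=True
# (the default) divides by the total number of label locations instead.
loss = F.softmax_cross_entropy(logit, t, normalize=False, reduce='mean')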
Example 7: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def __call__(self, x):
    if self.normalizedW is None:
        if self.norm_to_one:
            self.normalizedW = F.normalize(self.vocab_freq * self.W)
        else:
            self.normalizedW = self.norm_by_freq(self.vocab_freq)

    return embed_id.embed_id(x, self.normalizedW, ignore_label=self.ignore_label)

# Definition of a recurrent net for language modeling
Example 8: forward_seq_batch
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def forward_seq_batch(self, x_seq_batch, t_seq_batch, normalize=None):
    y_seq_batch = self.encode_seq_batch(x_seq_batch)
    loss = self.output_and_loss_from_seq_batch(
        y_seq_batch, t_seq_batch, normalize)
    return loss
Example 9: output_and_loss_from_seq_batch
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def output_and_loss_from_seq_batch(self, y_seq_batch, t_seq_batch, normalize=None):
    y = F.concat(y_seq_batch, axis=0)
    y = F.dropout(y, ratio=self.dropout)
    t = F.concat(t_seq_batch, axis=0)
    loss = self.output.output_and_loss(y, t)
    if normalize is not None:
        loss *= 1. * t.shape[0] / normalize
    else:
        loss *= t.shape[0]
    return loss
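Because output_and_loss returns a per-token mean (see Example 6), the tail of this function rescales it: multiplying by t.shape[0] recovers a sum over tokens, and dividing by a caller-supplied normalize value (e.g. the batch size) yields a per-sequence average. A made-up numeric illustration:

mean_loss = 1.2  # illustrative per-token mean from output_and_loss
n_tokens = 50    # t.shape[0] after concatenating the batch
batch_size = 8   # a typical value to pass as `normalize`

loss_sum = mean_loss * n_tokens                        # normalize is None
loss_per_seq = 1. * n_tokens / batch_size * mean_loss  # normalize=batch_size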
Example 10: look
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def look(vertices, eye, direction=None, up=None):
    """
    "Look at" transformation of vertices.
    """
    assert (vertices.ndim == 3)

    xp = chainer.cuda.get_array_module(vertices)
    if direction is None:
        direction = xp.array([0, 0, 1], 'float32')
    if up is None:
        up = xp.array([0, 1, 0], 'float32')
    if isinstance(eye, list) or isinstance(eye, tuple):
        eye = xp.array(eye, 'float32')
    if eye.ndim == 1:
        eye = eye[None, :]
    if direction.ndim == 1:
        direction = direction[None, :]
    if up.ndim == 1:
        up = up[None, :]

    # create new axes
    z_axis = cf.normalize(direction)
    x_axis = cf.normalize(neural_renderer.cross(up, z_axis))
    y_axis = cf.normalize(neural_renderer.cross(z_axis, x_axis))

    # create rotation matrix: [bs, 3, 3]
    r = cf.concat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), axis=1)
    if r.shape[0] != vertices.shape[0]:
        # tile the single rotation matrix across the batch
        r = cf.broadcast_to(r, (vertices.shape[0], 3, 3))

    # apply
    # [bs, nv, 3] -> [bs, nv, 3] -> [bs, nv, 3]
    if vertices.shape != eye.shape:
        eye = cf.broadcast_to(eye[:, None, :], vertices.shape)
    vertices = vertices - eye
    vertices = cf.matmul(vertices, r, transb=True)

    return vertices
Example 11: look_at
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def look_at(vertices, eye, at=None, up=None):
    """
    "Look at" transformation of vertices.
    """
    assert (vertices.ndim == 3)

    xp = chainer.cuda.get_array_module(vertices)
    batch_size = vertices.shape[0]
    if at is None:
        at = xp.array([0, 0, 0], 'float32')
    if up is None:
        up = xp.array([0, 1, 0], 'float32')
    if isinstance(eye, list) or isinstance(eye, tuple):
        eye = xp.array(eye, 'float32')
    if eye.ndim == 1:
        eye = cf.tile(eye[None, :], (batch_size, 1))
    if at.ndim == 1:
        at = cf.tile(at[None, :], (batch_size, 1))
    if up.ndim == 1:
        up = cf.tile(up[None, :], (batch_size, 1))

    # create new axes
    z_axis = cf.normalize(at - eye)
    x_axis = cf.normalize(neural_renderer.cross(up, z_axis))
    y_axis = cf.normalize(neural_renderer.cross(z_axis, x_axis))

    # create rotation matrix: [bs, 3, 3]
    r = cf.concat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), axis=1)
    if r.shape[0] != vertices.shape[0]:
        r = cf.broadcast_to(r, (vertices.shape[0], 3, 3))

    # apply
    # [bs, nv, 3] -> [bs, nv, 3] -> [bs, nv, 3]
    if vertices.shape != eye.shape:
        eye = cf.broadcast_to(eye[:, None, :], vertices.shape)
    vertices = vertices - eye
    vertices = cf.matmul(vertices, r, transb=True)

    return vertices
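Both helpers reduce to the same recipe: build an orthonormal camera frame with cf.normalize and a cross product, then rotate eye-centered vertices into it. A standalone sketch of that axis construction, substituting numpy.cross for neural_renderer.cross (an assumption; the repository's helper likewise operates on batched (bs, 3) arrays):

import numpy as np
import chainer.functions as cf

eye = np.array([[0.0, 0.0, -2.0]], 'float32')  # camera position, (bs, 3)
at = np.zeros((1, 3), 'float32')               # point being looked at
up = np.array([[0.0, 1.0, 0.0]], 'float32')

z_axis = cf.normalize(at - eye)
x_axis = cf.normalize(np.cross(up, z_axis.array))
y_axis = cf.normalize(np.cross(z_axis.array, x_axis.array))
r = cf.concat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), axis=1)
print(r.shape)  # (1, 3, 3): rows are the x, y, z axes of the camera frame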
Example 12: forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def forward(self, x):
    """Normalize input and scale it.

    Args:
        x (chainer.Variable): A variable holding a 4-dimensional array.
            Its :obj:`dtype` is :obj:`numpy.float32`.

    Returns:
        chainer.Variable:
            The shape and :obj:`dtype` are same as those of input.
    """
    x = F.normalize(x, eps=self.eps, axis=1)
    # broadcast the per-channel scale over the spatial dimensions
    scale = F.broadcast_to(self.scale[:, np.newaxis, np.newaxis], x.shape)
    return x * scale
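This forward appears to come from an SSD-style feature-scaling layer, where self.scale is a learned per-channel parameter and self.eps a small constant. A minimal sketch of such a link; the constructor details are assumptions, not the repository's exact code:

import numpy as np
import chainer
import chainer.functions as F

class L2NormScale(chainer.Link):
    """L2-normalize across channels, then scale each channel (hypothetical)."""

    def __init__(self, n_channel, initial=20.0, eps=1e-5):
        super(L2NormScale, self).__init__()
        self.eps = eps
        with self.init_scope():
            # one learnable scale per channel, initialized to `initial`
            self.scale = chainer.Parameter(initial, (n_channel,))

    def forward(self, x):  # x: (N, C, H, W)
        x = F.normalize(x, eps=self.eps, axis=1)
        scale = F.broadcast_to(self.scale[:, np.newaxis, np.newaxis], x.shape)
        return x * scale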
Example 13: check_forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def check_forward(self, x_data, proxy_data, labels_data):
    x = chainer.Variable(x_data)
    proxy = chainer.Variable(proxy_data)
    x = F.normalize(x)
    loss = proxy_nca_loss(x, proxy, labels_data)
    self.assertEqual(loss.dtype, np.float32)
Example 14: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def __call__(self, x, subtract_mean=True):
    if subtract_mean:
        x = x - self._image_mean
    # h = super(ModifiedGoogLeNet, self).__call__(
    #     x, layers=['pool5'], train=train)['pool5']
    # h = self.bn_fc(h, test=not train)
    # y = self.fc(h)
    # return y
    h = F.relu(self.conv1(x))
    h = F.max_pooling_2d(h, 3, stride=2)
    h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
    h = F.relu(self.conv2_reduce(h))
    h = F.relu(self.conv2(h))
    h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc3a(h)
    h = self.inc3b(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc4a(h)
    h = self.inc4b(h)
    h = self.inc4c(h)
    h = self.inc4d(h)
    h = self.inc4e(h)
    h = F.max_pooling_2d(h, 3, stride=2)
    h = self.inc5a(h)
    h = self.inc5b(h)
    h = F.average_pooling_2d(h, 7, stride=1)
    h = self.bn_fc(h)
    y = self.fc(h)
    if self.normalize_output:
        y = F.normalize(y)
    return y
Example 15: proxy_nca_loss
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import normalize [as alias]
def proxy_nca_loss(x, proxy, labels):
    """Proxy-NCA loss function.

    Args:
        x (:class:`~chainer.Variable`):
            L2 normalized anchor points whose shape is (B, D), where B is
            the batch size and D is the number of dimensions of feature
            vector.
        proxy (:class:`~chainer.Variable` or :class:`~chainer.Parameter`):
            Proxies whose shape is (K, D), where K is the number of classes
            in the dataset.
        labels (:class:`numpy.ndarray`):
            Class labels associated to x. The shape is (B,) and dtype is
            int. Note that the class IDs must be 0, 1, ..., K-1.

    Returns:
        :class:`~chainer.Variable`: Loss value.

    See: `No Fuss Distance Metric Learning using Proxies \
        <http://openaccess.thecvf.com/content_ICCV_2017/papers/\
        Movshovitz-Attias_No_Fuss_Distance_ICCV_2017_paper.pdf>`_
    """
    proxy = F.normalize(proxy)
    distance = squared_distance_matrix(x, proxy)
    d_posi = distance[np.arange(len(x)), labels]

    # For each row, remove one element corresponding to the positive distance
    B, K = distance.shape  # batch size and the number of classes
    mask = np.tile(np.arange(K), (B, 1)) != labels[:, None]
    d_nega = distance[mask].reshape(B, K - 1)

    log_denominator = F.logsumexp(-d_nega, axis=1)
    loss = d_posi + log_denominator
    return F.average(loss)
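A usage sketch tying Examples 13 and 15 together. The helper squared_distance_matrix is not shown on this page, so a plausible stand-in is defined below; its contract (pairwise squared Euclidean distances of shape (B, K)) is an assumption inferred from how proxy_nca_loss consumes it:

import numpy as np
import chainer
import chainer.functions as F

def squared_distance_matrix(x, y):
    # Hypothetical stand-in for the repository helper: entry (i, j) is
    # ||x_i - y_j||^2, computed via the expansion x^2 - 2xy + y^2.
    x2 = F.sum(x * x, axis=1, keepdims=True)   # (B, 1)
    y2 = F.sum(y * y, axis=1, keepdims=True)   # (K, 1)
    xy = F.matmul(x, y, transb=True)           # (B, K)
    return x2 - 2 * xy + F.transpose(y2)

B, K, D = 8, 5, 16
x = F.normalize(np.random.randn(B, D).astype(np.float32))        # anchors
proxy = chainer.Parameter(np.random.randn(K, D).astype(np.float32))
labels = np.random.randint(0, K, size=B)
loss = proxy_nca_loss(x, proxy, labels)
print(loss.array)  # scalar float32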