This article collects and summarizes typical usage examples of the chainer.functions.swapaxes method in Python. If you have been wondering how exactly functions.swapaxes is used, or what it is good for, the curated code examples below may help. You can also read further about the module it belongs to, chainer.functions.
The following 15 code examples of functions.swapaxes are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: channel_shuffle
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def channel_shuffle(x, groups):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network
    for Mobile Devices,' https://arxiv.org/abs/1707.01083.

    Parameters
    ----------
    x : chainer.Variable or numpy.ndarray or cupy.ndarray
        Input variable.
    groups : int
        Number of groups.

    Returns
    -------
    chainer.Variable or numpy.ndarray or cupy.ndarray
        Resulting variable.
    """
    batch, channels, height, width = x.shape
    channels_per_group = channels // groups
    x = F.reshape(x, shape=(batch, groups, channels_per_group, height, width))
    x = F.swapaxes(x, axis1=1, axis2=2)
    x = F.reshape(x, shape=(batch, channels, height, width))
    return x
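A minimal usage sketch (my addition, not part of the original example): with 4 channels tagged 0..3 and groups=2, the shuffle interleaves the two groups.

import numpy as np
import chainer.functions as F

x = np.arange(4, dtype=np.float32).reshape(1, 4, 1, 1)  # channels tagged 0..3
y = channel_shuffle(x, groups=2)
print(y.array.ravel())  # [0. 2. 1. 3.] -- the two groups are interleaved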
Example 2: channel_shuffle2
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def channel_shuffle2(x, groups):
    """
    Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network
    for Mobile Devices,' https://arxiv.org/abs/1707.01083. The alternative version.

    Parameters
    ----------
    x : chainer.Variable or numpy.ndarray or cupy.ndarray
        Input variable.
    groups : int
        Number of groups.

    Returns
    -------
    chainer.Variable or numpy.ndarray or cupy.ndarray
        Resulting variable.
    """
    batch, channels, height, width = x.shape
    channels_per_group = channels // groups
    x = F.reshape(x, shape=(batch, channels_per_group, groups, height, width))
    x = F.swapaxes(x, axis1=1, axis2=2)
    x = F.reshape(x, shape=(batch, channels, height, width))
    return x
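A quick check (my addition): the two variants are mutually inverse permutations, so composing them restores the original channel order.

import numpy as np

x = np.arange(8, dtype=np.float32).reshape(1, 8, 1, 1)
y = channel_shuffle(x, groups=2)         # channel order becomes [0 4 1 5 2 6 3 7]
z = channel_shuffle2(y.array, groups=2)  # back to [0 1 2 3 4 5 6 7]
assert np.array_equal(z.array, x)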
Example 3: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def __call__(self, inputs):
    pos_x, pos_y, offset_x, ego_x, ego_y, pose_x, pose_y = self._prepare_input(inputs)
    batch_size, past_len, _ = pos_x.shape

    h_pos = self.pos_encoder(pos_x)
    h_ego = self.ego_encoder(ego_x)
    h = F.concat((h_pos, h_ego), axis=1)  # (B, C, 2)
    h = self.inter(h)
    h_pos = self.pos_decoder(h)
    pred_y = self.last(h_pos)  # (B, 10, C+6+28)
    pred_y = F.swapaxes(pred_y, 1, 2)
    pred_y = pred_y[:, :pos_y.shape[1], :]
    loss = F.mean_squared_error(pred_y, pos_y)
    pred_y = pred_y + F.broadcast_to(F.expand_dims(offset_x, 1), pred_y.shape)
    pred_y = cuda.to_cpu(pred_y.data) * self._std + self._mean
    return loss, pred_y, None
Example 4: forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def forward(self, xs, ilens):
    """Subsample x.

    :param xs: input tensor
    :param ilens: input lengths
    :return: subsampled x and subsampled lengths
    """
    xs = self.xp.array(xs[:, None])
    xs = F.relu(self.conv1(xs))
    xs = F.relu(self.conv2(xs))
    batch, _, length, _ = xs.shape
    xs = self.out(F.swapaxes(xs, 1, 2).reshape(batch * length, -1))
    xs = self.pe(xs.reshape(batch, length, -1))
    # change ilens accordingly: each stride-2 convolution halves the time axis
    ilens = np.ceil(np.array(ilens, dtype=np.float32) / 2).astype(np.int64)
    ilens = np.ceil(np.array(ilens, dtype=np.float32) / 2).astype(np.int64)
    return xs, ilens
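For instance, an utterance of 100 frames comes out as ceil(ceil(100 / 2) / 2) = 25 frames: each of the two stride-2 convolutions halves the time axis, and the repeated ceiling division keeps ilens in sync with it.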
Example 5: propdown
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def propdown(self, hid):
    """Propagate the hidden units' activations downward to the visible units.

    :param hid: Variable Matrix(batch_size, out_channels, image_height_out, image_width_out)
        - given h_sample
    :return: Variable Matrix(batch_size, in_channels, image_height, image_width)
        - probability for each visible unit to be v_j = 1
    """
    batch_size = hid.data.shape[0]
    if self.real == 0:
        W_flipped = F.swapaxes(CF.flip(self.conv.W, axes=(2, 3)), axis1=0, axis2=1)
        pre_sigmoid_activation = F.convolution_2d(hid, W_flipped, self.conv.a, pad=self.ksize - 1)
        # F.matmul(hid, self.l.W) + F.broadcast_to(self.l.a, (batch_size, self.n_visible))
        v_mean = F.sigmoid(pre_sigmoid_activation)
    else:
        # TODO: check
        W_flipped = F.swapaxes(CF.flip(self.conv.W, axes=(2, 3)), axis1=0, axis2=1)
        v_mean = F.convolution_2d(hid, W_flipped, self.conv.a, pad=self.ksize - 1)
    return v_mean
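As a sanity check (my addition, assuming the forward convolution uses stride 1 and no padding), convolving with the spatially flipped, channel-swapped kernel at pad = ksize - 1 is the transpose of the forward convolution, so it should match chainer.functions.deconvolution_2d with the unmodified weight:

import numpy as np
import chainer.functions as F

rng = np.random.RandomState(0)
W = rng.randn(4, 3, 5, 5).astype(np.float32)    # (out_ch, in_ch, kh, kw)
hid = rng.randn(2, 4, 8, 8).astype(np.float32)  # hidden activations

W_flipped = np.ascontiguousarray(np.flip(W, axis=(2, 3)).swapaxes(0, 1))
down = F.convolution_2d(hid, W_flipped, pad=5 - 1)
ref = F.deconvolution_2d(hid, W)                # transposed convolution, same weight
assert np.allclose(down.array, ref.array, atol=1e-4)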
Example 6: reconstruct
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def reconstruct(self, v):
    """
    :param v: Variable Matrix(batch_size, in_channels, image_height, image_width)
    :return: reconstructed_v, Variable Matrix(batch_size, in_channels, image_height, image_width)
    """
    batch_size = v.data.shape[0]
    xp = cuda.get_array_module(v.data)
    if self.real == 0:
        h = F.sigmoid(self.conv(v))
    else:
        std_ch = xp.reshape(self.std, (1, self.in_channels, 1, 1))
        h = F.sigmoid(self.conv(v / std_ch))
    # F.sigmoid(F.matmul(v, self.l.W, transb=True) + F.broadcast_to(self.l.b, (batch_size, self.n_hidden)))
    W_flipped = F.swapaxes(CF.flip(self.conv.W, axes=(2, 3)), axis1=0, axis2=1)
    reconstructed_v = F.sigmoid(F.convolution_2d(h, W_flipped, self.conv.a, pad=self.ksize - 1))
    # = F.sigmoid(F.matmul(h, self.l.W) + F.broadcast_to(self.l.a, (batch_size, self.n_visible)))
    return reconstructed_v
Example 7: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def __call__(self, xs):
    """Forward pass over a batch of sentences.

    :param xs: a batch of sentences
    :return h: final hidden states
    """
    xs = self.embed(xs)
    xs = F.swapaxes(xs, 0, 1)  # (time, batch, embed)
    self.rnn.reset_state()
    for x in xs:
        h = self.rnn(x)
    h = F.tanh(self.linear(h))
    return h
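Note that iterating over the swapped Variable yields one (batch, embed) slice per time step, which is what lets the stateful rnn link be driven step by step; only the hidden state of the final step reaches the linear layer.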
Example 8: reorganize_by_head
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def reorganize_by_head(Q, n_heads):
    mb_size, n_Q, d_model = Q.data.shape
    assert d_model % n_heads == 0
    head_size = d_model // n_heads
    reshaped_Q = F.reshape(Q, (mb_size, n_Q, n_heads, head_size))
    return F.swapaxes(reshaped_Q, 1, 2)  # (mb_size, n_heads, n_Q, head_size)
Example 9: undo_reorganize_by_head
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def undo_reorganize_by_head(Q):
    mb_size, n_heads, n_Q, head_size = Q.data.shape
    swapped_Q = F.swapaxes(Q, 1, 2)  # (mb_size, n_Q, n_heads, head_size)
    return F.reshape(swapped_Q, (mb_size, n_Q, -1))
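A round-trip check (my addition): splitting into heads and merging back are inverse operations.

import numpy as np
import chainer

Q = chainer.Variable(np.arange(2 * 3 * 8, dtype=np.float32).reshape(2, 3, 8))
heads = reorganize_by_head(Q, n_heads=4)   # (2, 4, 3, 2) = (mb, heads, n_Q, head_size)
restored = undo_reorganize_by_head(heads)  # (2, 3, 8)
assert np.array_equal(restored.array, Q.array)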
Example 10: assign
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def assign(self, target : 'Object'):
    # Register every ndarray attribute as an unimplemented stub by default.
    temp = np.array(0)
    for v in dir(temp):
        func = values.Object(
            values.FuncValue(functions.UnimplementedFunction(v), target, None))
        target.attributes.set_predefined_obj(str(v), func)

    # Then override the attributes that are actually supported.
    shape_func = values.Object(
        values.FuncValue(NDArrayShapeFunction(), target, None))
    target.attributes.set_predefined_obj('shape', shape_func)

    size_func = values.Object(
        values.FuncValue(NDArraySizeFunction(), target, None))
    target.attributes.set_predefined_obj('size', size_func)

    cumsum_func = values.Object(
        values.FuncValue(NDArrayCumsumFunction(), target, None))
    target.attributes.set_predefined_obj('cumsum', cumsum_func)

    def add_chainer_function(func):
        func_ = values.Object(
            values.FuncValue(NDArrayChainerFunction(func), target, None))
        target.attributes.set_predefined_obj(func.__name__, func_)

    add_chainer_function(F.reshape)
    add_chainer_function(F.sum)
    add_chainer_function(F.swapaxes)
    add_chainer_function(F.transpose)
Example 11: forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def forward(self, xs, ilens):
    """VGG2L forward

    :param xs:
    :param ilens:
    :return:
    """
    logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))

    # x: utt x frame x dim
    xs = F.pad_sequence(xs)

    # x: utt x 1 (input channel num) x frame x dim
    xs = F.swapaxes(F.reshape(
        xs, (xs.shape[0], xs.shape[1], self.in_channel, xs.shape[2] // self.in_channel)), 1, 2)

    xs = F.relu(self.conv1_1(xs))
    xs = F.relu(self.conv1_2(xs))
    xs = F.max_pooling_2d(xs, 2, stride=2)

    xs = F.relu(self.conv2_1(xs))
    xs = F.relu(self.conv2_2(xs))
    xs = F.max_pooling_2d(xs, 2, stride=2)

    # change ilens accordingly
    # EDIT(hamaji): ChxVM puts int32 on GPU and it hurts the performance.
    # TODO(hamaji): Fix device assignment to get rid of this change.
    ilens = (ilens + 1) // 2
    ilens = (ilens + 1) // 2
    # ilens = self.xp.array(self.xp.ceil(self.xp.array(
    #     ilens, dtype=np.float32) / 2), dtype=np.int32)
    # ilens = self.xp.array(self.xp.ceil(self.xp.array(
    #     ilens, dtype=np.float32) / 2), dtype=np.int32)

    # x: utt_list of frame (remove zero-padded frames) x (input channel num x dim)
    xs = F.swapaxes(xs, 1, 2)
    xs = F.reshape(
        xs, (xs.shape[0], xs.shape[1], xs.shape[2] * xs.shape[3]))
    xs = [xs[i, :ilens[i], :] for i in range(len(ilens))]
    return xs, ilens
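Note that (ilens + 1) // 2 is an integer ceiling division by 2, so the two lines track the two stride-2 max-pooling layers exactly as the commented-out xp.ceil version did, while keeping the lengths as plain integers off the GPU.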
Example 12: forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def forward(self, x):
    y1 = F.swapaxes(x, 1, 3)
    y2 = F.swapaxes(x, 0, 1)
    return y1, y2
Example 13: forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def forward(self, xs, ilens):
    """VGG2L forward

    :param xs:
    :param ilens:
    :return:
    """
    logging.info(self.__class__.__name__ + ' input lengths: ' + str(ilens))

    # x: utt x frame x dim
    xs = F.pad_sequence(xs)

    # x: utt x 1 (input channel num) x frame x dim
    xs = F.swapaxes(F.reshape(
        xs, (xs.shape[0], xs.shape[1], self.in_channel, xs.shape[2] // self.in_channel)), 1, 2)

    xs = F.relu(self.conv1_1(xs))
    xs = F.relu(self.conv1_2(xs))
    xs = F.max_pooling_2d(xs, 2, stride=2)

    xs = F.relu(self.conv2_1(xs))
    xs = F.relu(self.conv2_2(xs))
    xs = F.max_pooling_2d(xs, 2, stride=2)

    # change ilens accordingly
    # EDIT(hamaji): XCVM puts int32 on GPU and it hurts the performance.
    # TODO(hamaji): Fix device assignment to get rid of this change.
    ilens = (ilens + 1) // 2
    ilens = (ilens + 1) // 2
    # ilens = self.xp.array(self.xp.ceil(self.xp.array(
    #     ilens, dtype=np.float32) / 2), dtype=np.int32)
    # ilens = self.xp.array(self.xp.ceil(self.xp.array(
    #     ilens, dtype=np.float32) / 2), dtype=np.int32)

    # x: utt_list of frame (remove zero-padded frames) x (input channel num x dim)
    xs = F.swapaxes(xs, 1, 2)
    xs = F.reshape(
        xs, (xs.shape[0], xs.shape[1], xs.shape[2] * xs.shape[3]))
    xs = [xs[i, :ilens[i], :] for i in range(len(ilens))]
    return xs, ilens
Example 14: forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def forward(self, x):
    y1 = F.swapaxes(x, 1, 3)
    y2 = F.swapaxes(x, 0, 1)
    return y1, y2
Example 15: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import swapaxes [as alias]
def __call__(self, x, batch_size):
    # x: (BT, F)
    # TODO: if chainer >= 5.0, use linear functions with 'n_batch_axes'
    # and let x be (B, T, F); then remove batch_size.
    q = self.linearQ(x).reshape(batch_size, -1, self.h, self.d_k)
    k = self.linearK(x).reshape(batch_size, -1, self.h, self.d_k)
    v = self.linearV(x).reshape(batch_size, -1, self.h, self.d_k)
    scores = F.matmul(
        F.swapaxes(q, 1, 2), k.transpose(0, 2, 3, 1)) / np.sqrt(self.d_k)
    # scores: (B, h, T, T)
    self.att = F.softmax(scores, axis=3)
    p_att = F.dropout(self.att, self.dropout)
    x = F.matmul(p_att, F.swapaxes(v, 1, 2))
    x = F.swapaxes(x, 1, 2).reshape(-1, self.h * self.d_k)
    return self.linearO(x)
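A shape trace of the attention above (my sketch; B, T, h, d_k are placeholder sizes, and the dropout and output projection are omitted):

import numpy as np
import chainer.functions as F

B, T, h, d_k = 2, 5, 4, 16
q = np.zeros((B, T, h, d_k), dtype=np.float32)
k = np.zeros((B, T, h, d_k), dtype=np.float32)
v = np.zeros((B, T, h, d_k), dtype=np.float32)

scores = F.matmul(F.swapaxes(q, 1, 2), k.transpose(0, 2, 3, 1)) / np.sqrt(d_k)
print(scores.shape)  # (2, 4, 5, 5) == (B, h, T, T)
att = F.softmax(scores, axis=3)
ctx = F.swapaxes(F.matmul(att, F.swapaxes(v, 1, 2)), 1, 2).reshape(-1, h * d_k)
print(ctx.shape)     # (10, 64) == (B*T, h*d_k)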