This article collects typical usage examples of the chainer.functions.concat method in Python. If you are wondering how to use functions.concat, what it does, or what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples of its containing module, chainer.functions.
The following 15 code examples of functions.concat are shown, sorted by popularity by default.
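Before the examples, here is a minimal, self-contained sketch of what F.concat does; the shapes and data are made up for illustration:
import numpy as np
import chainer.functions as F

a = np.zeros((2, 3), dtype=np.float32)
b = np.ones((2, 5), dtype=np.float32)
h = F.concat((a, b), axis=1)  # join along the channel axis (axis=1 is the default)
print(h.shape)  # (2, 8)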
Example 1: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def __call__(self, x):
if self.dr:
with chainer.using_config('train', True):
x = F.dropout(x, self.dr)
if self.gap:
x = F.sum(x, axis=(2, 3))
N = x.shape[0]
# The code below is copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
feature = F.reshape(F.leaky_relu(x), (N, -1))
m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
m0 = F.broadcast_to(m, (N, self.B * self.C, N))
m1 = F.transpose(m0, (2, 1, 0))
d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
h = F.concat([feature, d])
h = self.l(h)
return h
Example 2: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def __call__(self, prev_hg, prev_he, prev_ce, x, v, r, u):
xu = cf.concat((x, u), axis=1)
xu = self.downsample_xu(xu)
v = self.broadcast_v(v)
if r.shape[2] == 1:
r = self.broadcast_r(r)
lstm_input = cf.concat((prev_he, prev_hg, xu, v, r), axis=1)
gate_inputs = self.lstm(lstm_input)
if self.use_cuda_kernel:
next_h, next_c = CoreFunction()(gate_inputs, prev_ce)
else:
forget_gate_input, input_gate_input, tanh_input, output_gate_input = cf.split_axis(
gate_inputs, 4, axis=1)
forget_gate = cf.sigmoid(forget_gate_input)
input_gate = cf.sigmoid(input_gate_input)
next_c = forget_gate * prev_ce + input_gate * cf.tanh(tanh_input)
output_gate = cf.sigmoid(output_gate_input)
next_h = output_gate * cf.tanh(next_c)
return next_h, next_c
Example 3: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def __call__(self, prev_hg, prev_cg, prev_z, v, r, prev_u):
v = self.broadcast_v(v)
if r.shape[2] == 1:
r = self.broadcast_r(r)
lstm_input = cf.concat((prev_hg, v, r, prev_z), axis=1)
gate_inputs = self.lstm(lstm_input)
forget_gate_input, input_gate_input, tanh_input, output_gate_input = cf.split_axis(
gate_inputs, 4, axis=1)
forget_gate = cf.sigmoid(forget_gate_input)
input_gate = cf.sigmoid(input_gate_input)
next_c = forget_gate * prev_cg + input_gate * cf.tanh(tanh_input)
output_gate = cf.sigmoid(output_gate_input)
next_h = output_gate * cf.tanh(next_c)
next_u = self.upsample_h(next_h) + prev_u
return next_h, next_c, next_u
Example 4: n_step_forward
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def n_step_forward(self, x, recurrent_state, output_mode):
"""Multi-step batch forward computation.
This method sequentially applies layers as chainer.Sequential does.
Args:
x (list): Input sequences. Each sequence should be a variable whose
first axis corresponds to time or a tuple of such variables.
recurrent_state (object): Batched recurrent state. If set to None,
it is initialized.
output_mode (str): If set to 'concat', the output value is
concatenated into a single large batch, which can be suitable
for loss computation. If set to 'split', the output value is
a list of output sequences.
Returns:
object: Output sequences. See the description of the `output_mode`
argument.
object: New batched recurrent state.
"""
raise NotImplementedError
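Since n_step_forward itself is abstract, a minimal sketch of how a subclass might honor the output_mode contract may help. The NStepLSTM link (self.lstm) and the (h, c) state layout here are assumptions for illustration, not the library's actual implementation:
# Hypothetical subclass sketch; self.lstm is an assumed chainer.links.NStepLSTM
# and the (h, c) recurrent-state layout is an assumption.
def n_step_forward(self, x, recurrent_state, output_mode):
    h, c = recurrent_state if recurrent_state is not None else (None, None)
    next_h, next_c, ys = self.lstm(h, c, x)  # ys: one output variable per sequence
    if output_mode == 'concat':
        # One large batch, convenient for loss computation
        return F.concat(ys, axis=0), (next_h, next_c)
    # 'split': keep one output variable per input sequence
    return ys, (next_h, next_c)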
Example 5: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def __call__(self, x, recurrent_state):
"""One-step batch forward computation.
Args:
x (chainer.Variable, ndarray, or tuple): One-step batched input.
recurrent_state (object): Batched recurrent state.
Returns:
chainer.Variable, ndarray, or tuple: One-step batched output.
object: New batched recurrent state.
"""
assert isinstance(x, (chainer.Variable, self.xp.ndarray))
return self.n_step_forward(
split_one_step_batch_input(x),
recurrent_state,
output_mode='concat',
)
Example 6: channelwise_inhibited
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def channelwise_inhibited(self, h):
self.c = random.randint(0, 2)
xp = cuda.get_array_module(h.data)
num = h.data.shape[0]
h = F.split_axis(h, 3, 1)
c = F.reshape(h[self.c], (num, 16, 16))
z = Variable(xp.zeros_like(c.data))
c = F.batch_matmul(c, z)  # multiply by a zero matrix to null out the chosen channel
c = F.reshape(c, (num, 1, 16, 16))
hs = []
for i, s in enumerate(h):
if i == self.c:
hs.append(c)
else:
hs.append(s)
return F.concat(hs, 1)
Example 7: channelwise_inhibited
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def channelwise_inhibited(self, h):
xp = cuda.get_array_module(h.data)
num = h.data.shape[0]
h = F.split_axis(h, 3, 1)
c = F.reshape(h[self.c], (num, 16, 16))
z = Variable(xp.zeros_like(c.data))
c = F.batch_matmul(c, z)  # multiply by a zero matrix to null out the chosen channel
c = F.reshape(c, (num, 1, 16, 16))
hs = []
for i, s in enumerate(h):
if i == self.c:
hs.append(c)
else:
hs.append(s)
return F.concat(hs, 1)
Example 8: apply_to_seq
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def apply_to_seq(self, seq_list):
mb_size = len(seq_list)
mb_initial_cell, mb_initial_state = self.get_initial_states(mb_size)
return self.nstep_lstm(mb_initial_cell, mb_initial_state, seq_list)
# class DoubleGRU(Chain):
# def __init__(self, H, I):
# log.info("using double GRU")
# self.H1 = H // 2
# self.H2 = H - self.H1
# super(DoubleGRU, self).__init__(
# gru1 = faster_gru.GRU(self.H1, I),
# gru2 = faster_gru.GRU(self.H2, self.H1)
# )
#
# def __call__(self, prev_state, inpt):
# prev_state1, prev_state2 = F.split_axis(prev_state, (self.H1,), axis = 1)
#
# prev_state1 = self.gru1(prev_state1, inpt)
# prev_state2 = self.gru2(prev_state2, prev_state1)
#
# return F.concat((prev_state1, prev_state2), axis = 1)
Example 9: advance_state
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def advance_state(self, previous_states, prev_y):
current_mb_size = prev_y.data.shape[0]
assert self.mb_size is None or current_mb_size <= self.mb_size
if current_mb_size < len(previous_states[0].data):
truncated_states = [None] * len(previous_states)
for num_state in six.moves.range(len(previous_states)):
truncated_states[num_state], _ = F.split_axis(
previous_states[num_state], (current_mb_size,), 0)
previous_states = tuple(truncated_states)
output_state = previous_states[-1]
if self.decoder_chain.use_goto_attention:
ci, attn = self.compute_ctxt(output_state, prev_y)
else:
ci, attn = self.compute_ctxt(output_state)
concatenated = F.concat((prev_y, ci))
new_states = self.decoder_chain.gru(previous_states, concatenated)
return new_states, concatenated, attn
Example 10: compute_logits
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def compute_logits(self, seq_list, encoded_input, mask_input):
mb_size = len(seq_list)
max_length_1 = max(len(x) for x in seq_list)
x, mask = self.make_batch(seq_list)
# print "padded_data", x
# print "mask", mask
assert self.xp.all(mask_input == self.xp.broadcast_to(mask_input[:,0:1,0:1,:], mask_input.shape))
encoded = self.emb(x)
encoded += self.get_pos_vect(mb_size, max_length_1)
if self.dropout is not None:
encoded = F.dropout(encoded, self.dropout)
bos_plus_encoded = F.concat((F.broadcast_to(self.bos_encoding, (mb_size, 1, self.d_model)), encoded), axis=1)
cross_mask = self.xp.broadcast_to(mask_input[:,0:1,0:1,:], (mask_input.shape[0], self.n_heads, bos_plus_encoded.data.shape[1], mask_input.shape[3]))
final_layer = self.encoding_layers(bos_plus_encoded, encoded_input, mask, cross_mask)
logits = apply_linear_layer_to_last_dims(final_layer, self.logits_layer)
return logits
Example 11: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def __call__(self, x):
br0 = self.Branch_0.Conv2d_1x1(x)
br1 = self.Branch_1.Conv2d_0a_1x1(x)
br1 = self.Branch_1.Conv2d_0b_3x3(br1)
br2 = self.Branch_2.Conv2d_0a_1x1(x)
br2 = self.Branch_2.Conv2d_0b_3x3(br2)
br2 = self.Branch_2.Conv2d_0c_3x3(br2)
mixed = F.concat((br0, br1, br2), axis=1)
up = self.Conv2d_1x1(mixed)
x += self.scale * up
if self.activation_fn is not None:
x = self.activation_fn(x)
return x
Example 12: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def __call__(self, s, xs):
"""Calculate all hidden states and cell states.
Args:
s (~chainer.Variable or None): Initial (hidden & cell) states. If ``None``
is specified, zero vectors are used.
xs (list of ~chainer.Variable): List of input sequences.
Each element ``xs[i]`` is a :class:`chainer.Variable` holding
a sequence.
Returns:
(hy, cy): a pair of the hidden and cell states at the end of the sequences,
ys: the hidden state sequence at the last layer
"""
if len(xs) > 1:
sections = np.cumsum(np.array([len(x) for x in xs[:-1]], dtype=np.int32))
xs = F.split_axis(self.embed(F.concat(xs, axis=0)), sections, axis=0)
else:
xs = [self.embed(xs[0])]
if s is not None:
hy, cy, ys = self.lstm(s[0], s[1], xs)
else:
hy, cy, ys = self.lstm(None, None, xs)
return (hy, cy), ys
Example 13: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def __call__(self, x, h, train):
xp = chainer.cuda.get_array_module(x)
param_num = 0
for name, f in self.forward:
if 'conv' in name:
x = getattr(self, name)(x)
param_num += f.W.shape[0] * f.W.shape[2] * f.W.shape[3] * f.W.shape[1] + f.W.shape[0]  # conv weights + biases
elif 'bn' in name:
x = getattr(self, name)(x, not train)
param_num += x.data.shape[1]*2
elif 'act' in name:
x = f(x)
else:
print('not defined function at ResBlock __call__')
exit(1)
in_data = [x, h]
# check of the image size
small_in_id, large_in_id = (0, 1) if in_data[0].shape[2] < in_data[1].shape[2] else (1, 0)
pool_num = int(xp.floor(xp.log2(in_data[large_in_id].shape[2] / in_data[small_in_id].shape[2])))
for _ in range(pool_num):
in_data[large_in_id] = F.max_pooling_2d(in_data[large_in_id], self.pool_size, self.pool_size, 0, False)
# check of the channel size
small_ch_id, large_ch_id = (0, 1) if in_data[0].shape[1] < in_data[1].shape[1] else (1, 0)
pad_num = int(in_data[large_ch_id].shape[1] - in_data[small_ch_id].shape[1])
tmp = in_data[large_ch_id][:, :pad_num, :, :]
in_data[small_ch_id] = F.concat((in_data[small_ch_id], tmp * 0), axis=1)
return (F.relu(in_data[0]+in_data[1]), param_num)
Example 14: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def __call__(self, x):
N = x.shape[0]
# The code below is copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
feature = F.reshape(x, (N, -1))
m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
m0 = F.broadcast_to(m, (N, self.B * self.C, N))
m1 = F.transpose(m0, (2, 1, 0))
d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
h = F.concat([feature, d])
h = self.l(h)
return h
Example 15: __call__
# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import concat [as alias]
def __call__(self, x, v):
v = self.broadcast_v(v)
resnet_in = cf.relu(self.conv1_1(x))
residual = cf.relu(self.conv1_res(resnet_in))
out = cf.relu(self.conv1_2(resnet_in))
out = cf.relu(self.conv1_3(out)) + residual
resnet_in = cf.concat((out, v), axis=1)
residual = cf.relu(self.conv2_res(resnet_in))
out = cf.relu(self.conv2_1(resnet_in))
out = cf.relu(self.conv2_2(out)) + residual
out = self.conv2_3(out)
return out