本文整理汇总了Python中chainer.functions.tanh方法的典型用法代码示例。如果您正苦于以下问题:Python functions.tanh方法的具体用法?Python functions.tanh怎么用?Python functions.tanh使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类chainer.functions
的用法示例。
在下文中一共展示了functions.tanh方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __call__
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def __call__(self, prev_hg, prev_he, prev_ce, x, v, r, u):
    """Run one LSTM step of the core.

    Combines the previous encoder/generator hidden states with the frame
    ``x``, viewpoint ``v``, representation ``r`` and canvas ``u`` and
    returns ``(next_h, next_c)``.
    """
    # Fuse frame and canvas channel-wise, then downsample to the LSTM's
    # spatial resolution.
    downsampled = self.downsample_xu(cf.concat((x, u), axis=1))
    v = self.broadcast_v(v)
    # A 1x1 representation map is tiled over the full spatial extent.
    if r.shape[2] == 1:
        r = self.broadcast_r(r)
    gate_inputs = self.lstm(
        cf.concat((prev_he, prev_hg, downsampled, v, r), axis=1))
    if self.use_cuda_kernel:
        # Fused CUDA kernel implementing the same gate arithmetic as below.
        next_h, next_c = CoreFunction()(gate_inputs, prev_ce)
        return next_h, next_c
    f_in, i_in, g_in, o_in = cf.split_axis(gate_inputs, 4, axis=1)
    forget = cf.sigmoid(f_in)
    update = cf.sigmoid(i_in)
    next_c = forget * prev_ce + update * cf.tanh(g_in)
    next_h = cf.sigmoid(o_in) * cf.tanh(next_c)
    return next_h, next_c
示例2: __call__
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def __call__(self, prev_hg, prev_cg, prev_z, v, r, prev_u):
    """Run one generator LSTM step and accumulate the canvas.

    Returns ``(next_h, next_c, next_u)`` where ``next_u`` is the previous
    canvas plus the upsampled new hidden state.
    """
    v = self.broadcast_v(v)
    # A 1x1 representation map is tiled over the full spatial extent.
    if r.shape[2] == 1:
        r = self.broadcast_r(r)
    gate_inputs = self.lstm(cf.concat((prev_hg, v, r, prev_z), axis=1))
    f_in, i_in, g_in, o_in = cf.split_axis(gate_inputs, 4, axis=1)
    forget = cf.sigmoid(f_in)
    update = cf.sigmoid(i_in)
    next_c = forget * prev_cg + update * cf.tanh(g_in)
    next_h = cf.sigmoid(o_in) * cf.tanh(next_c)
    # The canvas is a running sum of upsampled hidden states.
    next_u = self.upsample_h(next_h) + prev_u
    return next_h, next_c, next_u
示例3: bound_by_tanh
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def bound_by_tanh(x, low, high):
    """Squash a given value into [low, high] by tanh.

    Args:
        x (chainer.Variable): value to bound
        low (numpy.ndarray): lower bound
        high (numpy.ndarray): upper bound
    Returns: chainer.Variable
    """
    assert isinstance(x, chainer.Variable)
    assert low is not None
    assert high is not None
    xp = cuda.get_array_module(x.array)
    # Affine map of tanh's (-1, 1) range onto (low, high); a leading batch
    # axis is added so the bounds broadcast over the batch dimension.
    half_range = xp.expand_dims(xp.asarray((high - low) / 2), axis=0)
    center = xp.expand_dims(xp.asarray((high + low) / 2), axis=0)
    return F.tanh(x) * half_range + center
示例4: __call__
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def __call__(self, x):
    """Forward pass: four conv stages, spatial pyramid pooling, then three
    fully connected layers (tanh activations and dropout on fc4/fc5)."""
    net = F.relu(self.conv1_1(x))
    net = F.max_pooling_2d(F.relu(self.conv1_2(net)), 2, stride=2)
    net = F.relu(self.conv2_1(net))
    net = F.max_pooling_2d(F.relu(self.conv2_2(net)), 2, stride=2)
    net = F.relu(self.conv3_1(net))
    net = F.max_pooling_2d(F.relu(self.conv3_2(net)), 2, stride=2)
    net = F.relu(self.conv4_1(net))
    net = F.relu(self.conv4_2(net))
    # SPP yields a fixed-length vector regardless of input spatial size.
    net = F.spatial_pyramid_pooling_2d(net, 3, F.MaxPooling2D)
    # NOTE(review): dropout's `train` keyword is the chainer v1 API —
    # confirm the project pins chainer < 2.
    net = F.dropout(F.tanh(self.fc4(net)), ratio=.5, train=self.train)
    net = F.dropout(F.tanh(self.fc5(net)), ratio=.5, train=self.train)
    return self.fc6(net)
示例5: __call__
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def __call__(self, x, h, c):
    """Step a stack of LSTM layers once.

    ``h`` and ``c`` hold one previous hidden/cell state per layer; the
    output of each layer (after dropout) feeds the next. Returns the lists
    of new hidden and cell states.
    """
    next_h, next_c = [], []
    for layer_idx, layer_name in enumerate(self.x_amps.layer_names):
        h_prev = h[layer_idx]
        c_prev = c[layer_idx]
        # Input and recurrent projections produce all four gates at once.
        gates = self.x_amps[layer_name](x) + self.h_amps[layer_name](h_prev)
        in_g, forget_g, cell_g, out_g = F.split_axis(
            gates, indices_or_sections=4, axis=1)
        c_next = (F.sigmoid(forget_g) * c_prev
                  + F.sigmoid(in_g) * F.tanh(cell_g))
        # NOTE(review): a textbook LSTM applies tanh to the cell state here,
        # not sigmoid — reproduced as-is; confirm this is intentional.
        h_next = F.sigmoid(out_g) * F.sigmoid(c_next)
        next_c.append(c_next)
        next_h.append(h_next)
        x = self.dropout(h_next)
    return next_h, next_c
示例6: __call__
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def __call__(self, x):
    """Forward pass dispatched on ``self.model_name``.

    Supported names: 'rnn' / 'lstm' (recurrent path over the last window
    word), 'lr' (linear model on the flattened embedding) and '2FFNN'
    (two-layer feed-forward net with tanh).
    """
    name = self.model_name
    if name in ('rnn', 'lstm'):
        hidden = self.l1(self.embed(x[:, self.window_size - 1]))
        y = self.l3(hidden)
    if name in ('lr', '2FFNN'):
        flat = self.embed(x)
        # Flatten (batch, window, dim) -> (batch, window * dim).
        flat = flat.reshape((flat.shape[0], flat.shape[1] * flat.shape[2]))
        if name == 'lr':
            y = self.lr(flat)
        if name == '2FFNN':
            y = self.nn2(F.tanh(self.nn1(flat)))
    return y
# Dataset iterator to create a batch of sequences at different positions.
# This iterator returns a pair of current words and the next words. Each
# example is a part of sequences starting from the different offsets
# equally spaced within the whole sequence.
示例7: attend
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def attend(self, encoded_features):
    """Run additive attention over the encoded features for
    ``self.num_labels`` decoding steps, driven by ``self.out_lstm``.

    Returns the list of LSTM outputs, one per step.
    """
    self.out_lstm.reset_state()
    # Precompute the per-feature projections once; both tensors get a new
    # step axis at position 1 so features can be attended over jointly.
    transformed_encoded_features = F.concat([F.expand_dims(self.transform_encoded_features(feature), axis=1) for feature in encoded_features], axis=1)
    concat_encoded_features = F.concat([F.expand_dims(e, axis=1) for e in encoded_features], axis=1)
    # Initial query: a zero tensor shaped like one encoded feature —
    # assumes all encoded features share a shape; TODO confirm.
    lstm_output = self.xp.zeros_like(encoded_features[0])
    outputs = []
    for _ in range(self.num_labels):
        transformed_lstm_output = self.transform_out_lstm_feature(lstm_output)
        attended_feats = []
        # Additive attention: score each feature by tanh(proj(feat) + proj(query)).
        for transformed_encoded_feature in F.separate(transformed_encoded_features, axis=1):
            attended_feat = transformed_encoded_feature + transformed_lstm_output
            attended_feat = F.tanh(attended_feat)
            attended_feats.append(self.generate_attended_feat(attended_feat))
        attended_feats = F.concat(attended_feats, axis=1)
        # Normalize scores across features, then take the alpha-weighted sum.
        alphas = F.softmax(attended_feats, axis=1)
        lstm_input_feature = F.batch_matmul(alphas, concat_encoded_features, transa=True)
        lstm_input_feature = F.squeeze(lstm_input_feature, axis=1)
        # The LSTM output becomes the query for the next step.
        lstm_output = self.out_lstm(lstm_input_feature)
        outputs.append(lstm_output)
    return outputs
示例8: get_gaussian_params
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def get_gaussian_params(self, x):
    """Map ``x`` to mixture-density-network parameters.

    Returns ``(pi, mu, log_var)``: mixing coefficients of shape
    ``(batch, K)`` and means / log-variances of shape ``(batch, K, D)``
    where ``K = self.gaussian_mixtures`` and ``D = self.input_dim``.
    """
    hidden = self.l2(F.tanh(self.l1(x)))
    k = self.gaussian_mixtures
    d = self.input_dim
    batch = x.shape[0]
    # Output layout along axis 1: [pi (K) | mu (K*D) | log_var (K*D)].
    pi = F.softmax(F.reshape(hidden[:, :k], (batch, k)), axis=1)
    mu = F.reshape(hidden[:, k:k + k * d], (batch, k, d))
    log_var = F.reshape(hidden[:, k + k * d:], (batch, k, d))
    return pi, mu, log_var
示例9: x_tanh
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def x_tanh(x):
    """Tanh with an added linear term: ``tanh(x) + 0.2 * x``."""
    return F.tanh(x) + 0.2 * x
示例10: __init__
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def __init__(self, obs_size, action_space,
             n_hidden_layers=2, n_hidden_channels=64,
             bound_mean=None):
    """Build a Gaussian policy (with state-independent diagonal covariance)
    and an MLP value function over a continuous action space.

    Args:
        obs_size: dimensionality of the observation vector.
        action_space: continuous space providing ``low``/``high`` bounds.
        n_hidden_layers: hidden layer count for both networks.
        n_hidden_channels: units per hidden layer.
        bound_mean: whether the policy mean is bounded to the action range;
            must be explicitly True or False (no default behavior).
    """
    # Require an explicit choice rather than silently defaulting.
    assert bound_mean in [False, True]
    super().__init__()
    hidden_sizes = (n_hidden_channels,) * n_hidden_layers
    with self.init_scope():
        self.pi = policies.FCGaussianPolicyWithStateIndependentCovariance(
            obs_size, action_space.low.size,
            n_hidden_layers, n_hidden_channels,
            var_type='diagonal', nonlinearity=F.tanh,
            bound_mean=bound_mean,
            min_action=action_space.low, max_action=action_space.high,
            mean_wscale=1e-2)
        self.v = links.MLP(obs_size, 1, hidden_sizes=hidden_sizes)
示例11: __init__
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def __init__(self, obs_size, action_size):
    """Build a fixed-covariance Gaussian policy and a value function,
    then pass both to the parent constructor.

    Args:
        obs_size: dimensionality of the observation vector.
        action_size: dimensionality of the action vector.
    """
    # NOTE(review): self.pi / self.v are assigned before
    # super().__init__ and outside any init_scope — verify the parent
    # class registers these links correctly.
    self.pi = policies.FCGaussianPolicyWithFixedCovariance(
        obs_size,
        action_size,
        np.log(np.e - 1),  # softplus inverse of 1: fixed unit variance
        n_hidden_layers=2,
        n_hidden_channels=64,
        nonlinearity=F.tanh)
    self.v = v_function.FCVFunction(obs_size, n_hidden_layers=2,
                                    n_hidden_channels=64,
                                    nonlinearity=F.tanh)
    super().__init__(self.pi, self.v)
示例12: _step_rnn_tanh
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def _step_rnn_tanh(rnn, x, state):
    """Advance a single-layer NStepRNNTanh by one timestep.

    ``state`` is the previous hidden state, or None for an all-zero start.
    Returns ``(output, new_state)`` — identical tensors for a plain RNN.
    """
    # Only a single unidirectional layer is supported: one weight/bias pair
    # for the input projection and one for the recurrent projection.
    assert isinstance(rnn, L.NStepRNNTanh)
    assert len(rnn.ws) == 1
    assert len(rnn.bs) == 1
    assert len(rnn.ws[0]) == 2
    assert len(rnn.bs[0]) == 2
    prev_h = state
    if prev_h is None:
        prev_h = rnn.xp.zeros((len(x), rnn.out_size), dtype=np.float32)
    w_in, w_rec = rnn.ws[0]
    b_in, b_rec = rnn.bs[0]
    new_h = F.tanh(F.linear(x, w_in, b_in) + F.linear(prev_h, w_rec, b_rec))
    return new_h, new_h
示例13: scale_by_tanh
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def scale_by_tanh(x, low, high):
    """Map ``x`` through tanh into the interval [low, high].

    ``low`` and ``high`` are array-like bounds; a leading batch axis is
    added so they broadcast over the batch dimension.
    """
    xp = cuda.get_array_module(x.array)
    half_range = xp.expand_dims(
        xp.asarray((high - low) / 2, dtype=np.float32), axis=0)
    center = xp.expand_dims(
        xp.asarray((high + low) / 2, dtype=np.float32), axis=0)
    return F.tanh(x) * half_range + center
示例14: most_probable
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def most_probable(self):
    """Return the most probable action: tanh applied to the Gaussian mean."""
    return F.tanh(self.mean)
示例15: sample_with_log_prob
# 需要导入模块: from chainer import functions [as 别名]
# 或者: from chainer.functions import tanh [as 别名]
def sample_with_log_prob(self):
    """Sample a tanh-squashed Gaussian action and its log-probability.

    Returns a tuple ``(y, log_prob)`` where ``y = tanh(x)`` for a sample
    ``x ~ N(self.mean, exp(self.ln_var))`` and ``log_prob`` is the
    per-sample log-density summed over action dimensions.
    """
    x = F.gaussian(self.mean, self.ln_var)
    # Elementwise Gaussian log-density of the pre-squash sample.
    normal_log_prob = _eltwise_gaussian_log_likelihood(
        x, self.mean, self.var, self.ln_var)
    # Change-of-variables correction for the tanh squashing.
    log_probs = normal_log_prob - _tanh_forward_log_det_jacobian(x)
    y = F.tanh(x)
    return y, F.sum(log_probs, axis=1)