This article collects typical usage examples of the tensorflow.compat.v1.tanh method in Python. If you are wondering what v1.tanh does, how to call it, or what real-world usage looks like, the curated examples here should help. You can also explore further usage examples for the module it belongs to, tensorflow.compat.v1.
Below are 14 code examples of the v1.tanh method, sorted by popularity by default.
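Before the project examples, here is a minimal, self-contained sketch (not taken from any project below; the tensor names are illustrative). tf.tanh applies the hyperbolic tangent elementwise, squashing values into (-1, 1):

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # run in graph mode, as the examples below do

x = tf.placeholder(tf.float32, shape=[3], name="x")
y = tf.tanh(x)  # elementwise hyperbolic tangent, range (-1, 1)

with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [-2.0, 0.0, 2.0]}))
    # -> [-0.9640276  0.         0.9640276]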
Example 1: update_internal_states_early
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def update_internal_states_early(self, internal_states, frames):
  """Update the internal states early in the network in GRU-like way."""
  batch_size = common_layers.shape_list(frames[0])[0]
  internal_state = internal_states[0][0][:batch_size, :, :, :]
  state_activation = tf.concat([internal_state, frames[0]], axis=-1)
  state_gate_candidate = tf.layers.conv2d(
      state_activation, 2 * self.hparams.recurrent_state_size,
      (3, 3), padding="SAME", name="state_conv")
  state_gate, state_candidate = tf.split(state_gate_candidate, 2, axis=-1)
  state_gate = tf.nn.sigmoid(state_gate)
  state_candidate = tf.tanh(state_candidate)
  internal_state = internal_state * state_gate
  internal_state += state_candidate * (1.0 - state_gate)
  max_batch_size = max(_MAX_BATCH, self.hparams.batch_size)
  diff_batch_size = max_batch_size - batch_size
  internal_state = tf.pad(
      internal_state, [[0, diff_batch_size], [0, 0], [0, 0], [0, 0]])
  return [[internal_state]]
Example 2: conv_lstm
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def conv_lstm(x,
              kernel_size,
              filters,
              padding="SAME",
              dilation_rate=(1, 1),
              name=None,
              reuse=None):
  """Convolutional LSTM in 1 dimension."""
  with tf.variable_scope(
      name, default_name="conv_lstm", values=[x], reuse=reuse):
    gates = conv(
        x,
        4 * filters,
        kernel_size,
        padding=padding,
        dilation_rate=dilation_rate)
    g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3)
    new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3])
    return tf.sigmoid(g[2]) * tf.tanh(new_cell)
Example 3: tanh_discrete_bottleneck
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise,
                             discretize_warmup_steps, mode):
  """Simple discretization through tanh, flip bottleneck_noise many bits."""
  x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck")
  d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0
  if mode == tf.estimator.ModeKeys.TRAIN:
    x += tf.truncated_normal(
        common_layers.shape_list(x), mean=0.0, stddev=0.2)
  x = tf.tanh(x)
  d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
  if mode == tf.estimator.ModeKeys.TRAIN:
    noise = tf.random_uniform(common_layers.shape_list(x))
    noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
    d *= noise
  d = common_layers.mix(d, x, discretize_warmup_steps,
                        mode == tf.estimator.ModeKeys.TRAIN)
  return d, d0
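The line d = x + tf.stop_gradient(...) above is the straight-through estimator: the forward pass emits hard values in {-1, 1}, while the backward pass uses the gradient of the smooth tanh. A minimal sketch of the same pattern in isolation (x here is any hypothetical float tensor):

# Straight-through estimator sketch: hard sign forward, tanh gradient backward.
soft = tf.tanh(x)
hard = 2.0 * tf.to_float(tf.less(0.0, soft)) - 1.0  # sign in {-1.0, +1.0}
out = tf.stop_gradient(hard - soft) + soft  # value == hard, gradient == that of soft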
Example 4: generator_fn_specgram
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def generator_fn_specgram(inputs, **kwargs):
  """Builds generator network."""
  # inputs = (noises, one_hot_labels)
  with tf.variable_scope('generator_cond'):
    z = tf.concat(inputs, axis=1)
    if kwargs['to_rgb_activation'] == 'tanh':
      to_rgb_activation = tf.tanh
    elif kwargs['to_rgb_activation'] == 'linear':
      to_rgb_activation = lambda x: x
    fake_images, end_points = networks.generator(
        z,
        kwargs['progress'],
        lambda block_id: _num_filters_fn(block_id, **kwargs),
        kwargs['resolution_schedule'],
        num_blocks=kwargs['num_blocks'],
        kernel_size=kwargs['kernel_size'],
        colors=2,
        to_rgb_activation=to_rgb_activation,
        simple_arch=kwargs['simple_arch'])
  shape = fake_images.shape
  normalizer = data_normalizer.registry[kwargs['data_normalizer']](kwargs)
  fake_images = normalizer.denormalize_op(fake_images)
  fake_images.set_shape(shape)
  return fake_images, end_points
Example 5: lstm
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
    """lstm cell"""
    _, nin = [v.value for v in xs[0].get_shape()]  # the first is nbatch
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))

    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)  # reset cell state where the done-mask m is 1
        h = h*(1-m)  # reset hidden state likewise
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
        i = tf.nn.sigmoid(i)  # input gate
        f = tf.nn.sigmoid(f)  # forget gate
        o = tf.nn.sigmoid(o)  # output gate
        u = tf.tanh(u)        # candidate cell update
        c = f*c + i*u
        h = o*tf.tanh(c)
        xs[idx] = h
    s = tf.concat(axis=1, values=[c, h])
    return xs, s
Example 6: apply_highway_lstm
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def apply_highway_lstm(x, seq_len):
  """Run a bi-directional LSTM with highway connections over `x`.

  Args:
    x: <tf.float32>[batch, seq_len, dim]
    seq_len: <tf.int32>[batch] or None, sequence lengths of `x`

  Returns:
    out, <tf.float32>[batch, seq_len, out_dim]
  """
  lstm_out = apply_lstm(x, seq_len)
  proj = ops.affine(x, FLAGS.lstm_dim * 4, "w", bias_name="b")
  gate, transform = tf.split(proj, 2, 2)
  gate = tf.sigmoid(gate)
  transform = tf.tanh(transform)
  return lstm_out * gate + (1 - gate) * transform
Example 7: create_nn
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def create_nn(self, features, name=None):
    if name is None:
        name = self.actor_name

    with tf.variable_scope(name + '_fc_1'):
        fc1 = layer(features, 64)
    with tf.variable_scope(name + '_fc_2'):
        fc2 = layer(fc1, 64)
    with tf.variable_scope(name + '_fc_3'):
        fc3 = layer(fc2, 64)
    with tf.variable_scope(name + '_fc_4'):
        fc4 = layer(fc3, self.action_space_size, is_output=True)

    output = tf.tanh(fc4) * self.action_space_bounds + self.action_offset
    return output
Example 8: _make_net
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def _make_net(self, reg):
    '''
    Helper method to create a new net with a specified regularisation coefficient. The net is not
    initialised, so you must call init() or load() on it before any other method.

    Args:
        reg (float): Regularisation coefficient.
    '''
    def gelu_fast(_x):
        # Fast tanh-based approximation of the GELU activation (requires numpy as np).
        return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))

    creator = lambda: SingleNeuralNet(
        self.num_params,
        [64]*5, [gelu_fast]*5,
        0.2,  # train_threshold_ratio
        16,   # batch_size
        1.,   # keep_prob
        reg,
        self.losses_list,
        learner_archive_dir=self.learner_archive_dir,
        start_datetime=self.start_datetime)
    return SampledNeuralNet(creator, 1)
Example 9: test_forward_unary
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def test_forward_unary():
    def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32):
        """test unary operators"""
        np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
        tf.reset_default_graph()
        with tf.Graph().as_default():
            in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data")
            out = op(in_data)
            compare_tf_with_tvm([np_data], ['in_data:0'], out.name)

    _test_forward_unary(tf.acos, -1, 1)
    _test_forward_unary(tf.asin, -1, 1)
    _test_forward_unary(tf.atanh, -1, 1)
    _test_forward_unary(tf.sinh)
    _test_forward_unary(tf.cosh)
    _test_forward_unary(tf.acosh)
    _test_forward_unary(tf.asinh)
    _test_forward_unary(tf.atan)
    _test_forward_unary(tf.sin)
    _test_forward_unary(tf.cos)
    _test_forward_unary(tf.tan)
    _test_forward_unary(tf.tanh)
    _test_forward_unary(tf.erf)
    _test_forward_unary(tf.log)
    _test_forward_unary(tf.log1p)
Example 10: feed_forward_gaussian_fun
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def feed_forward_gaussian_fun(action_space, config, observations):
  """Feed-forward Gaussian."""
  if not isinstance(action_space, gym.spaces.box.Box):
    raise ValueError("Expecting continuous action space.")

  mean_weights_initializer = tf.initializers.variance_scaling(
      scale=config.init_mean_factor)
  logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)

  flat_observations = tf.reshape(observations, [
      tf.shape(observations)[0], tf.shape(observations)[1],
      functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])

  with tf.variable_scope("network_parameters"):
    with tf.variable_scope("policy"):
      x = flat_observations
      for size in config.policy_layers:
        x = tf.layers.dense(x, size, activation=tf.nn.relu)
      mean = tf.layers.dense(
          x, action_space.shape[0], activation=tf.tanh,
          kernel_initializer=mean_weights_initializer)
      logstd = tf.get_variable(
          "logstd", mean.shape[2:], tf.float32, logstd_initializer)
      logstd = tf.tile(
          logstd[None, None],
          [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2))
    with tf.variable_scope("value"):
      x = flat_observations
      for size in config.value_layers:
        x = tf.layers.dense(x, size, activation=tf.nn.relu)
      value = tf.layers.dense(x, 1)[..., 0]

  mean = tf.check_numerics(mean, "mean")
  logstd = tf.check_numerics(logstd, "logstd")
  value = tf.check_numerics(value, "value")

  policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd))
  return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2))
Example 11: gated_linear_map
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def gated_linear_map(self, inputs, suffix, bias_start_reset, in_units,
                     out_units):
  """Linear mapping with two reset gates.

  Args:
    inputs: Input tensor
    suffix: Linear map name suffix
    bias_start_reset: Bias start value for reset gate
    in_units: Size of input tensor feature map count
    out_units: Size of output tensor feature map count

  Returns:
    tf.Tensor: Convolution applied to input tensor
  """

  def reset_gate(name):
    prefix = self.prefix + name + suffix
    reset = conv_linear_map(inputs, in_units * 2, in_units * 2,
                            bias_start_reset, prefix)
    return tf.nn.sigmoid(reset)

  in_shape = [self.batch_size, self.length // 2, in_units * 2]
  inputs = tf.reshape(inputs, in_shape)
  reset1 = reset_gate("/reset1/")
  reset2 = reset_gate("/reset2/")
  res1 = conv_linear_map(inputs * reset1, in_units * 2, out_units, 0.0,
                         self.prefix + "/cand1/" + suffix)
  res2 = conv_linear_map(inputs * reset2, in_units * 2, out_units, 0.0,
                         self.prefix + "/cand2/" + suffix)

  res = tf.concat([res1, res2], axis=2)
  res = tf.reshape(res, [self.batch_size, self.length, out_units])
  return tf.nn.tanh(res)
Example 12: bottleneck
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def bottleneck(self, x):
  with tf.variable_scope("bottleneck"):
    hparams = self.hparams
    x = tf.layers.dense(x, hparams.bottleneck_bits, name="bottleneck")
    if hparams.mode == tf.estimator.ModeKeys.TRAIN:
      noise = 2.0 * tf.random_uniform(common_layers.shape_list(x)) - 1.0
      return tf.tanh(x) + noise * hparams.bottleneck_noise, 0.0
    return tf.tanh(x), 0.0
Example 13: unstack
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def unstack(self, b, size, bottleneck_bits, name):
  with tf.variable_scope(name + "_unstack"):
    unb = self.unbottleneck(b, size)
    dec = self.decoder(unb)
    pred = tf.layers.dense(dec, bottleneck_bits, name="pred")
    pred_shape = common_layers.shape_list(pred)
    pred1 = tf.reshape(pred, pred_shape[:-1] + [-1, 2])
    x, y = tf.split(pred1, 2, axis=-1)
    x = tf.squeeze(x, axis=[-1])
    y = tf.squeeze(y, axis=[-1])
    gt = 2.0 * tf.to_float(tf.less(x, y)) - 1.0
    gtc = tf.tanh(y - x)
    gt += gtc - tf.stop_gradient(gtc)
    return gt, pred1
Example 14: discriminator
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import tanh [as alias]
def discriminator(x, compress, hparams, name, reuse=None):
  with tf.variable_scope(name, reuse=reuse):
    x = tf.stop_gradient(2 * x) - x  # Reverse gradient.
    if compress:
      x = transformer_vae.compress(x, None, False, hparams, "compress")
    else:
      x = transformer_vae.residual_conv(x, 1, 3, hparams, "compress_rc")
    y = tf.reduce_mean(x, axis=1)
    return tf.tanh(tf.layers.dense(y, 1, name="reduce"))