This article collects typical usage examples of the Python method tensorflow.compat.v1.constant_initializer. If you have been wondering what v1.constant_initializer does or how to use it in practice, the curated examples below should help; you can also explore the other methods of the tensorflow.compat.v1 module.
The following 15 code examples of v1.constant_initializer are presented, sorted by popularity by default.
Example 1: init_vq_bottleneck
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def init_vq_bottleneck(bottleneck_size, hidden_size):
"""Get lookup table for VQ bottleneck."""
means = tf.get_variable(
name="means",
shape=[bottleneck_size, hidden_size],
initializer=tf.uniform_unit_scaling_initializer())
ema_count = tf.get_variable(
name="ema_count",
shape=[bottleneck_size],
initializer=tf.constant_initializer(0),
trainable=False)
with tf.colocate_with(means):
ema_means = tf.get_variable(
name="ema_means",
initializer=means.initialized_value(),
trainable=False)
return means, ema_means, ema_count
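A minimal usage sketch (my own, not from the original source), assuming `tf` is `tensorflow.compat.v1` running in graph mode as in all the examples on this page:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

means, ema_means, ema_count = init_vq_bottleneck(
    bottleneck_size=8, hidden_size=4)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  # tf.constant_initializer(0) starts the EMA counts at zero.
  print(sess.run(ema_count))  # [0. 0. 0. 0. 0. 0. 0. 0.]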
Example 2: get_vq_codebook
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def get_vq_codebook(codebook_size, hidden_size):
"""Get lookup table for VQ bottleneck."""
with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
means = tf.get_variable(
name="means",
shape=[codebook_size, hidden_size],
initializer=tf.uniform_unit_scaling_initializer())
ema_count = tf.get_variable(
name="ema_count",
shape=[codebook_size],
initializer=tf.constant_initializer(0),
trainable=False)
with tf.colocate_with(means):
ema_means = tf.get_variable(
name="ema_means",
initializer=means.initialized_value(),
trainable=False)
return means, ema_means, ema_count
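Because the scope is opened with `reuse=tf.AUTO_REUSE`, a second call hands back the existing variables instead of raising; a quick sketch of that property (hypothetical usage, not from the source):

m1, _, _ = get_vq_codebook(codebook_size=8, hidden_size=4)
m2, _, _ = get_vq_codebook(codebook_size=8, hidden_size=4)
assert m1 is m2  # Both calls resolve to the same "vq/means" variable.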
Example 3: _address_content
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def _address_content(self, x):
"""Address the memory based on content similarity.
Args:
x: a tensor in the shape of [batch_size, length, depth].
Returns:
the logits for each memory entry [batch_size, length, memory_size].
"""
mem_keys = tf.layers.dense(self.mem_vals, self.key_depth,
bias_initializer=tf.constant_initializer(1.0),
name="mem_key")
mem_query = tf.layers.dense(x, self.key_depth,
bias_initializer=tf.constant_initializer(1.0),
name="mem_query")
norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys),
transpose_b=True)
dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True)
cos_dist = tf.div(dot_product, norm + 1e-7, name="cos_dist")
access_logits = self.sharpen_factor * cos_dist
return access_logits
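The method relies on a `self._norm` helper that is not shown. Consistent with how it is used (the matmul of the two norm tensors must broadcast to the shape of `dot_product`), a plausible sketch is an L2 norm over the last axis with that axis kept; treat this as an assumption, not the original code:

def _norm(self, x):
  # L2 norm along the last axis, kept as a trailing singleton so that
  # tf.matmul(self._norm(q), self._norm(k), transpose_b=True) yields the
  # pairwise norm products that turn dot products into cosine distances.
  return tf.norm(x, axis=-1, keepdims=True)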
Example 4: test_adam
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def test_adam(self):
  with self.test_session() as sess:
    w = tf.get_variable(
        "w",
        shape=[3],
        initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
    x = tf.constant([0.4, 0.2, -0.5])
    loss = tf.reduce_mean(tf.square(x - w))
    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    global_step = tf.train.get_or_create_global_step()
    optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
    train_op = optimizer.apply_gradients(list(zip(grads, tvars)), global_step)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
    for _ in range(100):
      sess.run(train_op)
    w_np = sess.run(w)
    self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)
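As the test shows, `tf.constant_initializer` also accepts a Python list, as long as its total size matches the variable shape; a standalone sketch:

w = tf.get_variable(
    "w_demo", shape=[3],
    initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(w))  # [ 0.1 -0.2 -0.1]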
Example 5: build
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def build(self, inputs_shape):
  if not inputs_shape[1]:
    raise ValueError(
        "Expecting inputs_shape[1] to be set: %s" % str(inputs_shape))
  input_size = int(inputs_shape[1])
  self._kernel = self.add_variable(
      self._names["W"], [input_size + self._num_units, self._num_units * 4])
  self._bias = self.add_variable(
      self._names["b"], [self._num_units * 4],
      initializer=tf.constant_initializer(0.0))
  if self._use_peephole:
    self._w_i_diag = self.add_variable(self._names["wci"], [self._num_units])
    self._w_f_diag = self.add_variable(self._names["wcf"], [self._num_units])
    self._w_o_diag = self.add_variable(self._names["wco"], [self._num_units])
  self.built = True
Example 6: testInputProjectionWrapper
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def testInputProjectionWrapper(self):
  with self.cached_session() as sess:
    with tf.variable_scope(
        "root", initializer=tf.constant_initializer(0.5)):
      x = tf.zeros([1, 2])
      m = tf.zeros([1, 3])
      cell = contrib_rnn.InputProjectionWrapper(
          rnn_cell.GRUCell(3), num_proj=3)
      g, new_m = cell(x, m)
      sess.run([tf.global_variables_initializer()])
      res = sess.run([g, new_m], {
          x.name: np.array([[1., 1.]]),
          m.name: np.array([[0.1, 0.1, 0.1]])
      })
      self.assertEqual(res[1].shape, (1, 3))
      # The numbers in results were not calculated, this is just a smoke test.
      self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])
Example 7: testFCIntervalBounds
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def testFCIntervalBounds(self):
  m = snt.Linear(1, initializers={
      'w': tf.constant_initializer(1.),
      'b': tf.constant_initializer(2.),
  })
  z = tf.constant([[1, 2, 3]], dtype=tf.float32)
  m(z)  # Connect to create weights.
  m = ibp.LinearFCWrapper(m)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  output_bounds = m.propagate_bounds(input_bounds)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    l = l.item()
    u = u.item()
    self.assertAlmostEqual(5., l)
    self.assertAlmostEqual(11., u)
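A quick numpy check of those expected bounds (my own sketch of interval arithmetic through an affine layer, not part of the test): with all-ones weights, bias 2, and per-coordinate input intervals [0, 2], [1, 3], [2, 4], the output interval is [5, 11].

import numpy as np

w = np.ones((3, 1))
b = 2.0
lo = np.array([0., 1., 2.])
hi = np.array([2., 3., 4.])
# Positive weights take the matching bound, negative weights the opposite one.
l = np.maximum(w, 0.).T @ lo + np.minimum(w, 0.).T @ hi + b
u = np.maximum(w, 0.).T @ hi + np.minimum(w, 0.).T @ lo + b
print(l, u)  # [5.] [11.]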
Example 8: testConv1dIntervalBounds
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def testConv1dIntervalBounds(self):
  m = snt.Conv1D(
      output_channels=1,
      kernel_shape=2,
      padding='VALID',
      stride=1,
      use_bias=True,
      initializers={
          'w': tf.constant_initializer(1.),
          'b': tf.constant_initializer(2.),
      })
  z = tf.constant([3, 4], dtype=tf.float32)
  z = tf.reshape(z, [1, 2, 1])
  m(z)  # Connect to create weights.
  m = ibp.LinearConv1dWrapper(m)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  output_bounds = m.propagate_bounds(input_bounds)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    l = l.item()
    u = u.item()
    self.assertAlmostEqual(7., l)
    self.assertAlmostEqual(11., u)
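By the same interval arithmetic as in Example 7, the single VALID window of width 2 sums both inputs, so lower = (3-1) + (4-1) + 2 = 7 and upper = (3+1) + (4+1) + 2 = 11, matching the assertions.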
Example 9: testBatchNormIntervalBounds
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def testBatchNormIntervalBounds(self):
  z = tf.constant([[1, 2, 3]], dtype=tf.float32)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  g = tf.reshape(tf.range(-1, 2, dtype=tf.float32), [1, 3])
  b = tf.reshape(tf.range(3, dtype=tf.float32), [1, 3])
  batch_norm = ibp.BatchNorm(scale=True, offset=True, eps=0., initializers={
      'gamma': lambda *args, **kwargs: g,
      'beta': lambda *args, **kwargs: b,
      'moving_mean': tf.constant_initializer(1.),
      'moving_variance': tf.constant_initializer(4.),
  })
  batch_norm(z, is_training=False)
  batch_norm = ibp.BatchNormWrapper(batch_norm)
  # Test propagation.
  output_bounds = batch_norm.propagate_bounds(input_bounds)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    self.assertAlmostEqual([[-.5, 1., 2.5]], l.tolist())
    self.assertAlmostEqual([[.5, 1., 3.5]], u.tolist())
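The expected values follow from y = gamma * (z - moving_mean) / sqrt(moving_variance + eps) + beta; note that the negative gamma in the first channel flips which input bound produces the output lower bound. A numpy sketch of the check (my own, not part of the test):

import numpy as np

g = np.array([-1., 0., 1.])
beta = np.array([0., 1., 2.])
mean, var = 1.0, 4.0
lo = np.array([0., 1., 2.])
hi = np.array([2., 3., 4.])
scale = g / np.sqrt(var)  # eps is 0 in this test
y_lo = np.maximum(scale, 0.) * (lo - mean) + np.minimum(scale, 0.) * (hi - mean) + beta
y_hi = np.maximum(scale, 0.) * (hi - mean) + np.minimum(scale, 0.) * (lo - mean) + beta
print(y_lo, y_hi)  # [-0.5  1.   2.5] [0.5 1.  3.5]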
Example 10: learned_model_train_fn
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def learned_model_train_fn(features,
                           labels,
                           inference_outputs,
                           mode=None,
                           config=None,
                           params=None):
  """A model_train_fn where the loss function itself is learned."""
  del features, labels, mode, config, params
  with tf.variable_scope('learned_loss', reuse=tf.AUTO_REUSE):
    learned_label = tf.get_variable(
        'learned_label',
        shape=(1,),
        dtype=tf.float32,
        initializer=tf.constant_initializer([1.0], dtype=tf.float32))
  return tf.losses.mean_squared_error(
      labels=learned_label, predictions=inference_outputs['prediction'])
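A minimal usage sketch (hypothetical, not from the source): since the learned label initializes to 1.0, the first evaluation of the loss against a prediction of 0.5 is (1.0 - 0.5)**2 = 0.25.

outputs = {'prediction': tf.constant([0.5])}
loss = learned_model_train_fn(
    features=None, labels=None, inference_outputs=outputs)
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(loss))  # 0.25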
Example 11: lstm
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
"""lstm cell"""
_, nin = [v.value for v in xs[0].get_shape()] # the first is nbatch
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
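`ortho_init` is defined elsewhere in the source this snippet comes from; a common orthogonal-initializer sketch in that style, for the 2-D weight matrices used here (treat it as an assumption, not the verbatim original):

import numpy as np

def ortho_init(scale=1.0):
  def _ortho_init(shape, dtype, partition_info=None):
    shape = tuple(shape)  # here: [nin, nh*4] or [nh, nh*4]
    a = np.random.normal(0.0, 1.0, shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    q = u if u.shape == shape else v  # pick the factor with the right shape
    return (scale * q.reshape(shape)).astype(np.float32)
  return _ortho_init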
Example 12: layer_norm
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def layer_norm(layer_inputs, hidden_size):
"""Implements layer norm from [Ba et al. 2016] Layer Normalization.
See eqn. 4 in (https://arxiv.org/pdf/1607.06450.pdf).
Args:
layer_inputs (tensor): The inputs to the layer.
shape <float32>[batch_size, hidden_size]
hidden_size (int): Dimensionality of the hidden layer.
Returns:
normalized (tensor): layer_inputs, normalized over all the hidden units in
the layer.
shape <float32>[batch_size, hidden_size]
"""
mean, var = tf.nn.moments(layer_inputs, [1], keep_dims=True)
with tf.variable_scope("layernorm", reuse=tf.AUTO_REUSE):
gain = tf.get_variable(
"gain", shape=[hidden_size], initializer=tf.constant_initializer(1))
bias = tf.get_variable(
"bias", shape=[hidden_size], initializer=tf.constant_initializer(0))
normalized = gain * (layer_inputs - mean) / tf.sqrt(var + _EPSILON) + bias
return normalized
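`_EPSILON` is a module-level constant not shown above. A numpy sketch of the same computation with unit gain and zero bias (assuming `_EPSILON = 1e-6`):

import numpy as np

def layer_norm_np(x, eps=1e-6):
  mean = x.mean(axis=1, keepdims=True)
  var = x.var(axis=1, keepdims=True)
  return (x - mean) / np.sqrt(var + eps)

print(layer_norm_np(np.array([[1., 2., 3.]])))  # approx [[-1.2247 0. 1.2247]]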
Example 13: compute_attention
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def compute_attention(t1, t2):
"""Build an attention matrix between 3-tensors `t1` and `t2`.
Args:
t1: <tf.float32>[batch, seq_len1, dim1]
t2: <tf.float32>[batch, seq_len2, dim2]
Returns:
the similarity scores <tf.float32>[batch, seq_len1, seq_len2]
"""
dim = t1.shape.as_list()[2]
init = tf.constant_initializer(1.0 / dim)
t1_logits = ops.last_dim_weighted_sum(t1, "t1_w")
t2_logits = ops.last_dim_weighted_sum(t2, "t2_w")
dot_w = tf.get_variable(
"dot_w", shape=dim, initializer=init, dtype=tf.float32)
# Compute x * dot_weights first, then batch mult with x
dots = t1 * tf.expand_dims(tf.expand_dims(dot_w, 0), 0)
dot_logits = tf.matmul(dots, t2, transpose_b=True)
return dot_logits + \
tf.expand_dims(t1_logits, 2) + \
tf.expand_dims(t2_logits, 1)
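`ops.last_dim_weighted_sum` is an external helper; consistent with how it is used here (reducing a <tf.float32>[batch, seq_len, dim] tensor to [batch, seq_len] logits with a learned weight vector), a plausible sketch (an assumption, not the original implementation):

def last_dim_weighted_sum(x, name):
  dim = x.shape.as_list()[-1]
  w = tf.get_variable(name, shape=[dim], dtype=tf.float32)
  return tf.tensordot(x, w, axes=[[2], [0]])  # [batch, seq_len]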
Example 14: cifarnet_arg_scope
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def cifarnet_arg_scope(weight_decay=0.004):
"""Defines the default cifarnet argument scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
Returns:
An `arg_scope` to use for the inception v3 model.
"""
with slim.arg_scope(
[slim.conv2d],
weights_initializer=tf.truncated_normal_initializer(
stddev=5e-2),
activation_fn=tf.nn.relu):
with slim.arg_scope(
[slim.fully_connected],
biases_initializer=tf.constant_initializer(0.1),
weights_initializer=trunc_normal(0.04),
weights_regularizer=slim.l2_regularizer(weight_decay),
activation_fn=tf.nn.relu) as sc:
return sc
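`trunc_normal` is a small helper defined alongside the slim model definitions; the usual definition (stated here as an assumption) is simply a zero-mean truncated normal initializer:

def trunc_normal(stddev):
  return tf.truncated_normal_initializer(0.0, stddev)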
Example 15: add_inference
# Required import: from tensorflow.compat import v1 [as an alias]
# Alternatively: from tensorflow.compat.v1 import constant_initializer [as an alias]
def add_inference(self, cnn):
  # This model only supports 1x1 images with 1 channel.
  assert cnn.top_layer.shape[1:] == (1, 1, 1)
  # Multiply by variable A.
  with tf.name_scope('mult_by_var_A'):
    cnn.conv(1, 1, 1, 1, 1, use_batch_norm=None, activation=None, bias=None,
             kernel_initializer=tf.constant_initializer(
                 self.VAR_A_INITIAL_VALUE))
  # Multiply by variable B.
  with tf.name_scope('mult_by_var_B'):
    cnn.conv(1, 1, 1, 1, 1, use_batch_norm=None, activation=None, bias=None,
             kernel_initializer=tf.constant_initializer(
                 self.VAR_B_INITIAL_VALUE))
  with tf.name_scope('reshape_to_scalar'):
    cnn.reshape([-1, 1])