This article collects typical usage examples of the Python method tensorflow.constant. If you have been wondering how exactly tensorflow.constant is used, or what real calls to it look like, the curated examples below may help. You can also explore further usage examples from the tensorflow module, where this method is defined.
Shown below are 15 code examples of tensorflow.constant, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: test_adam
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def test_adam(self):
  with self.test_session() as sess:
    w = tf.get_variable(
        "w",
        shape=[3],
        initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
    x = tf.constant([0.4, 0.2, -0.5])
    loss = tf.reduce_mean(tf.square(x - w))
    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    global_step = tf.train.get_or_create_global_step()
    optimizer = optimization.AdamWeightDecayOptimizer(learning_rate=0.2)
    train_op = optimizer.apply_gradients(zip(grads, tvars), global_step)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
    for _ in range(100):
      sess.run(train_op)
    w_np = sess.run(w)
    self.assertAllClose(w_np.flat, [0.4, 0.2, -0.5], rtol=1e-2, atol=1e-2)
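Here tf.constant bakes the fixed regression target into the graph, and 100 optimizer steps pull the variable toward it. Below is a minimal sketch of the same convergence check using the stock tf.train.AdamOptimizer, assuming plain TF 1.x without BERT's optimization module:

import tensorflow as tf

# Sketch only: stock Adam stands in for AdamWeightDecayOptimizer (assumption).
w = tf.get_variable("w_demo", shape=[3],
                    initializer=tf.constant_initializer([0.1, -0.2, -0.1]))
x = tf.constant([0.4, 0.2, -0.5])  # fixed target baked into the graph
loss = tf.reduce_mean(tf.square(x - w))
train_op = tf.train.AdamOptimizer(learning_rate=0.2).minimize(loss)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for _ in range(100):
    sess.run(train_op)
  print(sess.run(w))  # close to [0.4, 0.2, -0.5]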
Example 2: noise_input_fn
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def noise_input_fn(params):
  """Input function for generating samples for PREDICT mode.

  Generates a single Tensor of fixed random noise. Use tf.data.Dataset to
  signal to the estimator when to terminate the generator returned by
  predict().

  Args:
    params: param `dict` passed by TPUEstimator.

  Returns:
    1-element `dict` containing the randomly generated noise.
  """
  # random noise
  np.random.seed(0)
  noise_dataset = tf.data.Dataset.from_tensors(tf.constant(
      np.random.randn(params['batch_size'], FLAGS.noise_dim),
      dtype=tf.float32))
  noise = noise_dataset.make_one_shot_iterator().get_next()
  return {'random_noise': noise}, None
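Because from_tensors wraps the constant into a single-element dataset, the one-shot iterator is exhausted after one batch; that OutOfRangeError is what tells the estimator's predict() loop to stop. A minimal standalone sketch of this behavior (hypothetical batch size and noise dimension):

import numpy as np
import tensorflow as tf

noise = tf.constant(np.random.randn(4, 8), dtype=tf.float32)  # assumed 4x8
it = tf.data.Dataset.from_tensors(noise).make_one_shot_iterator()
next_noise = it.get_next()

with tf.Session() as sess:
  print(sess.run(next_noise).shape)  # (4, 8): the single element
  try:
    sess.run(next_noise)             # second pull: dataset is exhausted
  except tf.errors.OutOfRangeError:
    print('dataset exhausted after one element')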
Example 3: structure
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def structure(self, input_tensor):
  """
  Args:
    input_tensor: NHWC
  """
  rnd = tf.random_uniform((), 135, 160, dtype=tf.int32)
  rescaled = tf.image.resize_images(
      input_tensor, [rnd, rnd], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
  h_rem = 160 - rnd
  w_rem = 160 - rnd
  pad_left = tf.random_uniform((), 0, w_rem, dtype=tf.int32)
  pad_right = w_rem - pad_left
  pad_top = tf.random_uniform((), 0, h_rem, dtype=tf.int32)
  pad_bottom = h_rem - pad_top
  padded = tf.pad(rescaled, [[0, 0], [pad_top, pad_bottom],
                             [pad_left, pad_right], [0, 0]])
  padded.set_shape((input_tensor.shape[0], 160, 160, 3))
  output = tf.cond(tf.random_uniform(shape=[1])[0] < tf.constant(0.9),
                   lambda: padded, lambda: input_tensor)
  return output
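The final tf.cond applies the random resize-and-pad transformation with probability 0.9 and passes the input through unchanged otherwise; a fresh uniform sample is drawn on every session.run. A standalone toy sketch of that stochastic branching (not part of the original class):

import tensorflow as tf

branch = tf.cond(tf.random_uniform(shape=[1])[0] < tf.constant(0.9),
                 lambda: tf.constant('transformed'),  # taken ~90% of runs
                 lambda: tf.constant('identity'))
with tf.Session() as sess:
  print([sess.run(branch) for _ in range(5)])  # mostly b'transformed'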
Example 4: test_clip_eta_goldilocks
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def test_clip_eta_goldilocks(self):
  # Test that the clipping handles perturbations that are
  # too small, just right, and too big correctly
  eta = tf.constant([[2.], [3.], [4.]])
  assert eta.dtype == tf.float32, eta.dtype
  eps = 3.
  for ord_arg in [np.inf, 1, 2]:
    for sign in [-1., 1.]:
      clipped = clip_eta(eta * sign, ord_arg, eps)
      clipped_value = self.sess.run(clipped)
      gold = sign * np.array([[2.], [3.], [3.]])
      self.assertClose(clipped_value, gold)
      grad, = tf.gradients(clipped, eta)
      grad_value = self.sess.run(grad)
      # Note: the second 1. is debatable (the left-sided derivative
      # and the right-sided derivative do not match, so formally
      # the derivative is not defined). This test makes sure that
      # we at least handle this oddity consistently across all the
      # argument values we test
      gold = sign * np.array([[1.], [1.], [0.]])
      assert np.allclose(grad_value, gold)
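For the row vectors in this test ([2.], [3.], and [4.] with eps = 3.), all three norms agree, which is why one gold value covers every ord_arg. A rough per-row sketch of the projection clip_eta performs; this is an assumption for illustration, not the exact CleverHans implementation:

import numpy as np
import tensorflow as tf

def clip_eta_sketch(eta, ord_arg, eps):
  """Project each row of eta onto the eps-ball of the given norm (sketch)."""
  if ord_arg == np.inf:
    return tf.clip_by_value(eta, -eps, eps)
  norm = tf.maximum(1e-12, tf.norm(eta, ord=ord_arg, axis=1, keepdims=True))
  return eta * tf.minimum(1., eps / norm)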
Example 5: test_drop
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def test_drop():
  # Make sure dropout is activated successfully.
  # We would like to configure the test to deterministically drop,
  # so that the test does not need to use multiple runs.
  # However, tf.nn.dropout divides by include_prob, so a zero or
  # infinitesimal include_prob causes NaNs.
  # 1e-8 does not cause NaNs and shouldn't be a significant source
  # of test flakiness relative to dependency downloads failing, etc.
  model = MLP(input_shape=[1, 1],
              layers=[Dropout(name='output', include_prob=1e-8)])
  x = tf.constant([[1]], dtype=tf.float32)
  y = model.get_layer(x, 'output', dropout=True)
  sess = tf.Session()
  y_value = sess.run(y)
  # Subject to very rare random failure because include_prob is not exactly 0
  assert y_value == 0., y_value
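The reason include_prob must stay strictly positive: tf.nn.dropout rescales the surviving activations by 1/keep_prob, so a keep probability of exactly zero would divide by zero. A quick demonstration of that scaling:

import tensorflow as tf

x = tf.ones([1, 4])
y = tf.nn.dropout(x, keep_prob=0.5)  # kept entries are scaled to 1/0.5 = 2.0
with tf.Session() as sess:
  print(sess.run(y))  # e.g. [[2. 0. 2. 0.]]; the drop pattern varies per run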
Example 6: _inv_preemphasis
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def _inv_preemphasis(x):
  N = tf.shape(x)[0]
  i = tf.constant(0)
  W = tf.zeros(shape=tf.shape(x), dtype=tf.float32)

  def condition(i, y):
    return tf.less(i, N)

  def body(i, y):
    tmp = tf.slice(x, [0], [i + 1])
    tmp = tf.concat([tf.zeros([N - i - 1]), tmp], -1)
    y = hparams.preemphasis * y + tmp
    i = tf.add(i, 1)
    return [i, y]

  final = tf.while_loop(condition, body, [i, W])
  y = final[1]
  return y
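The while_loop accumulates the recurrence y[n] = x[n] + preemphasis * y[n-1] one prefix at a time, which costs O(N^2) graph work. Outside the graph, the same inverse filter is a one-liner; a sketch assuming the coefficient matches hparams.preemphasis (0.97 is a common but here hypothetical default):

import numpy as np
from scipy import signal

def inv_preemphasis_np(x, coeff=0.97):  # coeff assumed = hparams.preemphasis
  # y[n] = x[n] + coeff * y[n-1], i.e. the IIR filter 1 / (1 - coeff * z^-1)
  return signal.lfilter([1], [1, -coeff], x)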
Example 7: check_tensor_shape
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def check_tensor_shape(tensor_tf, target_shape):
  """ Return a Tensorflow boolean graph that indicates whether
  sample[features_key] has the specified target shape. Only check
  not None entries of target_shape.

  :param tensor_tf: Tensor to check shape for.
  :param target_shape: Target shape to compare tensor to.
  :returns: True if shape is valid, False otherwise (as TF boolean).
  """
  result = tf.constant(True)
  for i, target_length in enumerate(target_shape):
    if target_length:
      result = tf.logical_and(
          result,
          tf.equal(tf.constant(target_length), tf.shape(tensor_tf)[i]))
  return result
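Entries of target_shape that are None (or 0) are skipped, so the check can pin down some dimensions while leaving others free. A hedged usage sketch; wiring the result into tf.Assert is one possible way to consume it, not necessarily how the original codebase does:

import tensorflow as tf

waveform = tf.placeholder(tf.float32, shape=[None, 2])  # unknown length
shape_ok = check_tensor_shape(waveform, (None, 2))      # scalar tf.bool
assert_op = tf.Assert(shape_ok, [tf.shape(waveform)])   # fails loudly at run time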
Example 8: testCreateLogisticClassifier
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def testCreateLogisticClassifier(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = LogisticClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    clone = clones[0]
    self.assertEqual(len(slim.get_variables()), 2)
    for v in slim.get_variables():
      self.assertDeviceEqual(v.device, 'CPU:0')
      self.assertDeviceEqual(v.value().device, 'CPU:0')
    self.assertEqual(clone.outputs.op.name,
                     'LogisticClassifier/fully_connected/Sigmoid')
    self.assertEqual(clone.scope, '')
    self.assertDeviceEqual(clone.device, 'GPU:0')
    self.assertEqual(len(slim.losses.get_losses()), 1)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(update_ops, [])
Example 9: testCreateSingleclone
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def testCreateSingleclone(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    clone = clones[0]
    self.assertEqual(len(slim.get_variables()), 5)
    for v in slim.get_variables():
      self.assertDeviceEqual(v.device, 'CPU:0')
      self.assertDeviceEqual(v.value().device, 'CPU:0')
    self.assertEqual(clone.outputs.op.name,
                     'BatchNormClassifier/fully_connected/Sigmoid')
    self.assertEqual(clone.scope, '')
    self.assertDeviceEqual(clone.device, 'GPU:0')
    self.assertEqual(len(slim.losses.get_losses()), 1)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    self.assertEqual(len(update_ops), 2)
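The two entries in UPDATE_OPS are the moving-mean and moving-variance updates that slim.batch_norm registers under tf.GraphKeys.UPDATE_OPS; they account for the difference from the logistic test above, which expects an empty collection. A standalone toy sketch (hypothetical graph, not the test's BatchNormClassifier):

import tensorflow as tf
slim = tf.contrib.slim

inputs = tf.constant(1.0, shape=(4, 8), dtype=tf.float32)
net = slim.fully_connected(inputs, 3, normalizer_fn=slim.batch_norm)
print(len(tf.get_collection(tf.GraphKeys.UPDATE_OPS)))  # 2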
Example 10: testCreateOnecloneWithPS
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def testCreateOnecloneWithPS(self):
  g = tf.Graph()
  with g.as_default():
    tf.set_random_seed(0)
    tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
    tf_labels = tf.constant(self._labels, dtype=tf.float32)

    model_fn = BatchNormClassifier
    clone_args = (tf_inputs, tf_labels)
    deploy_config = model_deploy.DeploymentConfig(num_clones=1,
                                                  num_ps_tasks=1)

    self.assertEqual(slim.get_variables(), [])
    clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
    self.assertEqual(len(clones), 1)
    clone = clones[0]
    self.assertEqual(clone.outputs.op.name,
                     'BatchNormClassifier/fully_connected/Sigmoid')
    self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
    self.assertEqual(clone.scope, '')
    self.assertEqual(len(slim.get_variables()), 5)
    for v in slim.get_variables():
      self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
      self.assertDeviceEqual(v.device, v.value().device)
Example 11: testNoSummariesOnGPUForEvals
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def testNoSummariesOnGPUForEvals(self):
  with tf.Graph().as_default():
    deploy_config = model_deploy.DeploymentConfig(num_clones=2)

    # clone function creates a fully_connected layer with a regularizer loss.
    def ModelFn():
      inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
      reg = tf.contrib.layers.l2_regularizer(0.001)
      tf.contrib.layers.fully_connected(inputs, 30, weights_regularizer=reg)

    # No optimizer here, it's an eval.
    model = model_deploy.deploy(deploy_config, ModelFn)

    # The model summary op should have a few summary inputs and all of them
    # should be on the CPU.
    self.assertTrue(model.summary_op.op.inputs)
    for inp in model.summary_op.op.inputs:
      self.assertEqual('/device:CPU:0', inp.device)
Example 12: _setup_learning_rate
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def _setup_learning_rate(config, global_step):
  """Sets up the learning rate with optional exponential decay.

  Args:
    config: Object containing learning rate configuration parameters.
    global_step: Tensor; the global step.

  Returns:
    learning_rate: Tensor; the learning rate with exponential decay.
  """
  if config.learning_rate_decay_factor > 0:
    learning_rate = tf.train.exponential_decay(
        learning_rate=float(config.learning_rate),
        global_step=global_step,
        decay_steps=config.learning_rate_decay_steps,
        decay_rate=config.learning_rate_decay_factor,
        staircase=False)
  else:
    learning_rate = tf.constant(config.learning_rate)
  return learning_rate
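With staircase=False, the decayed rate follows the smooth schedule learning_rate * decay_rate ** (global_step / decay_steps). A quick numeric sketch with made-up config values:

# decayed_lr = learning_rate * decay_rate ** (global_step / decay_steps)
lr, decay_rate, decay_steps = 0.1, 0.5, 1000.
for step in (0, 1000, 2500):
  print(step, lr * decay_rate ** (step / decay_steps))
# 0 -> 0.1, 1000 -> 0.05, 2500 -> ~0.0177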
Example 13: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def __init__(self,
             vocab_size,
             embedding_dim,
             normalize=False,
             vocab_freqs=None,
             keep_prob=1.,
             **kwargs):
  self.vocab_size = vocab_size
  self.embedding_dim = embedding_dim
  self.normalized = normalize
  self.keep_prob = keep_prob

  if normalize:
    assert vocab_freqs is not None
    self.vocab_freqs = tf.constant(
        vocab_freqs, dtype=tf.float32, shape=(vocab_size, 1))

  super(Embedding, self).__init__(**kwargs)
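The (vocab_size, 1) shape lets vocab_freqs broadcast against a (vocab_size, embedding_dim) embedding matrix, so word frequencies can weight per-dimension statistics. A hedged sketch of the kind of frequency-weighted normalization this supports (an illustration, not necessarily the verbatim method of the original Embedding class):

import tensorflow as tf

def normalize_sketch(emb, vocab_freqs):
  # emb: (V, D) embedding matrix; vocab_freqs: (V, 1) raw counts.
  weights = vocab_freqs / tf.reduce_sum(vocab_freqs)
  mean = tf.reduce_sum(weights * emb, 0, keepdims=True)
  var = tf.reduce_sum(weights * tf.square(emb - mean), 0, keepdims=True)
  return (emb - mean) / tf.sqrt(1e-6 + var)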
Example 14: initialize_fakes
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def initialize_fakes(self):
  self.images_shape = (self.batch_size, self.image_height, self.image_width,
                       3)
  self.fake_images = tf.constant(
      self.rng.randint(low=0, high=255,
                       size=self.images_shape).astype('float32'),
      name='input_node')
  self.fake_conv_tower_np = self.rng.randn(
      *self.conv_tower_shape).astype('float32')
  self.fake_conv_tower = tf.constant(self.fake_conv_tower_np)
  self.fake_logits = tf.constant(
      self.rng.randn(*self.chars_logit_shape).astype('float32'))
  self.fake_labels = tf.constant(
      self.rng.randint(
          low=0,
          high=self.num_char_classes,
          size=(self.batch_size, self.seq_length)).astype('int64'))
Example 15: lstm_setup
# Required import: import tensorflow [as alias]
# Or: from tensorflow import constant [as alias]
def lstm_setup(name, x, batch_size, is_single_step, lstm_dim, lstm_out,
               num_steps, state_input_op):
  # returns state_name, state_init_op, updated_state_op, out_op
  with tf.name_scope('reshape_' + name):
    sh = x.get_shape().as_list()
    x = tf.reshape(x, shape=[batch_size, -1, sh[-1]])

  with tf.variable_scope(name) as varscope:
    cell = tf.contrib.rnn.LSTMCell(
        num_units=lstm_dim, forget_bias=1.0, state_is_tuple=False,
        num_proj=lstm_out, use_peepholes=True,
        initializer=tf.random_uniform_initializer(-0.01, 0.01, seed=0),
        cell_clip=None, proj_clip=None)

    sh = [batch_size, 1, lstm_dim + lstm_out]
    state_init_op = tf.constant(0., dtype=tf.float32, shape=sh)
    fn = lambda ns: lstm_online(cell, ns, x, state_input_op, varscope)
    out_op, updated_state_op = tf.cond(is_single_step, lambda: fn(1),
                                       lambda: fn(num_steps))
  return name, state_init_op, updated_state_op, out_op