This article collects typical usage examples of the Python method tensorflow.compat.v1.random_uniform. If you have been wondering what exactly v1.random_uniform does and how to use it, the curated code examples below may help. You can also explore the other methods of the module tensorflow.compat.v1.
The following 15 code examples of v1.random_uniform are shown, sorted by popularity by default.
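Before diving into the examples, here is a minimal, self-contained sketch of the method's basic signature. The alias tf for tensorflow.compat.v1 is an assumption made for illustration, though the examples below all follow the same convention:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # run the v1 graph/session API under TF2

# Floats in [0, 1) by default; minval, maxval, dtype and seed are optional.
floats = tf.random_uniform((2, 3))
# Integer output requires an explicit maxval and an integer dtype.
ints = tf.random_uniform((2, 3), minval=0, maxval=10, dtype=tf.int32, seed=42)

with tf.Session() as session:
  print(session.run([floats, ints]))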

Example 1: testBuildLogitsCifarModel
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def testBuildLogitsCifarModel(self):
  batch_size = 5
  height, width = 32, 32
  num_classes = 10
  inputs = tf.random_uniform((batch_size, height, width, 3))
  tf.train.create_global_step()
  with slim.arg_scope(nasnet.nasnet_cifar_arg_scope()):
    logits, end_points = nasnet.build_nasnet_cifar(inputs, num_classes)
  auxlogits = end_points['AuxLogits']
  predictions = end_points['Predictions']
  self.assertListEqual(auxlogits.get_shape().as_list(),
                       [batch_size, num_classes])
  self.assertListEqual(logits.get_shape().as_list(),
                       [batch_size, num_classes])
  self.assertListEqual(predictions.get_shape().as_list(),
                       [batch_size, num_classes])

Example 2: testBuildLogitsMobileModel
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def testBuildLogitsMobileModel(self):
  batch_size = 5
  height, width = 224, 224
  num_classes = 1000
  inputs = tf.random_uniform((batch_size, height, width, 3))
  tf.train.create_global_step()
  with slim.arg_scope(nasnet.nasnet_mobile_arg_scope()):
    logits, end_points = nasnet.build_nasnet_mobile(inputs, num_classes)
  auxlogits = end_points['AuxLogits']
  predictions = end_points['Predictions']
  self.assertListEqual(auxlogits.get_shape().as_list(),
                       [batch_size, num_classes])
  self.assertListEqual(logits.get_shape().as_list(),
                       [batch_size, num_classes])
  self.assertListEqual(predictions.get_shape().as_list(),
                       [batch_size, num_classes])

Example 3: testBuildLogitsLargeModel
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def testBuildLogitsLargeModel(self):
  batch_size = 5
  height, width = 331, 331
  num_classes = 1000
  inputs = tf.random_uniform((batch_size, height, width, 3))
  tf.train.create_global_step()
  with slim.arg_scope(nasnet.nasnet_large_arg_scope()):
    logits, end_points = nasnet.build_nasnet_large(inputs, num_classes)
  auxlogits = end_points['AuxLogits']
  predictions = end_points['Predictions']
  self.assertListEqual(auxlogits.get_shape().as_list(),
                       [batch_size, num_classes])
  self.assertListEqual(logits.get_shape().as_list(),
                       [batch_size, num_classes])
  self.assertListEqual(predictions.get_shape().as_list(),
                       [batch_size, num_classes])

Example 4: get_synthetic_inputs
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def get_synthetic_inputs(self, input_name, nclass):
  """Returns the ops to generate synthetic inputs and labels."""
  def users_init_val():
    return tf.random_uniform((self.batch_size, 1), minval=0,
                             maxval=_NUM_USERS_20M, dtype=tf.int32)
  users = tf.Variable(users_init_val, dtype=tf.int32, trainable=False,
                      collections=[tf.GraphKeys.LOCAL_VARIABLES],
                      name='synthetic_users')
  def items_init_val():
    return tf.random_uniform((self.batch_size, 1), minval=0,
                             maxval=_NUM_ITEMS_20M, dtype=tf.int32)
  items = tf.Variable(items_init_val, dtype=tf.int32, trainable=False,
                      collections=[tf.GraphKeys.LOCAL_VARIABLES],
                      name='synthetic_items')
  def labels_init_val():
    return tf.random_uniform((self.batch_size,), minval=0, maxval=2,
                             dtype=tf.int32)
  labels = tf.Variable(labels_init_val, dtype=tf.int32, trainable=False,
                       collections=[tf.GraphKeys.LOCAL_VARIABLES],
                       name='synthetic_labels')
  return [users, items, labels]
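Note the pattern above: each random op is wrapped in an untrainable local Variable, so the synthetic batch is sampled once at initialization and then reused on every step rather than regenerated. A minimal sketch of the difference, with hypothetical names:

import tensorflow.compat.v1 as tf

fresh = tf.random_uniform((2,), maxval=10, dtype=tf.int32)
frozen = tf.Variable(lambda: tf.random_uniform((2,), maxval=10, dtype=tf.int32),
                     dtype=tf.int32, trainable=False,
                     collections=[tf.GraphKeys.LOCAL_VARIABLES])

with tf.Session() as session:
  session.run(tf.local_variables_initializer())
  print(session.run(fresh), session.run(fresh))    # differs between calls
  print(session.run(frozen), session.run(frozen))  # identical between calls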

Example 5: get_synthetic_inputs
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def get_synthetic_inputs(self, input_name, nclass):
  inputs = tf.random_uniform(self.get_input_shapes('train')[0],
                             dtype=self.get_input_data_types('train')[0])
  inputs = variables.VariableV1(inputs, trainable=False,
                                collections=[tf.GraphKeys.LOCAL_VARIABLES],
                                name=input_name)
  labels = tf.convert_to_tensor(
      np.random.randint(28, size=[self.batch_size, self.max_label_length]))
  input_lengths = tf.convert_to_tensor(
      [self.max_time_steps] * self.batch_size)
  label_lengths = tf.convert_to_tensor(
      [self.max_label_length] * self.batch_size)
  return [inputs, labels, input_lengths, label_lengths]

# TODO(laigd): support fp16.
# TODO(laigd): support multiple gpus.

Example 6: get_synthetic_inputs
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def get_synthetic_inputs(self, input_name, nclass):
  # Synthetic input should be within [0, 255].
  image_shape, label_shape = self.get_input_shapes('train')
  inputs = tf.truncated_normal(
      image_shape,
      dtype=self.data_type,
      mean=127,
      stddev=60,
      name=self.model_name + '_synthetic_inputs')
  inputs = variables_module.VariableV1(
      inputs, trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES],
      name=input_name)
  labels = tf.random_uniform(
      label_shape,
      minval=0,
      maxval=nclass - 1,
      dtype=tf.int32,
      name=self.model_name + '_synthetic_labels')
  return (inputs, labels)

Example 7: _build
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def _build(self, x, state):
  prev_keep_mask = state
  shape = tf.shape(x)
  noise = tf.random_uniform(shape, dtype=x.dtype)
  other_mask = tf.floor(self._keep_prob + noise)
  choice_noise = tf.random_uniform(shape, dtype=x.dtype)
  choice = tf.less(choice_noise, self._flip_prob)
  # KLUDGE(melisgl): The client has to pass the last keep_mask from
  # one batch to the next, so the mask may end up next to some
  # recurrent cell state. This state is often zero at the beginning
  # and may be periodically zeroed (per example) during training.
  # While zeroing LSTM state is okay, zeroing the dropout mask is
  # not. So instead of forcing every client to deal with this common
  # (?) case, a fresh mask is regenerated whenever an all-zero mask
  # is detected. This is of course a major hack and won't help with
  # learnt initial states, for example.
  sum_ = tf.reduce_sum(prev_keep_mask, 1, keepdims=True)
  is_initializing = tf.equal(sum_, 0.0)
  self._keep_mask = tf.where(tf.logical_or(choice, is_initializing),
                             other_mask,
                             prev_keep_mask)
  self._time_step += 1
  return x * self._keep_mask / self._keep_prob * self._scaler
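The tf.floor(self._keep_prob + noise) line above is a standard trick for sampling a Bernoulli(keep_prob) mask from uniform noise: floor(p + U[0, 1)) equals 1 with probability p and 0 otherwise. A quick standalone NumPy check (hypothetical, not part of the class above):

import numpy as np

keep_prob = 0.9
noise = np.random.uniform(size=100000)
mask = np.floor(keep_prob + noise)  # 1.0 with probability keep_prob, else 0.0
print(mask.mean())  # ~0.9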

Example 8: _quantize
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def _quantize(x, params, randomize=True):
  """Quantize x according to params, optionally randomizing the rounding."""
  if not params.quantize:
    return x
  if not randomize:
    return tf.bitcast(
        tf.cast(x / params.quantization_scale, tf.int16), tf.float16)
  abs_x = tf.abs(x)
  sign_x = tf.sign(x)
  y = abs_x / params.quantization_scale
  y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x)))
  y = tf.minimum(y, tf.int16.max) * sign_x
  q = tf.bitcast(tf.cast(y, tf.int16), tf.float16)
  return q
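The tf.floor(y + tf.random_uniform(...)) step implements stochastic rounding: E[floor(y + U[0, 1))] = y, so the randomized quantization is unbiased in expectation, unlike plain truncation. A quick NumPy check (a standalone illustration, not part of the module above):

import numpy as np

y = 2.3
samples = np.floor(y + np.random.uniform(size=1000000))
print(samples.mean())  # ~2.3: rounds up to 3 with probability 0.3, else down to 2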

Example 9: testShapes
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def testShapes(self):
  batch_size = 2
  beam_size = 3
  vocab_size = 4
  decode_length = 10

  initial_ids = tf.constant([0, 0])  # GO

  def symbols_to_logits(_):
    # Just return random logits
    return tf.random_uniform((batch_size * beam_size, vocab_size))

  final_ids, final_probs, _ = beam_search.beam_search(
      symbols_to_logits, initial_ids, beam_size, decode_length, vocab_size,
      0.)

  self.assertEqual(final_ids.get_shape().as_list(), [None, beam_size, None])
  self.assertEqual(final_probs.get_shape().as_list(), [batch_size, beam_size])

Example 10: uniform_binning_correction
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def uniform_binning_correction(x, n_bits=8):
  """Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).

  Args:
    x: 4-D Tensor of shape (NHWC).
    n_bits: number of bits per channel value; defaults to 8.
  Returns:
    x: x ~ U(x, x + 1.0 / 256)
    objective: Equivalent to -q(x)*log(q(x)).
  """
  n_bins = 2**n_bits
  batch_size, height, width, n_channels = common_layers.shape_list(x)
  hwc = float(height * width * n_channels)

  x = x + tf.random_uniform(
      shape=(batch_size, height, width, n_channels),
      minval=0.0, maxval=1.0/n_bins)
  objective = -np.log(n_bins) * hwc * tf.ones(batch_size)
  return x, objective
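A small sanity check of what this returns, assuming the function above is importable together with its common_layers and np dependencies (the concrete shapes are hypothetical):

import tensorflow.compat.v1 as tf

x = tf.zeros((1, 2, 2, 3))  # a tiny NHWC batch
x_noisy, objective = uniform_binning_correction(x, n_bits=8)

with tf.Session() as session:
  noisy, obj = session.run([x_noisy, objective])
  assert 0.0 <= noisy.min() and noisy.max() < 1.0 / 256.0
  print(obj)  # -log(256) * height * width * channels = -log(256) * 12 ~ [-66.5]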

Example 11: sample
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def sample(self, features=None, shape=None):
  del features
  hp = self.hparams
  div_x = 2**hp.num_hidden_layers
  div_y = 1 if self.is1d else 2**hp.num_hidden_layers
  size = [
      hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y,
      hp.bottleneck_bits
  ]
  size = size if shape is None else shape
  rand = tf.random_uniform(size)
  res = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0

  # If you want to set some first bits to a fixed value, do this:
  # fixed = tf.zeros_like(rand) - 1.0
  # nbits = 3
  # res = tf.concat([fixed[:, :, :, :nbits], res[:, :, :, nbits:]], axis=-1)
  return res
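The line 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0 maps uniform noise to random sign bits: each position is +1.0 or -1.0 with probability 1/2. In NumPy terms (a standalone illustration):

import numpy as np

rand = np.random.uniform(size=10)
bits = 2.0 * (0.5 < rand).astype(np.float32) - 1.0
print(bits)  # e.g. [ 1. -1. -1.  1. ...], each entry +/-1 with equal probability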

Example 12: bottleneck
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def bottleneck(self, x):  # pylint: disable=arguments-differ
  hparams = self.hparams
  if hparams.unordered:
    return super(AutoencoderOrderedDiscrete, self).bottleneck(x)
  noise = hparams.bottleneck_noise
  hparams.bottleneck_noise = 0.0  # We'll add noise below.
  x, loss = discretization.parametrized_bottleneck(x, hparams)
  hparams.bottleneck_noise = noise
  if hparams.mode == tf.estimator.ModeKeys.TRAIN:
    # We want a number p such that p^bottleneck_bits = 1 - noise / 2,
    # i.e. log(p) = log1p(-noise / 2) / bottleneck_bits.
    log_p = tf.log1p(-float(noise) / 2) / float(hparams.bottleneck_bits)
    # Probabilities of *not* flipping are p, p^2, p^3, ..., p^bottleneck_bits.
    noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1))
    # Having the no-noise mask, we can make noise just uniformly at random.
    ordered_noise = tf.random_uniform(tf.shape(x))
    # We want our noise to be 1s at the start and random {-1, 1} bits later.
    ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise))
    # Now we flip the bits of x on the noisy positions (ordered and normal).
    x *= 2.0 * ordered_noise - 1
  return x, loss
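To see the resulting noise schedule concretely, here is a small NumPy sketch with assumed values noise=0.1 and bottleneck_bits=8, illustrating the cumulative flip probabilities computed above:

import numpy as np

noise, bits = 0.1, 8
log_p = np.log1p(-noise / 2) / bits
flip_prob = 1.0 - np.exp(np.cumsum(np.full(bits, log_p)))
print(flip_prob)
# Monotonically increasing, from ~0.006 at the first bit position
# up to exactly noise / 2 = 0.05 at the last one.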

Example 13: test_invertibility
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def test_invertibility(self, op, name, dropout=0.0):
  with tf.Graph().as_default():
    tf.set_random_seed(42)
    x = tf.random_uniform(shape=(16, 32, 32, 4))

    if op in [glow_ops.affine_coupling, glow_ops.additive_coupling]:
      with arg_scope([glow_ops.get_dropout], init=False):
        x_inv, _ = op(name, x, reverse=False, dropout=dropout)
        x_inv_inv, _ = op(name, x_inv, reverse=True, dropout=dropout)
    else:
      x_inv, _ = op(name, x, reverse=False)
      x_inv_inv, _ = op(name, x_inv, reverse=True)

    with tf.Session() as session:
      session.run(tf.global_variables_initializer())
      diff = session.run(x - x_inv_inv)
      self.assertTrue(np.allclose(diff, 0.0, atol=1e-5))

Example 14: test_conv2d
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def test_conv2d(self):
  with tf.Graph().as_default():
    x = 10.0 * tf.random_uniform(shape=(16, 5, 5, 32))

    with arg_scope([glow_ops.actnorm], init=True):
      actnorm_conv2d = glow_ops.conv(
          "actnorm_conv2d", x, output_channels=64, apply_actnorm=True)
      actnorm_zeros2d = glow_ops.conv(
          "actnorm_zeros2d", x, output_channels=64, apply_actnorm=False)

    with tf.Session() as session:
      session.run(tf.global_variables_initializer())
      # Test that, when apply_actnorm is set to True, the first minibatch
      # has zero mean and unit variance.
      actnorm_np, zeros_np = session.run([actnorm_conv2d, actnorm_zeros2d])
      self.assertEqual(actnorm_np.shape, (16, 5, 5, 64))

      mean = np.mean(actnorm_np, axis=(0, 1, 2))
      var = np.var(actnorm_np, axis=(0, 1, 2))
      self.assertTrue(np.allclose(mean, 0.0, atol=1e-5))
      self.assertTrue(np.allclose(var, 1.0, atol=1e-5))

      # Test the output shape when apply_actnorm is set to False.
      self.assertEqual(zeros_np.shape, (16, 5, 5, 64))

Example 15: _apply_encoder_layer
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import random_uniform [as alias]
def _apply_encoder_layer(translation_layer, output_depth, nonpadding_list):
  """Applies an encoder layer with basic arguments."""
  input_tensor = tf.random_uniform(
      [_BATCH_SIZE, _TOTAL_SEQUENCE_LENGTH, _INPUT_DEPTH]) / 4.0
  nonpadding = tf.constant(nonpadding_list)
  residual_tensor = tf.random_uniform(
      [_BATCH_SIZE, _TOTAL_SEQUENCE_LENGTH, output_depth])
  hparams = transformer.transformer_base()
  return translation_layer.apply_layer(
      input_tensor,
      residual_tensor,
      output_depth,
      tf.nn.relu,
      hparams,
      "",
      mask_future=False,
      nonpadding=nonpadding,
      layer_preprocess_fn=None,
      postprocess_dropout=True)