This article collects typical usage examples of the tensorflow.compat.v1.ones method in Python. If you are wondering what v1.ones does or how to use it, the curated code examples below may help. You can also explore further usage of the module it belongs to, tensorflow.compat.v1.
Below are 15 code examples of the v1.ones method, sorted by popularity by default.
Example 1: _ensure_keep_mask
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def _ensure_keep_mask(self, x):
  if self._keep_mask is None or not self._share_mask:
    shape = tf.shape(x)
    k = shape[1]
    # To make this class a drop-in replacement for Bernoulli dropout we
    # parameterize it with keep_prob. Set alpha of the Dirichlet so that the
    # variance is equal to the variance of the Bernoulli with p=keep_prob
    # divided by keep_prob.
    # The variance of the Dirichlet with k equal alphas is
    # (k-1)/(k^2*(k*alpha+1)). Solve that for alpha.
    kf = tf.cast(k, tf.float32)
    alpha = self._keep_prob * (kf - 1.0) / ((1-self._keep_prob)*kf) - 1.0/kf
    dist = tfp.distributions.Dirichlet(tf.ones(shape=k) * alpha)
    assert (dist.reparameterization_type ==
            tfp.distributions.FULLY_REPARAMETERIZED)
    # E[Dirichlet(alpha)] = 1/k for all elements, but we want the expectation
    # to be keep_prob, hence the multiplication by kf.
    self._keep_mask = kf * dist.sample(shape[0])
    self._keep_mask.set_shape(x.get_shape())
  return self._keep_mask
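A quick standalone check of the variance-matching algebra in the comments above; this is a NumPy-only sketch with illustrative values, not part of the original class:

import numpy as np

def dirichlet_alpha(keep_prob, k):
  # Match Var[k * X_i] for X ~ Dirichlet(alpha, ..., alpha), which is
  # (k-1)/(k*alpha+1), to Var[Bernoulli(p)/p] = (1-p)/p, and solve for alpha.
  return keep_prob * (k - 1.0) / ((1 - keep_prob) * k) - 1.0 / k

p, k = 0.9, 64
alpha = dirichlet_alpha(p, k)
assert np.isclose((k - 1.0) / (k * alpha + 1.0), (1 - p) / p)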
Example 2: uniform_binning_correction
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def uniform_binning_correction(x, n_bits=8):
  """Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).

  Args:
    x: 4-D Tensor of shape (NHWC).
    n_bits: number of bits of quantization (defaults to 8, i.e. 256 bins).

  Returns:
    x: x ~ U(x, x + 1.0 / n_bins).
    objective: Equivalent to -q(x)*log(q(x)).
  """
  n_bins = 2**n_bits
  batch_size, height, width, n_channels = common_layers.shape_list(x)
  hwc = float(height * width * n_channels)

  x = x + tf.random_uniform(
      shape=(batch_size, height, width, n_channels),
      minval=0.0, maxval=1.0/n_bins)
  objective = -np.log(n_bins) * hwc * tf.ones(batch_size)
  return x, objective
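A minimal usage sketch; it assumes the function above is in scope, with common_layers from tensor2tensor and numpy imported as np, and the shapes are illustrative:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
x = tf.placeholder(tf.float32, shape=[16, 32, 32, 3])
x_deq, objective = uniform_binning_correction(x, n_bits=8)
# `objective` is a [batch_size] vector whose entries all equal
# -H*W*C*log(n_bins), the per-example dequantization correction.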
Example 3: compute_last_embedding
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def compute_last_embedding(input_embeddings, input_lengths, hparams):
  """Computes the average of the last K embeddings.

  Args:
    input_embeddings: <tf.float32>[bs, max_seq_len, emb_dim]
    input_lengths: <tf.int64>[bs, 1]
    hparams: model hparams

  Returns:
    last_k_embedding: <tf.float32>[bs, emb_dim]
  """
  max_seq_len = tf.shape(input_embeddings)[1]
  # <tf.float32>[bs, 1, max_seq_len]
  mask = tf.sequence_mask(input_lengths, max_seq_len, dtype=tf.float32)
  del_mask = tf.sequence_mask(
      input_lengths - hparams.last_k, max_seq_len, dtype=tf.float32)
  final_mask = mask - del_mask
  # <tf.float32>[bs, 1, emb_dim]
  sum_embedding = tf.matmul(final_mask, input_embeddings)
  # <tf.float32>[bs, 1, emb_dim]
  last_k_embedding = sum_embedding / tf.to_float(
      tf.expand_dims(
          tf.ones([tf.shape(input_embeddings)[0], 1]) * hparams.last_k, 2))
  # <tf.float32>[bs, emb_dim]
  return tf.squeeze(last_k_embedding, 1)
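To see which positions final_mask keeps, here is a NumPy-only illustration with toy numbers (independent of the hparams object):

import numpy as np

length, last_k, max_seq_len = 4, 2, 5
mask = (np.arange(max_seq_len) < length).astype(np.float32)
del_mask = (np.arange(max_seq_len) < length - last_k).astype(np.float32)
print(mask - del_mask)  # [0. 0. 1. 1. 0.] -> the last k=2 valid positions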
Example 4: testLossCostDecorated
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def testLossCostDecorated(self):
  image = tf.constant(0.0, shape=[1, 3, 3, 3])
  kernel = tf.ones([1, 1, 3, 2])
  pred = tf.nn.conv2d(image, kernel, strides=[1, 1, 1, 1], padding='SAME')
  conv = pred.op
  self.group_lasso_reg = flop_regularizer.GroupLassoFlopsRegularizer(
      [conv],
      0.1,
      l1_fraction=0,
      regularizer_decorator=dummy_decorator.DummyDecorator,
      decorator_parameters={'scale': 0.5})
  # We compare the computed cost and regularization, calculated as follows:
  # reg_term = op_coeff * (number_of_inputs * (regularization = 2 * 0.5) +
  #                        number_of_outputs * (input_regularization = 0))
  # number_of_flops = coeff * number_of_inputs * number_of_outputs.
  with self.cached_session():
    pred_reg = self.group_lasso_reg.get_regularization_term([conv]).eval()
    self.assertEqual(_coeff(conv) * 3 * 1, pred_reg)
    pred_cost = self.group_lasso_reg.get_cost([conv]).eval()
    self.assertEqual(_coeff(conv) * 2 * NUM_CHANNELS, pred_cost)
Example 5: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def __init__(self, regularizers_to_group):
  """Creates an instance.

  Args:
    regularizers_to_group: A list of generic_regularizers.OpRegularizer
      objects. Their regularization_vector (alive_vector) attributes are
      expected to be of the same length.

  Raises:
    ValueError: If regularizers_to_group has fewer than 2 elements.
  """
  if len(regularizers_to_group) < 2:
    raise ValueError('Groups must be of at least size 2.')
  self._regularization_vector = tf.add_n(
      [r.regularization_vector for r in regularizers_to_group])
  self._alive_vector = tf.cast(
      tf.ones(self._regularization_vector.get_shape()[-1]), tf.bool)
Example 6: sample
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def sample(self, n, max_length=None, z=None, c_input=None, **kwargs):
  """Sample with an optional conditional embedding `z`."""
  if z is not None and int(z.shape[0]) != n:
    raise ValueError(
        '`z` must have a first dimension that equals `n` when given. '
        'Got: %d vs %d' % (z.shape[0], n))

  if self.hparams.z_size and z is None:
    tf.logging.warning(
        'Sampling from conditional model without `z`. Using random `z`.')
    normal_shape = [n, self.hparams.z_size]
    normal_dist = tfp.distributions.Normal(
        loc=tf.zeros(normal_shape), scale=tf.ones(normal_shape))
    z = normal_dist.sample()

  return self.decoder.sample(n, max_length, z, c_input, **kwargs)
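The random-`z` fallback above draws from a standard normal prior of shape [n, z_size]. A standalone sketch with hypothetical sizes (tensorflow_probability assumed available as tfp):

import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp

n, z_size = 4, 16  # hypothetical stand-ins for `n` and hparams.z_size
dist = tfp.distributions.Normal(
    loc=tf.zeros([n, z_size]), scale=tf.ones([n, z_size]))
z = dist.sample()  # Tensor of shape [4, 16]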
Example 7: scalar_concat
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def scalar_concat(tensor, scalar):
  """Concatenates a scalar to the last dimension of a tensor.

  Args:
    tensor: A `Tensor`.
    scalar: a scalar `Tensor` to concatenate to tensor `tensor`.

  Returns:
    A `Tensor`. If `tensor` has shape [...,N], the result R has shape
    [...,N+1] and R[...,N] = scalar.

  Raises:
    ValueError: If `tensor` is a scalar `Tensor`.
  """
  ndims = tensor.shape.ndims
  if ndims < 1:
    raise ValueError('`tensor` must have number of dimensions >= 1.')
  shape = tf.shape(tensor)
  return tf.concat(
      [tensor,
       tf.ones([shape[i] for i in range(ndims - 1)] + [1]) * scalar],
      axis=ndims - 1)
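A usage sketch, assuming scalar_concat as defined above (toy values):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
t = tf.reshape(tf.range(6, dtype=tf.float32), [2, 3])
r = scalar_concat(t, 7.0)
with tf.Session() as sess:
  print(sess.run(r))
# [[0. 1. 2. 7.]
#  [3. 4. 5. 7.]]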
Example 8: nearest_upsampling
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def nearest_upsampling(data, height_scale, width_scale, data_format):
  """Nearest neighbor upsampling implementation."""
  with tf.name_scope('nearest_upsampling'):
    # Use reshape to quickly upsample the input. The nearest pixel is selected
    # implicitly via broadcasting.
    if data_format == 'channels_first':
      # Possibly faster for certain GPUs only.
      bs, c, h, w = data.get_shape().as_list()
      bs = -1 if bs is None else bs
      data = tf.reshape(data, [bs, c, h, 1, w, 1]) * tf.ones(
          [1, 1, 1, height_scale, 1, width_scale], dtype=data.dtype)
      return tf.reshape(data, [bs, c, h * height_scale, w * width_scale])

    # Normal format for CPU/TPU/GPU.
    bs, h, w, c = data.get_shape().as_list()
    bs = -1 if bs is None else bs
    data = tf.reshape(data, [bs, h, 1, w, 1, c]) * tf.ones(
        [1, 1, height_scale, 1, width_scale, 1], dtype=data.dtype)
    return tf.reshape(data, [bs, h * height_scale, w * width_scale, c])
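A usage sketch with a tiny 2x2 single-channel image, assuming nearest_upsampling as defined above:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
x = tf.constant(np.arange(4, dtype=np.float32).reshape(1, 2, 2, 1))
up = nearest_upsampling(x, height_scale=2, width_scale=2,
                        data_format='channels_last')
with tf.Session() as sess:
  print(sess.run(up)[0, :, :, 0])
# [[0. 0. 1. 1.]
#  [0. 0. 1. 1.]
#  [2. 2. 3. 3.]
#  [2. 2. 3. 3.]]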
Example 9: pad_to_fixed_size
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def pad_to_fixed_size(data, pad_value, output_shape):
  """Pads data to a fixed length at the first dimension.

  Args:
    data: Tensor to be padded to output_shape.
    pad_value: A constant value assigned to the paddings.
    output_shape: The output shape of a 2D tensor.

  Returns:
    The padded tensor with output_shape [max_num_instances, dimension].
  """
  max_num_instances = output_shape[0]
  dimension = output_shape[1]
  data = tf.reshape(data, [-1, dimension])
  num_instances = tf.shape(data)[0]
  assert_length = tf.Assert(
      tf.less_equal(num_instances, max_num_instances), [num_instances])
  with tf.control_dependencies([assert_length]):
    pad_length = max_num_instances - num_instances
  paddings = pad_value * tf.ones([pad_length, dimension])
  padded_data = tf.concat([data, paddings], axis=0)
  padded_data = tf.reshape(padded_data, output_shape)
  return padded_data
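A usage sketch, assuming pad_to_fixed_size as defined above (toy boxes):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
boxes = tf.constant([[1., 2., 3., 4.], [5., 6., 7., 8.]])
padded = pad_to_fixed_size(boxes, pad_value=-1, output_shape=[5, 4])
with tf.Session() as sess:
  print(sess.run(padded))  # rows 0-1 are the boxes; rows 2-4 are all -1.0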
Example 10: test_output_size_nn_upsample_conv
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def test_output_size_nn_upsample_conv(self):
  batch_size = 2
  height, width = 256, 256
  num_outputs = 4

  images = tf.ones((batch_size, height, width, 3))
  with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
    logits, _ = pix2pix.pix2pix_generator(
        images, num_outputs, blocks=self._reduced_default_blocks(),
        upsample_method='nn_upsample_conv')

  with self.test_session() as session:
    session.run(tf.global_variables_initializer())
    np_outputs = session.run(logits)
    self.assertListEqual([batch_size, height, width, num_outputs],
                         list(np_outputs.shape))
Example 11: test_output_size_conv2d_transpose
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def test_output_size_conv2d_transpose(self):
  batch_size = 2
  height, width = 256, 256
  num_outputs = 4

  images = tf.ones((batch_size, height, width, 3))
  with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
    logits, _ = pix2pix.pix2pix_generator(
        images, num_outputs, blocks=self._reduced_default_blocks(),
        upsample_method='conv2d_transpose')

  with self.test_session() as session:
    session.run(tf.global_variables_initializer())
    np_outputs = session.run(logits)
    self.assertListEqual([batch_size, height, width, num_outputs],
                         list(np_outputs.shape))
Example 12: test_block_number_dictates_number_of_layers
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def test_block_number_dictates_number_of_layers(self):
  batch_size = 2
  height, width = 256, 256
  num_outputs = 4

  images = tf.ones((batch_size, height, width, 3))
  blocks = [
      pix2pix.Block(64, 0.5),
      pix2pix.Block(128, 0),
  ]
  with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
    _, end_points = pix2pix.pix2pix_generator(
        images, num_outputs, blocks)

  num_encoder_layers = 0
  num_decoder_layers = 0
  for end_point in end_points:
    if end_point.startswith('encoder'):
      num_encoder_layers += 1
    elif end_point.startswith('decoder'):
      num_decoder_layers += 1

  self.assertEqual(num_encoder_layers, len(blocks))
  self.assertEqual(num_decoder_layers, len(blocks))
Example 13: test_four_layers
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def test_four_layers(self):
  batch_size = 2
  input_size = 256

  output_size = self._layer_output_size(input_size)
  output_size = self._layer_output_size(output_size)
  output_size = self._layer_output_size(output_size)
  output_size = self._layer_output_size(output_size, stride=1)
  output_size = self._layer_output_size(output_size, stride=1)

  images = tf.ones((batch_size, input_size, input_size, 3))
  with slim.arg_scope(pix2pix.pix2pix_arg_scope()):
    logits, end_points = pix2pix.pix2pix_discriminator(
        images, num_filters=[64, 128, 256, 512])
  self.assertListEqual([batch_size, output_size, output_size, 1],
                       logits.shape.as_list())
  self.assertListEqual([batch_size, output_size, output_size, 1],
                       end_points['predictions'].shape.as_list())
Example 14: native_crop_and_resize
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def native_crop_and_resize(image, boxes, crop_size, scope=None):
  """Same as `matmul_crop_and_resize` but uses tf.image.crop_and_resize."""
  def get_box_inds(proposals):
    proposals_shape = proposals.shape.as_list()
    if any(dim is None for dim in proposals_shape):
      proposals_shape = tf.shape(proposals)
    ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)
    multiplier = tf.expand_dims(
        tf.range(start=0, limit=proposals_shape[0]), 1)
    return tf.reshape(ones_mat * multiplier, [-1])

  with tf.name_scope(scope, 'CropAndResize'):
    cropped_regions = tf.image.crop_and_resize(
        image, tf.reshape(boxes, [-1] + boxes.shape.as_list()[2:]),
        get_box_inds(boxes), crop_size)
    final_shape = tf.concat([tf.shape(boxes)[:2],
                             tf.shape(cropped_regions)[1:]], axis=0)
    return tf.reshape(cropped_regions, final_shape)
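A usage sketch with batched boxes in normalized [ymin, xmin, ymax, xmax] coordinates, assuming native_crop_and_resize as defined above:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
image = tf.random_uniform([2, 32, 32, 3])        # [batch, height, width, channels]
boxes = tf.constant([[[0.0, 0.0, 0.5, 0.5]],
                     [[0.25, 0.25, 1.0, 1.0]]])  # [batch, num_boxes, 4]
crops = native_crop_and_resize(image, boxes, crop_size=[8, 8])
with tf.Session() as sess:
  print(sess.run(crops).shape)  # (2, 1, 8, 8, 3)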
Example 15: expanded_shape
# Required import: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import ones [as alias]
def expanded_shape(orig_shape, start_dim, num_dims):
  """Inserts multiple ones into a shape vector.

  Inserts an all-1 vector of length num_dims at position start_dim into a
  shape. Can be combined with tf.reshape to generalize tf.expand_dims.

  Args:
    orig_shape: the shape into which the all-1 vector is added (int32 vector)
    start_dim: insertion position (int scalar)
    num_dims: length of the inserted all-1 vector (int scalar)

  Returns:
    An int32 vector of length tf.size(orig_shape) + num_dims.
  """
  with tf.name_scope('ExpandedShape'):
    start_dim = tf.expand_dims(start_dim, 0)  # scalar to rank-1
    before = tf.slice(orig_shape, [0], start_dim)
    add_shape = tf.ones(tf.reshape(num_dims, [1]), dtype=tf.int32)
    after = tf.slice(orig_shape, start_dim, [-1])
    new_shape = tf.concat([before, add_shape, after], 0)
    return new_shape
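A usage sketch, assuming expanded_shape as defined above; here it inserts two singleton dimensions at position 1:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
new_shape = expanded_shape(tf.constant([2, 3, 4]), start_dim=1, num_dims=2)
with tf.Session() as sess:
  print(sess.run(new_shape))  # [2 1 1 3 4]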