This article collects typical usage examples of the Python method tensorflow.compat.v1.zeros. If you are wondering how exactly to use v1.zeros, how to call it, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples of the module the method belongs to, tensorflow.compat.v1.
The following presents 15 code examples of the v1.zeros method, sorted by popularity by default.
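Before the individual examples, here is a minimal sketch of the call itself (this snippet is not taken from any of the projects below; the tensors x, y, and z are purely illustrative). It builds zero-filled tensors of a given shape and dtype in TF1-style graph mode:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # the examples below assume TF1-style graph mode

x = tf.zeros([2, 3])                  # defaults to tf.float32
y = tf.zeros((4,), dtype=tf.int32)    # the shape can be a list or a tuple
z = tf.zeros_like(x)                  # zeros with the same shape/dtype as x

with tf.Session() as sess:
  print(sess.run([x, y, z]))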
Example 1: _defer_tensor
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def _defer_tensor(tensor):
  """Defers the retrieval of a tensor.

  The tensor is put into a StagingArea, and the return value is the
  retrieval of the tensor from the StagingArea. The effect is that the
  tensor returned from this function is the tensor that was put in the
  StagingArea for the previous Session.run() call.

  Args:
    tensor: The tensor to defer for one step.

  Returns:
    deferred_tensor: The tensor deferred for one step.
    put_op: An op to put `tensor` in the StagingArea. Must be run every step
      that `deferred_tensor` is run.
    warmup_op: A warmup op that should be called before the first step. Puts
      a zero tensor into the StagingArea.
  """
  tensor_stage = data_flow_ops.StagingArea([tensor.dtype], [tensor.shape])
  put_op = tensor_stage.put([tensor])
  warmup_op = tensor_stage.put([tf.zeros(tensor.shape, dtype=tensor.dtype)])

  # Fetch the next tensor to use.
  (tensor,) = tensor_stage.get()
  return tensor, put_op, warmup_op
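A hedged usage sketch for _defer_tensor (not part of the original benchmark code; it assumes the function above is available together with data_flow_ops from tensorflow.python.ops). It shows the run protocol from the docstring: run warmup_op once, then fetch the deferred tensor together with put_op on every step.

import tensorflow.compat.v1 as tf
from tensorflow.python.ops import data_flow_ops  # used inside _defer_tensor

tf.disable_eager_execution()

value = tf.placeholder(tf.float32, shape=())
deferred, put_op, warmup_op = _defer_tensor(value)

with tf.Session() as sess:
  sess.run(warmup_op)  # stage a zero tensor before the first step
  for i in range(3):
    # Fetch the deferred tensor and stage the current value in the same run.
    out, _ = sess.run([deferred, put_op], feed_dict={value: float(i)})
    print(out)  # prints 0.0, 0.0, 1.0 -- each fetch lags one step behind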
Example 2: _run_benchmark_cnn_with_black_and_white_images
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def _run_benchmark_cnn_with_black_and_white_images(self, params):
  """Runs BenchmarkCNN with black and white images.

  A BenchmarkCNN is created and run with black and white images as input.
  Half the images are black (i.e., filled with 0s) and half are white (i.e.,
  filled with 255s).

  Args:
    params: Params for BenchmarkCNN.

  Returns:
    A list of lines from the output of BenchmarkCNN.
  """
  # TODO(reedwm): Instead of generating images here, use black and white
  # tfrecords by calling test_util.create_black_and_white_images().
  effective_batch_size = params.batch_size * params.num_gpus
  half_batch_size = effective_batch_size // 2
  images = np.zeros((effective_batch_size, 227, 227, 3), dtype=np.float32)
  images[half_batch_size:, :, :, :] = 255
  labels = np.array([0] * half_batch_size + [1] * half_batch_size,
                    dtype=np.int32)
  return self._run_benchmark_cnn_with_fake_images(params, images, labels)
Example 3: testLowAccuracy
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def testLowAccuracy(self):
  params = test_util.get_params('testLowAccuracy')._replace(
      print_training_accuracy=True, batch_size=5, num_batches=10)
  # We force low accuracy by having each batch contain 10 identical images,
  # each with a different label. This guarantees a top-1 accuracy of exactly
  # 0.1 and a top-5 accuracy of exactly 0.5.
  images = np.zeros((10, 227, 227, 3), dtype=np.float32)
  labels = np.arange(10, dtype=np.int32)
  logs = self._run_benchmark_cnn_with_fake_images(params, images, labels)
  training_outputs = test_util.get_training_outputs_from_logs(
      logs, params.print_training_accuracy)
  last_output = training_outputs[-1]
  # TODO(reedwm): These should be assertEqual, but for some reason the
  # accuracies are occasionally lower (running this test 500 times, these
  # asserts failed twice). Investigate this problem.
  self.assertLessEqual(last_output.top_1_accuracy, 0.1)
  self.assertLessEqual(last_output.top_5_accuracy, 0.5)
Example 4: testPreprocessingTrain
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def testPreprocessingTrain(self):
  test_data_dir = os.path.join(platforms_util.get_test_data_dir(), 'images')
  black_file = os.path.join(test_data_dir, 'black_image.jpg')
  with open(black_file, 'rb') as f:
    black_jpg_buffer = f.read()
  white_file = os.path.join(test_data_dir, 'white_image.jpg')
  with open(white_file, 'rb') as f:
    white_jpg_buffer = f.read()
  bbox = tf.zeros((1, 0, 4), dtype=tf.float32)
  batch_position = 0
  # Each size config is (output_height, output_width, resize_method).
  size_configs = [(100, 100, 'round_robin'), (150, 10, 'bilinear'),
                  (10, 150, 'nearest')]
  # Each image config is (image_buf, image_color).
  image_configs = [(white_jpg_buffer, 255), (black_jpg_buffer, 0)]
  for (image_buf, image_color) in image_configs:
    for output_height, output_width, resize_method in size_configs:
      for distortions in [True, False]:
        for summary_verbosity in [0, 2]:
          for fuse_decode_and_crop in [True, False]:
            self._test_preprocessing_traing(
                image_buf, image_color, output_height, output_width, bbox,
                batch_position, resize_method, distortions, summary_verbosity,
                fuse_decode_and_crop)
Example 5: add_edge_bias
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def add_edge_bias(x, filter_size):
  """Pads x and concatenates an edge bias across the depth of x.

  The edge bias can be thought of as a binary feature which is unity when
  the filter is being convolved over an edge and zero otherwise.

  Args:
    x: Input tensor, shape (NHWC).
    filter_size: filter size, used to determine the padding.

  Returns:
    x_pad: Output tensor, shape (NHW(C+1)).
  """
  x_shape = common_layers.shape_list(x)
  if filter_size[0] == 1 and filter_size[1] == 1:
    return x
  a = (filter_size[0] - 1) // 2  # vertical padding size
  b = (filter_size[1] - 1) // 2  # horizontal padding size
  padding = [[0, 0], [a, a], [b, b], [0, 0]]
  x_bias = tf.zeros(x_shape[:-1] + [1])
  x = tf.pad(x, padding)
  x_pad = tf.pad(x_bias, padding, constant_values=1)
  return tf.concat([x, x_pad], axis=3)
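A hedged usage sketch for add_edge_bias (not from the original tensor2tensor code; it assumes the function above and its common_layers dependency are importable). It only checks the documented shape behaviour: NHWC in, NHW(C+1) out, with the extra channel equal to 1 on the padded border and 0 inside.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.zeros([2, 8, 8, 3])                 # NHWC input
y = add_edge_bias(x, filter_size=[3, 3])   # pads by 1 and appends the bias channel

with tf.Session() as sess:
  out = sess.run(y)
  print(out.shape)                         # (2, 10, 10, 4)
  print(out[0, 0, 0, 3], out[0, 5, 5, 3])  # 1.0 on the border, 0.0 inside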
Example 6: single_conv_dist
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def single_conv_dist(name, x, output_channels=None):
  """A 3x3 convolution mapping x to a standard normal distribution at init.

  Args:
    name: variable scope.
    x: 4-D Tensor.
    output_channels: number of channels of the mean and std.

  Returns:
    A tf.distributions.Normal whose mean and scale are predicted from x.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    x_shape = common_layers.shape_list(x)
    if output_channels is None:
      output_channels = x_shape[-1]
    mean_log_scale = conv("conv2d", x, output_channels=2*output_channels,
                          conv_init="zeros", apply_actnorm=False)
    mean = mean_log_scale[:, :, :, 0::2]
    log_scale = mean_log_scale[:, :, :, 1::2]
    return tf.distributions.Normal(mean, tf.exp(log_scale))
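A small standalone sketch (not from the source; the shapes are illustrative) of why a zeros-initialized convolution yields a standard normal at init: the even channels hold the mean, the odd channels hold log(scale), and both are zero, so the distribution is N(0, 1).

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# An all-zero tensor stands in for what the zeros-initialized conv emits at
# init, here with output_channels = 3 (so 2 * 3 interleaved channels).
mean_log_scale = tf.zeros([1, 4, 4, 2 * 3])
mean = mean_log_scale[:, :, :, 0::2]        # even channels: mean, all 0
log_scale = mean_log_scale[:, :, :, 1::2]   # odd channels: log(scale), all 0
dist = tf.distributions.Normal(mean, tf.exp(log_scale))  # N(0, 1) at init

with tf.Session() as sess:
  print(sess.run(dist.stddev())[0, 0, 0])   # [1. 1. 1.]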
Example 7: infer
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def infer(self, features, *args, **kwargs):  # pylint: disable=arguments-differ
  """Produces predictions from the model by sampling."""
  del args, kwargs
  # Inputs and features preparation needed to handle edge cases.
  if not features:
    features = {}
  inputs_old = None
  if "inputs" in features and len(features["inputs"].shape) < 4:
    inputs_old = features["inputs"]
    features["inputs"] = tf.expand_dims(features["inputs"], 2)

  # Sample and decode.
  num_channels = self.num_channels
  if "targets" not in features:
    features["targets"] = tf.zeros(
        [self.hparams.batch_size, 1, 1, num_channels], dtype=tf.int32)
  logits, _ = self(features)  # pylint: disable=not-callable
  samples = tf.argmax(logits, axis=-1)

  # Restore inputs to not confuse Estimator in edge cases.
  if inputs_old is not None:
    features["inputs"] = inputs_old

  # Return samples.
  return samples
Example 8: test_cell_shapes
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def test_cell_shapes(self):
  """Checks that all the NeuralStackCell tensor shapes are correct."""
  batch_size = 5
  embedding_size = 3
  memory_size = 6
  num_units = 8

  stack = neural_stack.NeuralStackCell(num_units, memory_size, embedding_size)
  stack.build(None)

  self.assertEqual([1, 1, memory_size, memory_size],
                   stack.get_read_mask(0).shape)

  stack_input = tf.zeros([batch_size, 1, embedding_size], dtype=tf.float32)
  zero_state = stack.zero_state(batch_size, tf.float32)
  (outputs, (stack_next_state)) = stack.call(stack_input, zero_state)

  # Make sure that stack output shapes match stack input shapes.
  self.assertEqual(outputs.shape, stack_input.shape)
  assert_cell_shapes(self, stack_next_state, zero_state)
Example 9: inject_latent
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def inject_latent(self, layer, inputs, target, action):
  """Injects a VAE-style latent."""
  del action
  # Latent for the stochastic model.
  filters = 128
  full_video = tf.stack(inputs + [target], axis=1)
  latent_mean, latent_std = self.construct_latent_tower(
      full_video, time_axis=1)
  latent = common_video.get_gaussian_tensor(latent_mean, latent_std)
  latent = tfl.flatten(latent)
  latent = tf.expand_dims(latent, axis=1)
  latent = tf.expand_dims(latent, axis=1)
  latent_mask = tfl.dense(latent, filters, name="latent_mask")
  zeros_mask = tf.zeros(
      common_layers.shape_list(layer)[:-1] + [filters], dtype=tf.float32)
  layer = tf.concat([layer, latent_mask + zeros_mask], axis=-1)
  extra_loss = self.get_kl_loss([latent_mean], [latent_std])
  return layer, extra_loss
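A minimal sketch (not from the source; the shapes are made up) of the broadcast trick used above: adding a zeros tensor with the layer's spatial shape tiles the [batch, 1, 1, filters] latent mask across height and width before the concat.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

latent_mask = tf.ones([2, 1, 1, 128])    # stands in for the dense latent output
zeros_mask = tf.zeros([2, 16, 16, 128])  # spatial shape taken from `layer`
tiled = latent_mask + zeros_mask         # broadcasts to [2, 16, 16, 128]

with tf.Session() as sess:
  print(sess.run(tf.shape(tiled)))       # [  2  16  16 128]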
Example 10: testExtractblocks
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def testExtractblocks(self):
  batch_size = 1
  num_heads = 3
  height = 6
  width = 10
  depth = 15
  block_h = 3
  block_w = 2
  t = np.random.rand(batch_size * num_heads, height, width, depth)
  a = common_attention._extract_blocks(t, block_h, block_w)
  self.evaluate(tf.global_variables_initializer())
  res = self.evaluate(a)
  self.assertEqual(res.shape, (batch_size * num_heads, height // block_h,
                               width // block_w, block_h, block_w, depth))

  # Also check that the content is right.
  out = np.zeros((batch_size * num_heads, height // block_h,
                  width // block_w, block_h, block_w, depth))
  for b in range(batch_size * num_heads):
    for x in range(height // block_h):
      for y in range(width // block_w):
        for v in range(block_h):
          for w in range(block_w):
            out[b, x, y, v, w] = t[b, block_h * x + v, block_w * y + w]
  self.assertAllClose(res, out)
Example 11: parse_and_preprocess
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def parse_and_preprocess(self, value, batch_position):
  assert self.supports_datasets()
  image_buffer, label_index, bbox, _ = parse_example_proto(value)
  if self.match_mlperf:
    bbox = tf.zeros((1, 0, 4), dtype=bbox.dtype)
    mlperf.logger.log(key=mlperf.tags.INPUT_CROP_USES_BBOXES, value=False)
  else:
    mlperf.logger.log(key=mlperf.tags.INPUT_CROP_USES_BBOXES, value=True)
  image = self.preprocess(image_buffer, bbox, batch_position)
  return (image, label_index)
Example 12: zero_state
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def zero_state(self, batch_size, dtype):
  with tf.name_scope(type(self).__name__ + 'ZeroState', values=[batch_size]):
    return tf.zeros([batch_size, 1], dtype)
Example 13: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def __init__(self, batch_env):
  super(_MemoryWrapper, self).__init__(batch_env)
  infinity = 10000000
  meta_data = list(zip(*_rollout_metadata(batch_env)))
  # In the memory wrapper we collect neither pdfs nor the value function,
  # thus we only need the first 4 entries of meta_data.
  shapes = meta_data[0][:4]
  dtypes = meta_data[1][:4]
  self.speculum = tf.FIFOQueue(infinity, shapes=shapes, dtypes=dtypes)
  observs_shape = batch_env.observ.shape
  # TODO(piotrmilos): possibly retrieve the observation type for batch_env
  self._observ = tf.Variable(
      tf.zeros(observs_shape, self.observ_dtype), trainable=False)
Example 14: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def __init__(self, batch_env, history=4):
  super(StackWrapper, self).__init__(batch_env)
  self.history = history
  self.old_shape = batch_env.observ_shape
  # TODO(afrozm): Make into tf.get_variable and use_resource=True
  self._observ = tf.Variable(
      tf.zeros((len(self),) + self.observ_shape, self.observ_dtype),
      trainable=False)
Example 15: __init__
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import zeros [as alias]
def __init__(self, batch_env):
  """Batch of environments inside the TensorFlow graph.

  Args:
    batch_env: Batch environment.
  """
  super(PyFuncBatchEnv, self).__init__(batch_env.observation_space,
                                       batch_env.action_space)
  self._batch_env = batch_env
  with tf.variable_scope("env_temporary"):
    self._observ = tf.Variable(
        tf.zeros((self._batch_env.batch_size,) + self.observ_shape,
                 self.observ_dtype),
        name="observ", trainable=False)