This page collects typical usage examples of the Python method tensorflow.compat.v1.add_to_collection. If you have been wondering what v1.add_to_collection does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore the containing module, tensorflow.compat.v1, for more details.
The following shows 15 code examples of v1.add_to_collection, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
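A quick orientation before the examples: tf.compat.v1.add_to_collection(name, value) appends value to the graph collection keyed by name, and tf.compat.v1.get_collection(name) reads the whole list back. A minimal, self-contained sketch (the collection name 'my_losses' is purely illustrative):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # collections are a graph-mode (TF1) concept

# Stash two tensors under one key...
tf.add_to_collection('my_losses', tf.constant(1.0))
tf.add_to_collection('my_losses', tf.constant(2.0))

# ...and drain the collection later, e.g. to form a total loss.
total = tf.add_n(tf.get_collection('my_losses'))

with tf.Session() as sess:
  print(sess.run(total))  # 3.0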
Example 1: provide_data
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def provide_data(self, batch_size):
  """Returns a batch of data and one-hot labels."""
  with tf.name_scope('inputs'):
    with tf.device('/cpu:0'):
      dataset = self.dataset.provide_dataset()
      dataset = dataset.shuffle(buffer_size=1000)
      dataset = dataset.map(self._map_fn, num_parallel_calls=4)
      dataset = dataset.batch(batch_size)
      dataset = dataset.prefetch(1)

      iterator = dataset.make_initializable_iterator()
      tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                           iterator.initializer)

      data, one_hot_labels = iterator.get_next()
      data.set_shape([batch_size, None, None, None])
      one_hot_labels.set_shape([batch_size, None])
      return data, one_hot_labels
Example 2: _variable_with_weight_decay
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
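For context, here is a hedged sketch of how a helper like this is usually consumed; _variable_on_cpu below is a simplified stand-in for the helper the example assumes, and the constant data loss is illustrative:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def _variable_on_cpu(name, shape, initializer):
  # Simplified stand-in for the helper assumed by Example 2.
  with tf.device('/cpu:0'):
    return tf.get_variable(name, shape, initializer=initializer)

# With _variable_with_weight_decay from Example 2 in scope, every variable
# created with wd != None adds an L2 term to the 'losses' collection, and the
# training loss simply sums whatever accumulated there:
w = _variable_with_weight_decay('w', [3, 3], stddev=0.1, wd=1e-4)
tf.add_to_collection('losses', tf.constant(0.5))  # stand-in data loss
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')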
Example 3: build_multi_device_iterator
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def build_multi_device_iterator(self, batch_size, num_splits, cpu_device,
                                params, gpu_devices, dataset, doing_eval):
  """Creates a MultiDeviceIterator."""
  assert self.supports_datasets()
  assert num_splits == len(gpu_devices)
  with tf.name_scope('batch_processing'):
    if doing_eval:
      subset = 'validation'
    else:
      subset = 'train'
    batch_size_per_split = batch_size // num_splits
    ds = self.create_dataset(
        batch_size,
        num_splits,
        batch_size_per_split,
        dataset,
        subset,
        train=(not doing_eval),
        datasets_repeat_cached_sample=params.datasets_repeat_cached_sample,
        num_threads=params.datasets_num_private_threads,
        datasets_use_caching=params.datasets_use_caching,
        datasets_parallel_interleave_cycle_length=(
            params.datasets_parallel_interleave_cycle_length),
        datasets_sloppy_parallel_interleave=(
            params.datasets_sloppy_parallel_interleave),
        datasets_parallel_interleave_prefetch=(
            params.datasets_parallel_interleave_prefetch))
    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        ds,
        gpu_devices,
        source_device=cpu_device,
        max_buffer_size=params.multi_device_iterator_max_buffer_size)
    tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                         multi_device_iterator.initializer)
    return multi_device_iterator
Example 4: create_iterator
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def create_iterator(self, ds):
  ds_iterator = tf.data.make_initializable_iterator(ds)
  tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                       ds_iterator.initializer)
  return ds_iterator
Example 5: _batch_norm_without_layers
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def _batch_norm_without_layers(self, input_layer, decay, use_scale, epsilon):
  """Batch normalization on `input_layer` without tf.layers."""
  # We make this function as similar as possible to the
  # tf.contrib.layers.batch_norm, to minimize the differences between using
  # layers and not using layers.
  shape = input_layer.shape
  num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]
  beta = self.get_variable('beta', [num_channels], tf.float32, tf.float32,
                           initializer=tf.zeros_initializer())
  if use_scale:
    gamma = self.get_variable('gamma', [num_channels], tf.float32,
                              tf.float32, initializer=tf.ones_initializer())
  else:
    gamma = tf.constant(1.0, tf.float32, [num_channels])
  # For moving variables, we use tf.get_variable instead of self.get_variable,
  # since self.get_variable returns the result of tf.cast which we cannot
  # assign to.
  moving_mean = tf.get_variable('moving_mean', [num_channels],
                                tf.float32,
                                initializer=tf.zeros_initializer(),
                                trainable=False)
  moving_variance = tf.get_variable('moving_variance', [num_channels],
                                    tf.float32,
                                    initializer=tf.ones_initializer(),
                                    trainable=False)
  if self.phase_train:
    bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(
        input_layer, gamma, beta, epsilon=epsilon,
        data_format=self.data_format, is_training=True)
    mean_update = moving_averages.assign_moving_average(
        moving_mean, batch_mean, decay=decay, zero_debias=False)
    variance_update = moving_averages.assign_moving_average(
        moving_variance, batch_variance, decay=decay, zero_debias=False)
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
  else:
    bn, _, _ = tf.nn.fused_batch_norm(
        input_layer, gamma, beta, mean=moving_mean,
        variance=moving_variance, epsilon=epsilon,
        data_format=self.data_format, is_training=False)
  return bn
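Note that ops placed in tf.GraphKeys.UPDATE_OPS never run on their own; the usual counterpart is to make the train op depend on them. A minimal sketch of that consumption side (the toy loss is illustrative):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.get_variable('x', [], initializer=tf.ones_initializer())
loss = tf.square(x)

# Run all registered update ops (e.g. the moving-average assignments above)
# as a side effect of every training step.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
  train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)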
Example 6: body
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def body(self, features):
  exp_coupling = ["affine", "additive"]
  if self.hparams.coupling not in exp_coupling:
    raise ValueError("Expected hparams.coupling to be in %s, got %s" %
                     (exp_coupling, self.hparams.coupling))
  if self.is_training:
    init_features = self.create_init_batch(features)
    init_op = self.objective_tower(init_features, init=True)
    init_op = tf.Print(
        init_op, [init_op], message="Triggering data-dependent init.",
        first_n=20)
    tf.add_to_collection("glow_init_op", init_op)
  train_op = self.objective_tower(features, init=False)
  return tf.zeros_like(features["targets"]), {"training": train_op}
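"glow_init_op" is a custom collection here, so some training loop has to fetch and run it explicitly; a plausible sketch of that side, assuming the graph above has already been built:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# After the model graph (including body()) has been built:
init_ops = tf.get_collection("glow_init_op")
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(init_ops)  # trigger the data-dependent init exactly once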
Example 7: pad_conv3d_lrelu
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def pad_conv3d_lrelu(self, activations, n_filters, kernel_size, strides,
                     scope):
  """Pad, apply 3-D convolution and leaky relu."""
  padding = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]

  # tf.nn.conv3d accepts a list of 5 values for strides
  # with first and last value equal to 1.
  if isinstance(strides, numbers.Integral):
    strides = [strides] * 3
  strides = [1] + strides + [1]

  # filter_shape = [K, K, K, num_input, num_output]
  filter_shape = (
      [kernel_size]*3 + activations.shape[-1:].as_list() + [n_filters])

  with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
    conv_filter = tf.get_variable(
        "conv_filter", shape=filter_shape,
        initializer=tf.truncated_normal_initializer(stddev=0.02))

    if self.hparams.use_spectral_norm:
      conv_filter, assign_op = common_layers.apply_spectral_norm(conv_filter)
      if self.is_training:
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign_op)

    padded = tf.pad(activations, padding)
    convolved = tf.nn.conv3d(
        padded, conv_filter, strides=strides, padding="VALID")
    rectified = tf.nn.leaky_relu(convolved, alpha=0.2)
    return rectified
Example 8: testGumbelSoftmaxDiscreteBottleneck
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def testGumbelSoftmaxDiscreteBottleneck(self):
  x = tf.constant([[0, 0.9, 0], [0.8, 0., 0.]], dtype=tf.float32)
  tf.add_to_collection(tf.GraphKeys.GLOBAL_STEP, tf.constant(1))
  x_means_hot, _ = discretization.gumbel_softmax_discrete_bottleneck(
      x, bottleneck_bits=2)
  self.evaluate(tf.global_variables_initializer())
  x_means_hot_eval = self.evaluate(x_means_hot)
  self.assertEqual(np.shape(x_means_hot_eval), (2, 4))
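Registering a tensor under tf.GraphKeys.GLOBAL_STEP is what lets tf.train.get_global_step() find it, which is why the test above can fake a global step without creating a variable. A minimal sketch of that mechanism:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

tf.add_to_collection(tf.GraphKeys.GLOBAL_STEP, tf.constant(1))
step = tf.train.get_global_step()  # returns the scalar registered above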
Example 9: call
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def call(self, *args, **kwargs):
  outputs = super(TpuBatchNormalization, self).call(*args, **kwargs)
  # A temporary hack for tf1 compatibility with keras batch norm.
  for u in self.updates:
    tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, u)
  return outputs
Example 10: scalar
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def scalar(name, tensor):
  """Stores a (name, Tensor) tuple in a custom collection."""
  logging.info('Adding summary {}'.format(Pair(name, tensor)))
  tf.add_to_collection('edsummaries', Pair(name, tf.reduce_mean(tensor)))
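The 'edsummaries' collection is custom, so its consumer lives elsewhere in that codebase; a speculative sketch of what draining it into regular TF summaries could look like (the merge step is an assumption, not shown in the source):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Drain the custom collection when building the summary op.
for name, tensor in tf.get_collection('edsummaries'):
  tf.summary.scalar(name, tensor)
merged = tf.summary.merge_all()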
Example 11: create_optimizer
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def create_optimizer(self):
  """Create the optimizer and scaffold used for training."""
  config = self.get_run_config()
  original_optimizer = self._create_optimizer_fn()

  # Override self.scaffold_fn with a custom scaffold_fn that uses the
  # swapping saver required for MovingAverageOptimizer.
  use_avg_model_params = self.hparams.use_avg_model_params

  def scaffold_fn():
    """Create a scaffold object."""
    # MovingAverageOptimizer requires Swapping Saver.
    scaffold = tf.train.Scaffold()
    if use_avg_model_params:
      saver = original_optimizer.swapping_saver(
          keep_checkpoint_every_n_hours=1)
    else:
      saver = None
    scaffold = tf.train.Scaffold(saver=saver, copy_from_scaffold=scaffold)
    # The saver needs to be added to the graph for td3 hooks.
    tf.add_to_collection(tf.GraphKeys.SAVERS, scaffold.saver)
    return scaffold

  self._scaffold_fn = scaffold_fn

  optimizer = original_optimizer
  if (self._use_sync_replicas_optimizer and
      config is not None and config.num_worker_replicas > 1):
    optimizer = tf.train.SyncReplicasOptimizer(
        optimizer,
        replicas_to_aggregate=config.num_worker_replicas - 1,
        total_num_replicas=config.num_worker_replicas)
  if self.is_device_gpu:
    optimizer = replicate_model_fn.TowerOptimizer(optimizer)
  return optimizer
Example 12: create_optimizer
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def create_optimizer(self):
  """Create the optimizer used for training.

  This function optionally wraps the base optimizer with SyncReplicasOptimizer
  (aggregate gradients across devices).

  Returns:
    An instance of `tf.train.Optimizer`.
  """
  config = self.get_run_config()
  optimizer = self._create_optimizer_fn()
  if self._use_avg_model_params:
    optimizer = optimizers.create_moving_average_optimizer(optimizer)

    def create_swapping_saver_scaffold(saver=None):
      saver = optimizers.create_swapping_saver(optimizer)
      tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
      return tf.train.Scaffold(saver=saver)

    self._scaffold_fn = create_swapping_saver_scaffold
  if (self._use_sync_replicas_optimizer and (not self.is_device_tpu) and
      config is not None and config.num_worker_replicas > 1):
    optimizer = tf.train.SyncReplicasOptimizer(
        optimizer,
        replicas_to_aggregate=config.num_worker_replicas - 1,
        total_num_replicas=config.num_worker_replicas)
    self._sync_replicas_optimizer = optimizer
  return optimizer
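Both create_optimizer variants above register the swapping saver under tf.GraphKeys.SAVERS. Consumers conventionally prefer that collection over building a fresh Saver; a hedged sketch of the lookup side (not the projects' actual hook code):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def get_saver_from_collection():
  # Prefer the saver registered in SAVERS, as TF1 hooks commonly do,
  # falling back to a default Saver over all saveable variables.
  savers = tf.get_collection(tf.GraphKeys.SAVERS)
  return savers[0] if savers else tf.train.Saver()

v = tf.get_variable('v', [])  # ensure there is something to save
tf.add_to_collection(tf.GraphKeys.SAVERS, tf.train.Saver())
saver = get_saver_from_collection()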
Example 13: loss_fun
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def loss_fun(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
      of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the cross entropy between labels and predictions.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='cross_entropy_per_example')

  # Calculate the average cross entropy loss across the batch.
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')

  # Add to the TF collection for losses.
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Example 14: _make_initializable_iterator
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def _make_initializable_iterator(dataset):
  """Creates an iterator, and initializes tables.

  Args:
    dataset: A `tf.data.Dataset` object.

  Returns:
    A `tf.data.Iterator`.
  """
  iterator = tf.data.make_initializable_iterator(dataset)
  tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
  return iterator
Example 15: make_initializable_iterator
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_to_collection [as alias]
def make_initializable_iterator(dataset):
  """Creates an iterator, and initializes tables.

  This is useful in cases where make_one_shot_iterator wouldn't work because
  the graph contains a hash table that needs to be initialized.

  Args:
    dataset: A `tf.data.Dataset` object.

  Returns:
    A `tf.data.Iterator`.
  """
  iterator = dataset.make_initializable_iterator()
  tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
  return iterator
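A closing note on the pattern shared by Examples 1, 3, 4, 14 and 15: registering iterator initializers under tf.GraphKeys.TABLE_INITIALIZERS means tf.tables_initializer(), which Scaffold/MonitoredSession runs at startup, brings the iterators up automatically. An end-to-end sketch with a toy dataset:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

dataset = tf.data.Dataset.range(5).batch(2)
iterator = tf.data.make_initializable_iterator(dataset)
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
batch = iterator.get_next()

# tables_initializer() groups everything in TABLE_INITIALIZERS, so the
# iterator is initialized without naming iterator.initializer anywhere.
with tf.Session() as sess:
  sess.run(tf.tables_initializer())
  print(sess.run(batch))  # [0 1]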