This article collects typical code examples of the Python method tensorflow.colocate_with. If you are wondering what exactly tensorflow.colocate_with does or how to use it in practice, the curated examples below may help. You can also explore further usage examples from the tensorflow module, where this method is defined.
Shown below are 15 code examples of tensorflow.colocate_with, sorted by popularity by default.
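Before the examples, a minimal self-contained sketch of what tf.colocate_with does may be useful. This is our own illustration, assuming TensorFlow 1.x (where tf.colocate_with is a top-level context manager); the variable name v is arbitrary:

import tensorflow as tf

v = tf.get_variable("v", shape=[2], initializer=tf.zeros_initializer())
with tf.colocate_with(v):
  # Ops created here carry the colocation constraint "loc:@v", so the
  # runtime places them on the same device as `v`.
  doubled = 2.0 * v

print(doubled.op.colocation_groups())  # e.g. [b'loc:@v']

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(doubled))  # [0. 0.]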
Example 1: _finish
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def _finish(self, update_ops, name_scope):
  """Updates beta_power variables every n batches and incrs counter."""
  iter_ = self._get_iter_variable()
  beta1_power, beta2_power = self._get_beta_accumulators()
  with tf.control_dependencies(update_ops):
    with tf.colocate_with(iter_):

      def update_beta_op():
        update_beta1 = beta1_power.assign(
            beta1_power * self._beta1_t,
            use_locking=self._use_locking)
        update_beta2 = beta2_power.assign(
            beta2_power * self._beta2_t,
            use_locking=self._use_locking)
        return tf.group(update_beta1, update_beta2)

      maybe_update_beta = tf.cond(
          tf.equal(iter_, 0), update_beta_op, tf.no_op)
      with tf.control_dependencies([maybe_update_beta]):
        update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
                                   use_locking=self._use_locking)
  return tf.group(
      *update_ops + [update_iter, maybe_update_beta], name=name_scope)
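To make the control flow above easier to follow, here is a standalone reduction of the same every-n-steps pattern: a tf.cond guarded by a tf.mod counter, colocated with the counter variable. This is our own sketch assuming TensorFlow 1.x; the names step, acc and n are illustrative, not part of the optimizer:

import tensorflow as tf

n = 4  # period: the conditional update fires once every n steps
step = tf.get_variable("step", initializer=0, trainable=False)
acc = tf.get_variable("acc", initializer=1.0, trainable=False)

with tf.colocate_with(step):
  # Only decay `acc` on steps where the counter has wrapped to zero.
  maybe_decay = tf.cond(tf.equal(step, 0),
                        lambda: tf.group(acc.assign(acc * 0.5)),
                        tf.no_op)
  with tf.control_dependencies([maybe_decay]):
    advance = step.assign(tf.mod(step + 1, n))

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  for _ in range(8):
    sess.run(advance)
  print(sess.run(acc))  # 0.25: decayed on iterations 1 and 5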
Example 2: init_vq_bottleneck
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def init_vq_bottleneck(bottleneck_size, hidden_size):
  """Get lookup table for VQ bottleneck."""
  means = tf.get_variable(
      name="means",
      shape=[bottleneck_size, hidden_size],
      initializer=tf.uniform_unit_scaling_initializer())
  ema_count = tf.get_variable(
      name="ema_count",
      shape=[bottleneck_size],
      initializer=tf.constant_initializer(0),
      trainable=False)
  with tf.colocate_with(means):
    ema_means = tf.get_variable(
        name="ema_means",
        initializer=means.initialized_value(),
        trainable=False)
  return means, ema_means, ema_count
Example 3: get_vq_bottleneck
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def get_vq_bottleneck(bottleneck_size, hidden_size):
  """Get lookup table for VQ bottleneck."""
  with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
    means = tf.get_variable(
        name="means",
        shape=[bottleneck_size, hidden_size],
        initializer=tf.uniform_unit_scaling_initializer())
    ema_count = tf.get_variable(
        name="ema_count",
        shape=[bottleneck_size],
        initializer=tf.constant_initializer(0),
        trainable=False)
    with tf.colocate_with(means):
      ema_means = tf.get_variable(
          name="ema_means",
          initializer=means.initialized_value(),
          trainable=False)
  return means, ema_means, ema_count
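A possible invocation of the function above might look as follows. This is a sketch under the assumption of TensorFlow 1.x, with arbitrary sizes:

import tensorflow as tf

means, ema_means, ema_count = get_vq_bottleneck(bottleneck_size=256,
                                                hidden_size=64)
# The colocation constraint ties `ema_means` to `means`.
print(ema_means.op.colocation_groups())  # e.g. [b'loc:@vq/means']

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(ema_count).shape)  # (256,)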
Example 4: get_vq_codebook
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def get_vq_codebook(codebook_size, hidden_size):
  """Get lookup table for VQ bottleneck."""
  with tf.variable_scope("vq", reuse=tf.AUTO_REUSE):
    means = tf.get_variable(
        name="means",
        shape=[codebook_size, hidden_size],
        initializer=tf.uniform_unit_scaling_initializer())
    ema_count = tf.get_variable(
        name="ema_count",
        shape=[codebook_size],
        initializer=tf.constant_initializer(0),
        trainable=False)
    with tf.colocate_with(means):
      ema_means = tf.get_variable(
          name="ema_means",
          initializer=means.initialized_value(),
          trainable=False)
  return means, ema_means, ema_count
Example 5: _get_transformed_random_signs
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def _get_transformed_random_signs(self):
  if self.mat_type == "Fisher":
    mult_func = lambda loss, index: loss.multiply_fisher_factor(index)
    inner_shape_func = lambda loss: loss.fisher_factor_inner_shape
  elif self.mat_type == "GGN":
    mult_func = lambda loss, index: loss.multiply_ggn_factor(index)
    inner_shape_func = lambda loss: loss.ggn_factor_inner_shape

  transformed_random_signs = []
  for loss in self.layers.losses:
    with tf.colocate_with(self.layers.loss_colocation_ops[loss]):
      value = mult_func(loss,
                        utils.generate_random_signs(inner_shape_func(loss),
                                                    dtype=loss.dtype))
      coeff = tf.cast(self.layers.loss_coeffs[loss], dtype=value.dtype)
      transformed_random_signs.append(tf.sqrt(coeff) * value)
  return transformed_random_signs
Example 6: eval_losses
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def eval_losses(self, target_mode="data", coeff_mode="regular"):
  """Returns evaluated losses (colocated with inputs to losses)."""
  evals = []
  for loss in self.losses:
    with tf.colocate_with(self.loss_colocation_ops[loss]):
      if target_mode == "data":
        loss_value = loss.evaluate()
      elif target_mode == "sample":
        loss_value = loss.evaluate_on_sample()
      else:
        raise ValueError("target_mode must be in ['data', 'sample']")

      if coeff_mode == "regular":
        multiplier = self.loss_coeffs[loss]
      elif coeff_mode == "sqrt":
        multiplier = tf.sqrt(self.loss_coeffs[loss])
      elif coeff_mode == "off":
        multiplier = 1.0
      else:
        raise ValueError("coeff_mode must be in ['regular', 'sqrt', 'off']")
      multiplier = tf.cast(multiplier, dtype=loss_value.dtype)
      evals.append(multiplier * loss_value)
  return evals
Example 7: concat_all_device_tensors
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def concat_all_device_tensors(self, all_device_tensors):
  """For each device, concatenate the device's tensors into a single tensor.

  Args:
    all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]`
      is a tensor where `i` is the device index and `j` is the tensor index.

  Returns:
    A list of list of tensors in a similar form as all_device_tensors, except
    the tensors on each device have been concatenated. Each inner list
    consists of a single concatenated tensor.
  """
  assert self._next_method == 'concat'
  new_all_device_tensors = []
  tensor_states = []
  for device_tensors in all_device_tensors:
    with tf.colocate_with(device_tensors[0]):
      concat_tensor, tensor_state = self._concat_tensors(device_tensors)
      new_all_device_tensors.append([concat_tensor])
      tensor_states.append(tensor_state)
  self._tensor_states = tensor_states
  self._next_method = 'split'
  return new_all_device_tensors
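The colocate-then-concatenate idiom from this example can be reproduced in isolation. The sketch below assumes TensorFlow 1.x and replaces the class's _concat_tensors (whose definition is not shown here) with an inline flatten-and-concat:

import tensorflow as tf

device_tensors = [tf.ones([2, 3]), tf.zeros([5])]  # one device's tensors

# Colocate the packing ops with the first tensor so the concatenated
# result stays on the same device as its inputs.
with tf.colocate_with(device_tensors[0]):
  flat = tf.concat([tf.reshape(t, [-1]) for t in device_tensors], axis=0)

with tf.Session() as sess:
  print(sess.run(flat).shape)  # (11,)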
Example 8: find_state_op_colocation_error
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def find_state_op_colocation_error(graph, reported_tags=None):
  """Returns error message for colocation of state ops, or None if ok."""
  state_op_types = list_registered_stateful_ops_without_inputs(
      graph.as_graph_def())
  state_op_map = {op.name: op for op in graph.get_operations()
                  if op.type in state_op_types}
  for op in state_op_map.values():
    for colocation_group in op.colocation_groups():
      if not (colocation_group.startswith(tf.compat.as_bytes("loc:@")) and
              tf.compat.as_str_any(colocation_group[5:]) in state_op_map):
        tags_prefix = ("" if reported_tags is None else
                       "in the graph for tags %s, " % reported_tags)
        return (
            "A state-holding node x of a module's graph (e.g., a Variable op) "
            "must not be subject to a tf.colocate_with(y) constraint "
            "unless y is also a state-holding node.\n"
            "Details: %snode '%s' has op '%s', which counts as state-holding, "
            "but Operation.colocation_groups() == %s. " %
            (tags_prefix, op.name, op.type, op.colocation_groups()))
  return None
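To see the colocation_groups() bookkeeping this check relies on, consider a toy graph. This is our own sketch, assuming TensorFlow 1.x with non-resource variables (op type VariableV2):

import tensorflow as tf

g = tf.Graph()
with g.as_default():
  v = tf.get_variable("v", shape=[], initializer=tf.zeros_initializer())
  with tf.colocate_with(v):
    w = tf.get_variable("w", shape=[], initializer=tf.zeros_initializer())

for op in g.get_operations():
  if op.type == "VariableV2":
    print(op.name, op.colocation_groups())
# v [b'loc:@v']
# w [b'loc:@v']  <- w is colocated with v; since v is also state-holding,
#                   the check above would report no error.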
Example 9: find_signature_inputs_from_multivalued_ops
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def find_signature_inputs_from_multivalued_ops(inputs):
  """Returns error message for module inputs from ops with multiple outputs."""
  dense_inputs = []  # List of (str, Tensor), with SparseTensors decomposed.
  for name, tensor in sorted(inputs.items()):
    if isinstance(tensor, tf.SparseTensor):
      dense_inputs.extend(("%s.%s" % (name, attr), getattr(tensor, attr))
                          for attr in ("indices", "values", "dense_shape"))
    else:
      dense_inputs.append((name, tensor))
  warnings = [(name, tensor.name) for name, tensor in dense_inputs
              if len(tensor.op.outputs) != 1]
  if warnings:
    return (
        "WARNING: The inputs declared in hub.add_signature() should be tensors "
        "from ops with a single output, or else uses of tf.colocate_with() on "
        "that op can trigger fatal errors when the module is applied and "
        "colocation constraints have to be rewritten.\nAffected inputs: %s" %
        ", ".join("%s='%s'" % pair for pair in warnings))
  return None
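For illustration, a tensor that would trip this warning is any output of a multi-output op, such as tf.split. A small sketch of ours:

import tensorflow as tf

x = tf.ones([4, 2])
a, b = tf.split(x, 2, axis=0)  # a single Split op with two outputs
print(len(a.op.outputs))  # 2, so `a` would be flagged by the check above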
Example 10: _create_slots
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def _create_slots(self, var_list):
  """Create slot variables for Adam with accumulated gradients."""
  super(MultistepAdamOptimizer, self)._create_slots(var_list)
  first_var = min(var_list, key=lambda x: x.name)
  self._create_non_slot_variable(initial_value=0 if self._n == 1 else 1,
                                 name="iter",
                                 colocate_with=first_var)
  for v in var_list:
    self._zeros_slot(v, "grad_acc", self._name)
Example 11: testDiscreteBottleneckVQ
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def testDiscreteBottleneckVQ(self):
  hidden_size = 60
  z_size = 4
  x = tf.zeros(shape=[100, 1, hidden_size], dtype=tf.float32)
  with tf.variable_scope("test", reuse=tf.AUTO_REUSE):
    means = tf.get_variable("means",
                            shape=[1, 1, 2**z_size, hidden_size],
                            initializer=tf.constant_initializer(0.),
                            dtype=tf.float32)
    ema_count = []
    ema_count_i = tf.get_variable(
        "ema_count", [1, 2**z_size],
        initializer=tf.constant_initializer(0),
        trainable=False)
    ema_count.append(ema_count_i)
    ema_means = []
    with tf.colocate_with(means):
      ema_means_i = tf.get_variable("ema_means",
                                    initializer=means.initialized_value()[0],
                                    trainable=False)
    ema_means.append(ema_means_i)
    x_means_dense, x_means_hot, _, _, _ = discretization.discrete_bottleneck(
        x, hidden_size, z_size, 32, means=means, num_blocks=1,
        ema_means=ema_means, ema_count=ema_count, name="test")

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      x_means_dense_eval, x_means_hot_eval = sess.run(
          [x_means_dense, x_means_hot])
      means_eval = sess.run(means)
    self.assertEqual(x_means_dense_eval.shape, (100, 1, hidden_size))
    self.assertEqual(x_means_hot_eval.shape, (100, 1))
    self.assertTrue(np.all(means_eval == np.zeros(
        (1, 1, 2**z_size, hidden_size))))
Example 12: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def __init__(self, hparams):
  self.hparams = hparams
  print("self.hparams.z_size", self.hparams.z_size)
  # Set the discretization bottleneck specific things here
  self.hparams.z_size_per_residual = (self.hparams.z_size //
                                      self.hparams.num_residuals)
  print("self.hparams.num_residuals", self.hparams.num_residuals)
  self.hparams.block_dim = int(
      self.hparams.hidden_size // self.hparams.num_blocks)
  self.hparams.block_v_size = 2**(
      self.hparams.z_size_per_residual / self.hparams.num_blocks)
  self.hparams.block_v_size = int(self.hparams.block_v_size)
  self.means = tf.get_variable(
      name="means",
      shape=[
          self.hparams.num_blocks, self.hparams.block_v_size,
          self.hparams.block_dim
      ],
      initializer=tf.initializers.variance_scaling(distribution="uniform"))

  # Create the shadow variables if we are using EMA
  if self.hparams.ema:
    self.ema_count = tf.get_variable(
        "ema_count", [self.hparams.num_blocks, self.hparams.block_v_size],
        initializer=tf.constant_initializer(0),
        trainable=False)
    with tf.colocate_with(self.means):
      self.ema_means = tf.get_variable(
          "ema_means",
          initializer=self.means.initialized_value(),
          trainable=False)
Example 13: _update_velocities
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def _update_velocities(self, vecs_and_vars, decay, vec_coeff=1.0):
  """Updates the velocities of the variables with the given vectors.

  Args:
    vecs_and_vars: List of (vector, variable) pairs.
    decay: How much to decay the old velocity by. This is often referred to
      as the 'momentum constant'.
    vec_coeff: Coefficient to apply to the vectors before adding them to the
      velocity.

  Returns:
    A list of (velocity, var) indicating the new velocity for each var.
  """

  def _update_velocity(vec, var):
    velocity = self._zeros_slot(var, "velocity", self.get_name())
    with tf.colocate_with(velocity):
      # NOTE(mattjj): read/modify/write race condition not suitable for async.

      # Compute the new velocity for this variable.
      new_velocity = decay * velocity + vec_coeff * vec

      # Save the updated velocity.
      return (tf.identity(utils.smart_assign(velocity, new_velocity)), var)

  # Go through variable and update its associated part of the velocity vector.
  return [_update_velocity(vec, var) for vec, var in vecs_and_vars]
Example 14: _get_grads_lists_exact
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def _get_grads_lists_exact(self, tensors):
  if self.mat_type == "Fisher":
    # pylint: disable=g-long-lambda
    mult_func = (lambda loss, index:
                 loss.multiply_fisher_factor_replicated_one_hot(index))
    inner_shape_func = lambda loss: loss.fisher_factor_inner_static_shape
  elif self.mat_type == "GGN":
    # pylint: disable=g-long-lambda
    mult_func = (lambda loss, index:
                 loss.multiply_ggn_factor_replicated_one_hot(index))
    inner_shape_func = lambda loss: loss.ggn_factor_inner_static_shape

  # Loop over all coordinates of all losses.
  grads_all = []
  for loss in self.layers.losses:
    with tf.colocate_with(self.layers.loss_colocation_ops[loss]):
      for index in np.ndindex(*inner_shape_func(loss)[1:]):
        value = mult_func(loss, index)
        coeff = tf.cast(self.layers.loss_coeffs[loss], dtype=value.dtype)
        transformed_one_hot = tf.sqrt(coeff) * value
        grads_flat = tf.gradients(
            loss.inputs,
            nest.flatten(tensors),
            grad_ys=transformed_one_hot,
            colocate_gradients_with_ops=self._colocate_gradients_with_ops)
        grads_all.append(nest.pack_sequence_as(tensors, grads_flat))
  return tuple(zip(*grads_all))
Example 15: _multiply_across_losses
# Required import: import tensorflow [as alias]
# Or: from tensorflow import colocate_with [as alias]
def _multiply_across_losses(self, mult_func, vecs, coeff_mode="regular"):
  products = []
  for loss, vec in zip(self._losses, vecs):
    with tf.colocate_with(self._loss_colocation_ops[loss]):
      if coeff_mode == "regular":
        multiplier = self._get_loss_coeff(loss)
      elif coeff_mode == "sqrt":
        multiplier = tf.sqrt(self._get_loss_coeff(loss))
      val = mult_func(loss, vec)
      products.append(tf.cast(multiplier, dtype=val.dtype) * val)
  return tuple(products)