This page collects typical usage examples of the Python method tensorflow.no_op. If you are unsure what tensorflow.no_op does, how to call it, or how it is used in practice, the hand-picked examples below may help. You can also explore further usage examples from the tensorflow module where the method lives.
The following presents 15 code examples of tensorflow.no_op, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def __init__(self, optimizer, layer_size, num_layers, learn_mixture_weights, seed):
"""Initializes a `_DNNBuilder`.
Args:
optimizer: An `Optimizer` instance for training both the subnetwork and
the mixture weights.
layer_size: The number of nodes to output at each hidden layer.
num_layers: The number of hidden layers.
learn_mixture_weights: Whether to solve a learning problem to find the
best mixture weights, or use their default value according to the
mixture weight type. When `False`, the subnetworks will return a no_op
for the mixture weight train op.
seed: A random seed.
Returns:
An instance of `_SimpleDNNBuilder`.
"""
self._optimizer = optimizer
self._layer_size = layer_size
self._num_layers = num_layers
self._learn_mixture_weights = learn_mixture_weights
self._seed = seed
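
A hypothetical instantiation of the builder above, assuming the enclosing class is the adanet-tutorial-style `_SimpleDNNBuilder`; the optimizer choice and hyperparameters are illustrative, not from the source (TensorFlow 1.x assumed):

import tensorflow as tf

builder = _SimpleDNNBuilder(
    optimizer=tf.train.RMSPropOptimizer(learning_rate=0.01),
    layer_size=64,
    num_layers=3,
    learn_mixture_weights=False,  # mixture-weight train op becomes a no_op
    seed=42)
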
Example 2: testPS
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def testPS(self):
deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
with tf.device(deploy_config.variables_device()):
a = tf.Variable(0)
b = tf.Variable(0)
c = tf.no_op()
d = slim.variable('a', [],
caching_device=deploy_config.caching_device())
self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(a.device, a.value().device)
self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(b.device, b.value().device)
self.assertDeviceEqual(c.device, '')
self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(d.value().device, '')
Example 3: testVariablesPS
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def testVariablesPS(self):
deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)
with tf.device(deploy_config.variables_device()):
a = tf.Variable(0)
b = tf.Variable(0)
c = tf.no_op()
d = slim.variable('a', [],
caching_device=deploy_config.caching_device())
self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(a.device, a.value().device)
self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
self.assertDeviceEqual(b.device, b.value().device)
self.assertDeviceEqual(c.device, '')
self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(d.value().device, '')
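
The empty device string asserted for `c` in both tests is worth a note: `tf.no_op()` creates a stateless operation, so a variables-only device function leaves it unplaced. A minimal sketch of that behavior, assuming TensorFlow 1.x graph mode (the device function and names below are illustrative, not part of model_deploy):

import tensorflow as tf

def variables_on_ps(op):
    # Pin only variable state to the parameter server; leave other ops unplaced.
    if op.type in ('Variable', 'VariableV2', 'VarHandleOp'):
        return '/job:ps/task:0/device:CPU:0'
    return op.device

with tf.Graph().as_default():
    with tf.device(variables_on_ps):
        v = tf.Variable(0)
        noop = tf.no_op()
    print(v.device)     # /job:ps/task:0/device:CPU:0
    print(noop.device)  # '' -- no_op carries no state to pin
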
Example 4: begin_episode
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def begin_episode(self, agent_indices):
"""Reset the recurrent states and stored episode.
Args:
agent_indices: Tensor containing current batch indices.
Returns:
Summary tensor.
"""
with tf.name_scope('begin_episode/'):
if self._last_state is None:
reset_state = tf.no_op()
else:
reset_state = utility.reinit_nested_vars(
self._last_state, agent_indices)
reset_buffer = self._episodes.clear(agent_indices)
with tf.control_dependencies([reset_state, reset_buffer]):
return tf.constant('')
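
The pattern above is a common reason to reach for tf.no_op: substitute it when there is no state to reset, so the control_dependencies block can be wired unconditionally. A stripped-down sketch of the same idea, assuming TensorFlow 1.x and a single state variable (the names are hypothetical):

import tensorflow as tf

def begin_episode(last_state, clear_buffer_op):
    # No recurrent state yet: depend on a no_op instead of a reset.
    if last_state is None:
        reset_state = tf.no_op()
    else:
        reset_state = tf.assign(last_state, tf.zeros_like(last_state))
    with tf.control_dependencies([reset_state, clear_buffer_op]):
        # The returned summary tensor evaluates only after both resets run.
        return tf.constant('')
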
Example 5: weight_noise
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def weight_noise(noise_rate, learning_rate, var_list):
"""Apply weight noise to vars in var_list."""
if not noise_rate:
return [tf.no_op()]
tf.logging.info("Applying weight noise scaled by learning rate, "
"noise_rate: %0.5f", noise_rate)
noise_ops = []
for v in var_list:
with tf.device(v._ref().device): # pylint: disable=protected-access
scale = noise_rate * learning_rate * 0.001
tf.summary.scalar("weight_noise_scale", scale)
noise = tf.truncated_normal(v.shape) * scale
noise_op = v.assign_add(noise)
noise_ops.append(noise_op)
return noise_ops
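
Returning [tf.no_op()] instead of an empty list keeps the caller's graph wiring unconditional. A hypothetical usage sketch (train_step is a stand-in for a real optimizer update op; TensorFlow 1.x assumed):

train_step = tf.no_op()  # stand-in for the real optimizer update op
noise_ops = weight_noise(noise_rate=0.0, learning_rate=0.1,
                         var_list=tf.trainable_variables())
train_op = tf.group(train_step, *noise_ops)  # valid even with noise disabled
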
Example 6: _finish
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def _finish(self, update_ops, name_scope):
"""Updates beta_power variables every n batches and incrs counter."""
iter_ = self._get_iter_variable()
beta1_power, beta2_power = self._get_beta_accumulators()
with tf.control_dependencies(update_ops):
with tf.colocate_with(iter_):
def update_beta_op():
update_beta1 = beta1_power.assign(
beta1_power * self._beta1_t,
use_locking=self._use_locking)
update_beta2 = beta2_power.assign(
beta2_power * self._beta2_t,
use_locking=self._use_locking)
return tf.group(update_beta1, update_beta2)
maybe_update_beta = tf.cond(
tf.equal(iter_, 0), update_beta_op, tf.no_op)
with tf.control_dependencies([maybe_update_beta]):
update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
use_locking=self._use_locking)
return tf.group(
*update_ops + [update_iter, maybe_update_beta], name=name_scope)
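
Note how `tf.no_op` (the function itself, not a created op) is passed as the do-nothing branch of `tf.cond`: both branches must be callables, and `tf.no_op` already is one. A minimal sketch of the same idiom, assuming TensorFlow 1.x (the names are illustrative):

import tensorflow as tf

step = tf.Variable(0, dtype=tf.int64)
acc = tf.Variable(1.0)

def update():
    return tf.group(acc.assign(acc * 0.9))

# Runs `update` every 10th step; otherwise the cond resolves to a no_op.
maybe_update = tf.cond(tf.equal(tf.mod(step, 10), 0), update, tf.no_op)
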
Example 7: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def __init__(self, ckpt_dir, **kwargs_saver):
"""
:param ckpt_dir: where to save data
:param kwargs_saver: Passed on to the tf.train.Saver that will be created
"""
os.makedirs(ckpt_dir, exist_ok=True)
self.ckpt_dir = ckpt_dir
self.ckpt_base_file_path = path.join(ckpt_dir, _CKPT_FN)
all_saveable_vars = tf_helpers.all_saveable_objects()
var_list = kwargs_saver.get('var_list', all_saveable_vars)
var_names = VarNames(ckpt_dir)
if not var_names.exists():
print('Saver for {} saves {} variables...'.format(self.ckpt_dir, len(var_list)))
var_names.write([v.name for v in var_list])
unrestored_vars = [v for v in all_saveable_vars if v not in var_list]
if unrestored_vars:
print('Found {} unrestored variables'.format(len(unrestored_vars)))
self.init_unrestored_op = (tf.variables_initializer(unrestored_vars)
if unrestored_vars else tf.no_op())
self.saver = tf.train.Saver(**kwargs_saver)
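
Because `init_unrestored_op` degrades to tf.no_op() when every saveable variable is covered by the saver, callers can run it unconditionally after a restore. A hypothetical usage sketch (the helper name and arguments are illustrative, assuming an instance of the class above):

import tensorflow as tf

def restore_and_init(sess, ckpt_saver, ckpt_path):
    # ckpt_saver is an instance of the class above (hypothetical name).
    ckpt_saver.saver.restore(sess, ckpt_path)
    # Safe in both cases: a real initializer, or a no_op if nothing was left out.
    sess.run(ckpt_saver.init_unrestored_op)
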
Example 8: variable_synchronizer
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def variable_synchronizer(comm, vars, *, limit=1<<28):
"""Synchronize `vars` from the root to other processs"""
if comm.Get_size() == 1:
return tf.no_op()
# Split vars into chunks so that no chunk is over limit bytes
batches = chunk_tensors(sorted(vars, key=lambda v: v.name), limit=limit)
# Synchronize each batch, using a separate communicator to ensure safety
prev = tf.no_op()
for batch in batches:
with tf.control_dependencies([prev]):
assigns = []
values = map_flat_bits(partial(mpi_bcast, comm), batch)
for var, value in zip(batch, values):
assigns.append(var.assign(value))
prev = tf.group(*assigns)
return prev
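
Seeding `prev` with tf.no_op() lets the loop chain each batch after the previous one without special-casing the first iteration. The chaining idiom in isolation, assuming TensorFlow 1.x (the names are illustrative):

import tensorflow as tf

def sequential_assigns(pairs):
    # Force the assigns to run one after another, not concurrently.
    prev = tf.no_op()
    for var, value in pairs:
        with tf.control_dependencies([prev]):
            prev = var.assign(value).op
    return prev
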
Example 9: apply_gradients
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    ws = [v for _, v in grads_and_vars]
    grads = [g for g, _ in grads_and_vars]
    self._prepare()
    jac_vec = self.fwd_gradients(grads, ws, grad_xs=grads, stop_gradients=ws)
    jac_vec = [tf.zeros_like(x) if dydx is None else dydx
               for x, dydx in zip(ws, jac_vec)]
    jac_tran_vec = tf.gradients(grads, ws, grad_ys=grads, stop_gradients=ws)
    jac_tran_vec = [tf.zeros_like(x) if dydx is None else dydx
                    for x, dydx in zip(ws, jac_tran_vec)]
    at_xi = [(ht - h) * 0.5 for (h, ht) in zip(jac_vec, jac_tran_vec)]
    if self.config.minus:
        new_grads = [g - a for g, a in zip(grads, at_xi)]
    else:
        new_grads = [g + a for g, a in zip(grads, at_xi)]
    grads_and_vars2 = zip(new_grads, ws)
    op8 = self.optimizer.apply_gradients(
        list(grads_and_vars2), global_step=global_step, name=name)
with tf.get_default_graph().control_dependencies([op8]):
return tf.no_op()
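
For orientation, my reading of the code above (offered as an assumption, since the source is not named here): with stacked gradient g and Jacobian J of g with respect to the weights, `fwd_gradients` produces the forward-mode product Jg, `tf.gradients` with grad_ys=grads produces the reverse-mode product Jᵀg, and the correction applied before delegating to the wrapped optimizer is

    a = \frac{1}{2}\left(J^{\top} g - J g\right), \qquad g_{\text{new}} = g \pm a,

which resembles the symplectic gradient adjustment used to stabilize gradient play in differentiable games. The final tf.no_op under control_dependencies gives the caller a single op handle that runs the whole update.
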
Example 10: _get_data
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def _get_data(dataset, batch_size=None, num_epochs=None, num_readers=1):
""" Get the subset of the passed in dataset from the directory indicated """
if batch_size is None:
raise ValueError('batch_size must not specified')
data_provider = slim.dataset_data_provider.DatasetDataProvider(
dataset, num_readers=num_readers, num_epochs=num_epochs,
common_queue_capacity=20 * batch_size, common_queue_min=10 * batch_size)
[image] = data_provider.get(['image'])
image = preprocess_image(image)
return tf.no_op(), {}, image
Example 11: _graph_fn_get_should_sync
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def _graph_fn_get_should_sync(self):
if get_backend() == "tf":
inc_op = tf.assign_add(self.steps_since_last_sync, 1)
should_sync = inc_op >= self.q_sync_spec.sync_interval
def reset_op():
op = tf.assign(self.steps_since_last_sync, 0)
with tf.control_dependencies([op]):
return tf.no_op()
sync_op = tf.cond(
pred=inc_op >= self.q_sync_spec.sync_interval,
true_fn=reset_op,
false_fn=tf.no_op
)
with tf.control_dependencies([sync_op]):
return tf.identity(should_sync)
else:
raise NotImplementedError("TODO")
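
Both branches of `tf.cond` must return matching structures, which is why `reset_op` above hides its assign behind control_dependencies and returns tf.no_op(), mirroring `false_fn=tf.no_op`. The shape of that trick in isolation, assuming TensorFlow 1.x (the names are illustrative):

import tensorflow as tf

counter = tf.Variable(0)

def reset():
    with tf.control_dependencies([counter.assign(0)]):
        return tf.no_op()  # matches the structure of false_fn below

maybe_reset = tf.cond(counter >= 10, true_fn=reset, false_fn=tf.no_op)
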
Example 12: _graph_fn_sync
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def _graph_fn_sync(self, should_sync):
assign_ops = []
tau = self.q_sync_spec.sync_tau
if tau != 1.0:
        all_source_vars = [
            source.get_variables(collections=None, custom_scope_separator="-")
            for source in self._q_functions]
        all_dest_vars = [
            destination.get_variables(collections=None, custom_scope_separator="-")
            for destination in self._target_q_functions]
        for source_vars, dest_vars in zip(all_source_vars, all_dest_vars):
            for (source_key, source_var), (dest_key, dest_var) in zip(
                    sorted(source_vars.items()), sorted(dest_vars.items())):
                assign_ops.append(tf.assign(
                    dest_var, tau * source_var + (1.0 - tau) * dest_var))
else:
all_source_vars = [source.variables() for source in self._q_functions]
for source_vars, destination in zip(all_source_vars, self._target_q_functions):
assign_ops.append(destination.sync(source_vars))
assert len(assign_ops) > 0
grouped_op = tf.group(assign_ops)
def assign_op():
        # Make sure we return a no_op rather than a reference to the grouped op.
with tf.control_dependencies([grouped_op]):
return tf.no_op()
cond_assign_op = tf.cond(should_sync, true_fn=assign_op, false_fn=tf.no_op)
with tf.control_dependencies([cond_assign_op]):
return tf.no_op()
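
The tau branch implements a soft ("Polyak") update, dest ← tau·source + (1 − tau)·dest, while tau == 1.0 falls back to a hard copy via sync. A minimal sketch of the soft rule alone, assuming TensorFlow 1.x and parallel variable lists (the names are illustrative):

import tensorflow as tf

def soft_sync(source_vars, dest_vars, tau=0.005):
    # Move each target variable a small step toward its online counterpart.
    return tf.group(*[
        dest.assign(tau * src + (1.0 - tau) * dest)
        for src, dest in zip(source_vars, dest_vars)
    ])
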
Example 13: _graph_fn_kl_divergence
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def _graph_fn_kl_divergence(self, distribution, distribution_b):
"""
Kullback-Leibler divergence between two distribution objects.
Args:
        distribution (tf.Distribution): The first (already parameterized) backend-specific distribution.
distribution_b (tf.Distribution): The other distribution object.
Returns:
DataOp: (batch-wise) KL-divergence between the two distributions.
"""
if get_backend() == "tf":
return tf.no_op()
        # TODO: Never tested. tf throws: NotImplementedError: No KL(distribution_a || distribution_b)
        # registered for distribution_a type Bernoulli and distribution_b type ndarray.
        # return tf.distributions.kl_divergence(
        #     distribution_a=distribution,
        #     distribution_b=distribution_b,
        #     allow_nan_stats=True,
        #     name=None
        # )
Example 14: build_mixture_weights_train_op
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
"""See `adanet.subnetwork.Builder`."""
if not self._learn_mixture_weights:
return tf.no_op()
return self._optimizer.minimize(loss=loss, var_list=var_list)
Example 15: build_mixture_weights_train_op
# Required module: import tensorflow [as alias]
# Or: from tensorflow import no_op [as alias]
def build_mixture_weights_train_op(self, loss, var_list, logits, labels,
iteration_step, summary):
"""See `adanet.subnetwork.Builder`."""
return tf.no_op("mixture_weights_train_op")