This article collects typical usage examples of the tensorflow.assign method in Python. If you are wondering what tensorflow.assign does, how to use it, or what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples of the tensorflow module that this method belongs to.
The following presents 15 code examples of tensorflow.assign, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
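Before the examples, a minimal sketch of the method itself, assuming TensorFlow 1.x graph mode (all examples on this page are 1.x-style): tf.assign(ref, value) returns an op that, when run, writes value into the variable ref and yields the new value. The variable name `counter` is illustrative.

import tensorflow as tf

counter = tf.Variable(0, name='counter')
increment = tf.assign(counter, counter + 1)  # op that writes counter + 1 back into counter

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(increment))  # 1
    print(sess.run(increment))  # 2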
Example 1: init_uninited_vars
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def init_uninited_vars(vars=None):
    """Initialize all tf.Variables in `vars` (default: all global variables) that are not yet initialized."""
    if vars is None:
        vars = tf.global_variables()
    test_vars = []
    test_ops = []
    with tf.control_dependencies(None):  # ignore surrounding control_dependencies
        for var in vars:
            assert is_tf_expression(var)
            try:
                tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/IsVariableInitialized:0'))
            except KeyError:
                # Op does not exist => variable may be uninitialized.
                test_vars.append(var)
                with absolute_name_scope(var.name.split(':')[0]):
                    test_ops.append(tf.is_variable_initialized(var))
    init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]
    run([var.initializer for var in init_vars])
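A hedged usage note: because already-initialized variables are skipped, this helper can be called after restoring a checkpoint to initialize only the newly added variables. The call sites below are illustrative:

# After constructing new layers on top of a restored graph:
init_uninited_vars()                              # initializes only uninitialized variables
# or restrict the check to an explicit list:
init_uninited_vars(vars=tf.trainable_variables())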
#----------------------------------------------------------------------------
# Set the values of given tf.Variables.
# Equivalent to the following, but more efficient and does not bloat the tf graph:
#   tfutil.run([tf.assign(var, value) for var, value in var_to_value_dict.items()])
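A hedged usage sketch of the set_vars helper this comment refers to (its implementation appears as Example 13 below); the `net.vars` dictionary and shapes are illustrative:

import numpy as np

# Overwrite two variables in one session call, reusing cached setter ops
# instead of adding a new tf.assign to the graph each time:
set_vars({
    net.vars['weight']: np.zeros([512, 512], dtype=np.float32),
    net.vars['bias']:   np.zeros([512],      dtype=np.float32),
})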
Example 2: finalize_autosummaries
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def finalize_autosummaries():
    """Build the average/reset/report ops for every accumulated autosummary (runs only once)."""
    global _autosummary_finalized
    if _autosummary_finalized:
        return
    _autosummary_finalized = True
    init_uninited_vars([var for vars in _autosummary_vars.values() for var in vars])
    with tf.device(None), tf.control_dependencies(None):
        for name, vars in _autosummary_vars.items():
            id = name.replace('/', '_')
            with absolute_name_scope('Autosummary/' + id):
                sum = tf.add_n(vars)  # elementwise sum of the [numerator, denominator] accumulators
                avg = sum[0] / sum[1]
                with tf.control_dependencies([avg]):  # read before resetting
                    reset_ops = [tf.assign(var, tf.zeros(2)) for var in vars]
                    with tf.name_scope(None), tf.control_dependencies(reset_ops):  # reset before reporting
                        tf.summary.scalar(name, avg)
# Internal helper for creating autosummary accumulators.
Example 3: _create_autosummary_var
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def _create_autosummary_var(name, value_expr):
    assert not _autosummary_finalized
    v = tf.cast(value_expr, tf.float32)
    if v.shape.ndims == 0:  # scalar: count it with weight 1
        v = [v, np.float32(1.0)]
    elif v.shape.ndims == 1:  # vector: sum of elements and their count
        v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]
    else:  # higher-rank tensor: sum of elements and total element count
        v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]
    v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))
    with tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(2))  # [numerator, denominator]
        update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))
    if name in _autosummary_vars:
        _autosummary_vars[name].append(var)
    else:
        _autosummary_vars[name] = [var]
    return update_op
#----------------------------------------------------------------------------
# Call filewriter.add_summary() with all summaries in the default graph,
# automatically finalizing and merging them on the first call.
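A minimal sketch of the save_summaries function this comment describes, assuming the finalize_autosummaries helper from Example 2 and the module-level run() used elsewhere on this page; the _summary_merge_op cache name is an assumption:

_summary_merge_op = None

def save_summaries(filewriter, global_step=None):
    global _summary_merge_op
    if _summary_merge_op is None:
        finalize_autosummaries()                        # build avg/reset ops on first call
        with tf.device(None), tf.control_dependencies(None):
            _summary_merge_op = tf.summary.merge_all()  # merge every summary in the default graph
    filewriter.add_summary(run(_summary_merge_op), global_step)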
Example 4: simulate
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def simulate(self, action):
    # There is a subtlety here. We need to collect data in the order
    #   obs, action = policy(obs); done, reward = env(obs, action),
    # so we must enqueue the current data before assigning the new observation.
    reward, done = self._batch_env.simulate(action)
    with tf.control_dependencies([reward, done]):
        enqueue_op = self.speculum.enqueue(
            [self._observ.read_value(), reward, done, action])
    with tf.control_dependencies([enqueue_op]):
        assign = self._observ.assign(self._batch_env.observ)
    with tf.control_dependencies([assign]):
        return tf.identity(reward), tf.identity(done)
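The control-dependency chain matters: running the returned identities forces the enqueue and the observation assignment to happen first, in that order. A hedged sketch of how the ops might be driven (the `env` and `sess` names are illustrative):

reward_op, done_op = env.simulate(action_op)   # builds the enqueue -> assign -> identity chain
reward, done = sess.run([reward_op, done_op])  # one run executes all three in order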
Example 5: setup_popart
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def setup_popart(self):
    # See https://arxiv.org/pdf/1602.07714.pdf for details.
    self.old_std = tf.placeholder(tf.float32, shape=[1], name='old_std')
    new_std = self.ret_rms.std
    self.old_mean = tf.placeholder(tf.float32, shape=[1], name='old_mean')
    new_mean = self.ret_rms.mean

    self.renormalize_Q_outputs_op = []
    for vs in [self.critic.output_vars, self.target_critic.output_vars]:
        assert len(vs) == 2
        M, b = vs
        assert 'kernel' in M.name
        assert 'bias' in b.name
        assert M.get_shape()[-1] == 1
        assert b.get_shape()[-1] == 1
        self.renormalize_Q_outputs_op += [M.assign(M * self.old_std / new_std)]
        self.renormalize_Q_outputs_op += [b.assign((b * self.old_std + self.old_mean - new_mean) / new_std)]
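The two assignments preserve the unnormalized Q-value: if the old output was q = std_old * (M x + b) + mean_old, then with M' = M * std_old / std_new and b' = (b * std_old + mean_old - mean_new) / std_new, the new output std_new * (M' x + b') + mean_new equals q exactly. A hedged numeric sanity check in plain NumPy (all values illustrative, scalar case):

import numpy as np

M, b = 2.0, 0.5                      # toy kernel and bias
old_mean, old_std = 1.0, 4.0
new_mean, new_std = 3.0, 2.0
x = 0.7

q_old = old_std * (M * x + b) + old_mean
M2 = M * old_std / new_std
b2 = (b * old_std + old_mean - new_mean) / new_std
q_new = new_std * (M2 * x + b2) + new_mean
assert np.isclose(q_old, q_new)      # denormalized output is unchanged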
Example 6: get_local_rank_size
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def get_local_rank_size(comm):
    """
    Returns this process's local rank on its machine, together with the total
    number of processes on that machine. The processes on a given machine are
    assigned local ranks 0, 1, 2, ..., N-1, where N is the number of processes
    on this machine. Useful if you want to assign one GPU per process.
    """
    this_node = platform.node()
    ranks_nodes = comm.allgather((comm.Get_rank(), this_node))
    node2rankssofar = defaultdict(int)
    local_rank = None
    for (rank, node) in ranks_nodes:
        if rank == comm.Get_rank():
            local_rank = node2rankssofar[node]
        node2rankssofar[node] += 1
    assert local_rank is not None
    return local_rank, node2rankssofar[this_node]
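A hedged usage sketch for pinning one GPU per local rank; the CUDA_VISIBLE_DEVICES convention is an assumption about the deployment:

import os
from mpi4py import MPI

local_rank, local_size = get_local_rank_size(MPI.COMM_WORLD)
os.environ['CUDA_VISIBLE_DEVICES'] = str(local_rank)  # one GPU per process on this node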
Example 7: get_checkpoint_init_fn
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def get_checkpoint_init_fn():
    """Returns the checkpoint init_fn if the checkpoint is provided."""
    if FLAGS.fine_tune_checkpoint:
        variables_to_restore = slim.get_variables_to_restore()
        global_step_reset = tf.assign(tf.train.get_or_create_global_step(), 0)
        # When restoring from a floating-point model, the min/max values for
        # quantized weights and activations are not present. We instruct slim
        # to ignore variables that are missing during restoration by setting
        # ignore_missing_vars=True.
        slim_init_fn = slim.assign_from_checkpoint_fn(
            FLAGS.fine_tune_checkpoint,
            variables_to_restore,
            ignore_missing_vars=True)

        def init_fn(sess):
            slim_init_fn(sess)
            # If we are restoring from a floating-point model, we need to reset
            # the global step to zero for the exponential decay to result in
            # reasonable learning rates.
            sess.run(global_step_reset)
        return init_fn
    else:
        return None
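A hedged sketch of how this init_fn is typically consumed; the slim.learning.train arguments and the FLAGS names besides fine_tune_checkpoint are assumptions based on common tf.contrib.slim training loops:

slim.learning.train(
    train_op,
    logdir=FLAGS.train_dir,                # assumed flag
    init_fn=get_checkpoint_init_fn(),      # None => train from scratch
    number_of_steps=FLAGS.num_steps)       # assumed flag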
Example 8: set_variables
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def set_variables(self, var_dict):
    """
    Set the model's variables to those provided in var_dict. This is e.g. used
    to restore the best seen parameters after training with patience.

    Parameters
    ----------
    var_dict: dict
        Dictionary of the form {var_name: var_value} to assign the variables in the model.

    Returns
    -------
    None.
    """
    with self.graph.as_default():
        if not hasattr(self, 'assign_placeholders'):
            self.assign_placeholders = {v.name: tf.placeholder(v.dtype, shape=v.get_shape())
                                        for v in self.varlist}
            self.assign_ops = {v.name: tf.assign(v, self.assign_placeholders[v.name])
                               for v in self.varlist}
        to_namespace = list(var_dict.keys())[0].split("/")[0]
        self.session.run(list(self.assign_ops.values()),
                         feed_dict={val: var_dict[self.convert_varname(key, to_namespace)]
                                    for key, val in self.assign_placeholders.items()})
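A hedged sketch of the patience pattern the docstring mentions; the get_variables snapshot method and the training loop are illustrative assumptions:

import numpy as np

best_loss, best_vars = np.inf, None
for epoch in range(max_epochs):
    val_loss = model.train_epoch()
    if val_loss < best_loss:
        best_loss = val_loss
        best_vars = model.get_variables()  # snapshot {var_name: var_value}
# Roll back to the best parameters seen during training:
model.set_variables(best_vars)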
Example 9: load_batch_norm
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def load_batch_norm(idx, variables, weights, assign_ops, offset):
    """Loads kernel, gamma, beta, mean, variance for Batch Normalization"""
    kernel = variables[idx]
    gamma, beta, mean, variance = variables[idx + 1:idx + 5]
    batch_norm_vars = [beta, gamma, mean, variance]
    for var in batch_norm_vars:
        shape = var.shape.as_list()
        num_params = np.prod(shape)
        var_weights = weights[offset:offset + num_params].reshape(shape)
        offset += num_params
        assign_ops.append(tf.assign(var, var_weights))

    shape = kernel.shape.as_list()
    num_params = np.prod(shape)
    # Darknet stores conv kernels as [out_ch, in_ch, h, w]; transpose to
    # TensorFlow's [h, w, in_ch, out_ch] layout.
    var_weights = weights[offset:offset + num_params].reshape((shape[3], shape[2], shape[0], shape[1]))
    var_weights = np.transpose(var_weights, (2, 3, 1, 0))
    offset += num_params
    assign_ops.append(tf.assign(kernel, var_weights))
    return assign_ops, offset
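A hedged sketch of the surrounding loading loop; the 5-integer header skip and the single-layer call are assumptions based on the usual Darknet weight-file format:

weights = np.fromfile(weights_file, dtype=np.float32)[5:]  # skip header (assumed 5 int32s)
variables = tf.global_variables()
assign_ops, offset = [], 0
assign_ops, offset = load_batch_norm(0, variables, weights, assign_ops, offset)
sess.run(assign_ops)  # one session call applies every queued tf.assign at once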
Example 10: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def __init__(self, *args, **kwargs):
    self.args, self.kwargs = args, kwargs
    self.scope = self._initialize(*args, **kwargs)
    self.all_variables = tf.get_collection(tf.GraphKeys.VARIABLES, self.scope.name)
    self.trainable_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope.name)
    self.num_params = sum(int(np.prod(v.get_shape().as_list())) for v in self.trainable_variables)
    self._setfromflat = U.SetFromFlat(self.trainable_variables)
    self._getflat = U.GetFlat(self.trainable_variables)

    logger.info('Trainable variables ({} parameters)'.format(self.num_params))
    for v in self.trainable_variables:
        shp = v.get_shape().as_list()
        logger.info('- {} shape:{} size:{}'.format(v.name, shp, np.prod(shp)))
    logger.info('All variables')
    for v in self.all_variables:
        shp = v.get_shape().as_list()
        logger.info('- {} shape:{} size:{}'.format(v.name, shp, np.prod(shp)))

    placeholders = [tf.placeholder(v.value().dtype, v.get_shape().as_list()) for v in self.all_variables]
    self.set_all_vars = U.function(
        inputs=placeholders,
        outputs=[],
        updates=[tf.group(*[v.assign(p) for v, p in zip(self.all_variables, placeholders)])]
    )
Example 11: sync_from_root
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def sync_from_root(sess, variables, comm=None):
    """
    Send the root node's parameters to every worker.

    Arguments:
        sess: the TensorFlow session.
        variables: all parameter variables, including the optimizer's.
    """
    import tensorflow as tf
    if comm is None:
        comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    for var in variables:
        if rank == 0:
            comm.Bcast(sess.run(var))               # root broadcasts its current value
        else:
            returned_var = np.empty(var.shape, dtype='float32')
            comm.Bcast(returned_var)                # workers receive into a buffer...
            sess.run(tf.assign(var, returned_var))  # ...and assign it to their copy
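A hedged usage sketch, typically run once after building the graph so every MPI worker starts from identical parameters (the surrounding names are illustrative):

sess.run(tf.global_variables_initializer())
sync_from_root(sess, tf.global_variables())  # workers now mirror rank 0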
Example 12: load_old_graph
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def load_old_graph(self, ckpt):
    ckpt_loader = create_loader(ckpt)
    self.say(old_graph_msg.format(ckpt))

    for var in tf.global_variables():
        name = var.name.split(':')[0]
        args = [name, var.get_shape()]
        val = ckpt_loader(args)
        assert val is not None, \
            'Cannot find and load {}'.format(var.name)
        shp = val.shape
        plh = tf.placeholder(tf.float32, shp)
        # Note: this creates a fresh placeholder/assign pair per variable on
        # every call; Example 13 (set_vars) shows a pattern that reuses setters.
        op = tf.assign(var, plh)
        self.sess.run(op, {plh: val})
Example 13: set_vars
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def set_vars(var_to_value_dict):
    ops = []
    feed_dict = {}
    for var, value in var_to_value_dict.items():
        assert is_tf_expression(var)
        try:
            setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/setter:0'))  # look for existing op
        except KeyError:
            with absolute_name_scope(var.name.split(':')[0]):
                with tf.control_dependencies(None):  # ignore surrounding control_dependencies
                    setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, 'new_value'), name='setter')  # create new setter
        ops.append(setter)
        feed_dict[setter.op.inputs[1]] = value
    run(ops, feed_dict)
#----------------------------------------------------------------------------
# Autosummary creates an identity op that internally keeps track of the input
# values and automatically shows up in TensorBoard. The reported value
# represents an average over input components. The average is accumulated
# constantly over time and flushed when save_summaries() is called.
#
# Notes:
# - The output tensor must be used as an input for something else in the
# graph. Otherwise, the autosummary op will not get executed, and the average
# value will not get accumulated.
# - It is perfectly fine to include autosummaries with the same name in
# several places throughout the graph, even if they are executed concurrently.
# - It is ok to also pass in a python scalar or numpy array. In this case, it
# is added to the average immediately.
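A hedged usage sketch of the autosummary pattern these notes describe; the autosummary() entry point itself is not shown on this page, and the training-loop names here are assumptions:

# Wrap a value of interest; the returned identity must feed something downstream.
loss = autosummary('Loss/train', loss)
train_op = optimizer.minimize(loss)          # running train_op also accumulates the average

for step in range(num_steps):
    sess.run(train_op)
    if step % 100 == 0:
        save_summaries(filewriter, step)     # flush accumulated averages to TensorBoard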
Example 14: setup_as_moving_average_of
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def setup_as_moving_average_of(self, src_net, beta=0.99, beta_nontrainable=0.0):
    assert isinstance(src_net, Network)
    with absolute_name_scope(self.scope):
        with tf.name_scope('MovingAvg'):
            ops = []
            for name, var in self.vars.items():
                if name in src_net.vars:
                    cur_beta = beta if name in self.trainables else beta_nontrainable
                    new_value = lerp(src_net.vars[name], var, cur_beta)
                    ops.append(var.assign(new_value))
            return tf.group(*ops)
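A hedged training-loop sketch for the exponential moving average; the G/Gs naming appears to follow the ProGAN/StyleGAN convention this helper comes from, but the loop itself is illustrative:

Gs_update_op = Gs.setup_as_moving_average_of(G, beta=0.999)
for step in range(num_steps):
    sess.run(train_op)
    sess.run(Gs_update_op)  # Gs trails G: var <- lerp(G_var, var, beta)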
# Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).
Example 15: create_sync_ops
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign [as alias]
def create_sync_ops(self, host_device):
    """Create an assignment operation for each weight on all devices. The
    weight is assigned the value of the copy on `host_device`.
    """
    sync_ops = []
    host_params = self.params_device[host_device]
    for device, params in self.params_device.items():  # .items(); .iteritems() is Python 2 only
        if device == host_device:
            continue
        for k in self.params_names:
            if isinstance(params[k], tf.Variable):
                sync_ops += [tf.assign(params[k], host_params[k])]
    return sync_ops
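A hedged sketch of when these sync ops would be run in a multi-GPU loop; the device name and the update cadence are assumptions:

sync_ops = model.create_sync_ops(host_device='/gpu:0')
for step in range(num_steps):
    sess.run(train_op)   # host device updates its parameters
    sess.run(sync_ops)   # copy the host's weights to every other device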