This article collects typical usage examples of the tensorflow.get_local_variable method in Python. If you are unsure what tensorflow.get_local_variable does or how to use it, the curated code examples below may help. You can also browse further usage examples from the containing module, tensorflow.
Four code examples of tensorflow.get_local_variable are shown below, ordered by popularity.
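For context: in TensorFlow 1.x, tf.get_local_variable works like tf.get_variable but adds the variable to the LOCAL_VARIABLES collection and marks it non-trainable, so it is excluded from checkpoints by default and is initialized via tf.local_variables_initializer(). A minimal sketch (the variable name is illustrative, not from the examples below):

import tensorflow as tf  # TensorFlow 1.x API

# Scalar local variable: lives in tf.GraphKeys.LOCAL_VARIABLES, is not
# trainable, and is not saved in checkpoints by default.
seen_batches = tf.get_local_variable(
    'seen_batches', shape=(), dtype=tf.int32,
    initializer=tf.zeros_initializer())

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  print(sess.run(seen_batches))  # prints 0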
Example 1: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_local_variable [as alias]
# Also used below: the project's nested helper module (for nested.map)
def __init__(self, batch_env, step, is_training, should_log, config):
  self._batch_env = batch_env
  self._step = step  # Trainer step, not environment step.
  self._is_training = is_training
  self._should_log = should_log
  self._config = config
  self._cell = config.cell
  state = self._cell.zero_state(len(batch_env), tf.float32)
  # Mirror every tensor in the (possibly nested) RNN state with a
  # non-trainable local resource variable initialized to zeros.
  var_like = lambda x: tf.get_local_variable(
      x.name.split(':')[0].replace('/', '_') + '_var',
      shape=x.shape,
      initializer=lambda *_, **__: tf.zeros_like(x), use_resource=True)
  self._state = nested.map(var_like, state)
  # Also persist the previous action between simulator calls.
  self._prev_action = tf.get_local_variable(
      'prev_action_var', shape=self._batch_env.action.shape,
      initializer=lambda *_, **__: tf.zeros_like(self._batch_env.action),
      use_resource=True)
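The variables created here act as persistent per-environment state between session calls. A hypothetical reset helper, not part of the original class, might zero them between episodes; this sketch assumes the accompanying nested module also provides flatten, analogous to tf.nest.flatten:

def _reset_state(self):  # hypothetical helper, not in the original class
  # Zero every stored RNN-state variable and the previous action in place.
  state_resets = nested.map(
      lambda var: var.assign(tf.zeros_like(var)), self._state)
  action_reset = self._prev_action.assign(tf.zeros_like(self._prev_action))
  return tf.group(*nested.flatten(state_resets), action_reset)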
Example 2: value_op_with_initializer
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_local_variable [as alias]
# Also used below: import numpy as np
def value_op_with_initializer(value_op_fn, init_op_fn):
  """Make a value_op that gets set by an idempotent init_op on first invocation."""
  # Local flag recording whether the init op has already run in this session.
  init_has_been_run = tf.get_local_variable(
      'has_been_run',
      initializer=np.zeros(shape=(), dtype=np.bool),
      dtype=tf.bool)
  value_op = value_op_fn()

  def run_init_and_toggle():
    # Run the init op once, flip the flag, then return the initialized value.
    init_op = init_op_fn(value_op)
    with tf.control_dependencies([init_op]):
      assign_op = init_has_been_run.assign(True)
    with tf.control_dependencies([assign_op]):
      return tf.identity(value_op)

  return tf.cond(init_has_been_run, lambda: value_op, run_init_and_toggle)
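A hypothetical call site (the two helper functions are illustrative, not from the original module): the value op creates a local accumulator and the init op seeds it once per session.

def _make_total():  # hypothetical: the value op is itself a local variable
  return tf.get_local_variable(
      'running_total', shape=(), dtype=tf.float32,
      initializer=tf.zeros_initializer())

def _seed_total(value_op):  # hypothetical, idempotent: assigns the same constant
  return value_op.assign(100.0)

total = value_op_with_initializer(_make_total, _seed_total)
# Run tf.local_variables_initializer() first; the initial session.run(total)
# executes _seed_total's assign, and later runs skip it.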
Example 3: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_local_variable [as alias]
# Also used below: the project's nested helper module (for nested.map)
def __init__(self, batch_env, step, is_training, should_log, config):
  self._step = step  # Trainer step, not environment step.
  self._is_training = is_training
  self._should_log = should_log
  self._config = config
  self._cell = config.cell
  self._num_envs = len(batch_env)
  state = self._cell.zero_state(self._num_envs, tf.float32)
  # One non-trainable local resource variable per tensor of the RNN state.
  var_like = lambda x: tf.get_local_variable(
      x.name.split(':')[0].replace('/', '_') + '_var',
      shape=x.shape,
      initializer=lambda *_, **__: tf.zeros_like(x), use_resource=True)
  self._state = nested.map(var_like, state)
  # Unlike Example 1, the previous-action buffer is shaped from the action space.
  batch_action_shape = (self._num_envs,) + batch_env.action_space.shape
  self._prev_action = tf.get_local_variable(
      'prev_action_var', shape=batch_action_shape,
      initializer=lambda *_, **__: tf.zeros(batch_action_shape),
      use_resource=True)
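Both __init__ variants rely on nested.map to apply var_like to every tensor inside a possibly nested RNN state. A rough standalone equivalent in TensorFlow 1.x, assuming nested.map behaves like TensorFlow's own tf.nest.map_structure:

cell = tf.nn.rnn_cell.LSTMCell(32)
state = cell.zero_state(batch_size=4, dtype=tf.float32)  # LSTMStateTuple(c, h)

def var_like(x):
  # One non-trainable local resource variable per state tensor, zero-initialized.
  return tf.get_local_variable(
      x.name.split(':')[0].replace('/', '_') + '_var',
      shape=x.shape,
      initializer=lambda *_, **__: tf.zeros_like(x),
      use_resource=True)

state_vars = tf.nest.map_structure(var_like, state)  # same nested structure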
Example 4: local_state_variables
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_local_variable [as alias]
# Also used below: import collections (for collections.defaultdict)
def local_state_variables(init_values, return_init_values):
  """Create local variables initialized from init_values.

  This will create local variables from a list of init_values. Each variable
  will be named based on the value's shape and dtype.

  As a convenience, a boolean tensor allows you to return either the values of
  the created local variables or the original init values.

  Args:
    init_values: iterable of tensors
    return_init_values: boolean tensor

  Returns:
    local_vars: list of the created local variables.
    vals: if return_init_values is true, then this returns the values of
      init_values. Otherwise it returns the values of the local_vars.
  """
  if not init_values:
    return [], []

  # This generates a harmless warning when saving the metagraph.
  variable_use_count = tf.get_collection_ref(_LOCAL_STATE_VARIABLE_COLLECTION)
  if not variable_use_count:
    variable_use_count.append(collections.defaultdict(int))
  variable_use_count = variable_use_count[0]

  local_vars = []
  with tf.variable_scope(OPTIMIZER_SCOPE):
    # We can't use the init_value as an initializer as init_value may
    # itself depend on some problem variables. This would produce
    # inter-variable initialization order dependence which TensorFlow
    # sucks at making easy.
    for init_value in init_values:
      name = create_local_state_variable_name(init_value)
      unique_name = name + "_" + str(variable_use_count[name])
      variable_use_count[name] += 1
      # The overarching idea here is to be able to reuse variables between
      # different sessions on the same TensorFlow master without errors. By
      # uniquifying based on the type and name we mirror the checks made inside
      # TensorFlow, while still allowing some memory reuse. Ultimately this is a
      # hack due to the broken Session.reset().
      local_vars.append(
          tf.get_local_variable(
              unique_name,
              initializer=tf.zeros(
                  init_value.get_shape(), dtype=init_value.dtype)))

  # It makes things a lot simpler if we use the init_value the first
  # iteration, instead of the variable itself. It allows us to propagate
  # gradients through it as well as simplifying initialization. The variable
  # ends up assigned to after the first iteration.
  vals = tf.cond(return_init_values, lambda: init_values, lambda: local_vars)
  if len(init_values) == 1:
    # tf.cond extracts elements from singleton lists.
    vals = [vals]
  return local_vars, vals
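A hypothetical call, assuming OPTIMIZER_SCOPE, _LOCAL_STATE_VARIABLE_COLLECTION, and create_local_state_variable_name are defined at module level as in the surrounding file (the placeholder and values here are illustrative):

first_unroll = tf.placeholder(tf.bool, shape=(), name='first_unroll')
init_values = [tf.zeros([10]), tf.ones([3, 3])]
state_vars, state_vals = local_state_variables(init_values, first_unroll)
# Feed first_unroll=True on the first iteration so state_vals pass through
# init_values (keeping them differentiable); on later iterations feed False so
# the local variables supply the state instead.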