This article collects typical usage examples of the Python method tensorflow.local_variables. If you have been wondering what exactly tensorflow.local_variables does and how to use it, the curated code samples below may help. You can also explore further usage examples from the tensorflow module it belongs to.
The following shows 15 code examples of tensorflow.local_variables, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
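Before the examples, here is a minimal sketch (the variable names are illustrative, not taken from any example below) of what separates the two collections that tf.local_variables and tf.global_variables return:

import tensorflow as tf

# A regular variable goes into GraphKeys.GLOBAL_VARIABLES by default ...
step = tf.Variable(0, name='step')
# ... while passing collections explicitly keeps a variable local to the process.
epochs_read = tf.Variable(0, name='epochs_read', trainable=False,
                          collections=[tf.GraphKeys.LOCAL_VARIABLES])

print([v.op.name for v in tf.global_variables()])  # ['step']
print([v.op.name for v in tf.local_variables()])   # ['epochs_read']

with tf.Session() as sess:
    # Local variables have their own initializer; they are not covered by
    # tf.global_variables_initializer().
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    print(sess.run([step, epochs_read]))  # [0, 0]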
Example 1: test_sample_buffer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def test_sample_buffer():
    # This test also relies on numpy (np) and a project-local `utils` module.
    capacity = 100
    batch = 17
    lots = 100
    with tf.Graph().as_default(), tf.Session() as sess:
        buffer = utils.SampleBuffer(capacity=capacity, schemas=dict(x=utils.Schema(tf.int32, ())))
        # Initialize everything in both the GLOBAL_VARIABLES and LOCAL_VARIABLES
        # collections before running the ops below.
        tf.variables_initializer(tf.global_variables() + tf.local_variables()).run()
        i_p = tf.placeholder(dtype=tf.int32, shape=())
        add = buffer.add(x=batch * i_p + tf.range(batch))
        sample = buffer.sample(lots, seed=7)['x']
        all_data_1 = buffer.data()
        all_data_2 = buffer.read(tf.range(buffer.size()))
        for i in range(20):
            add.run(feed_dict={i_p: i})
            samples = sample.eval()
            hi = batch * (i + 1)
            lo = max(0, hi - capacity)
            assert lo <= samples.min() <= lo + 3
            assert hi - 5 <= samples.max() < hi
            np.testing.assert_equal(sess.run(all_data_1), sess.run(all_data_2))
Example 2: global_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def global_variables():
    """Returns global variables.

    Global variables are variables that are shared across machines in a
    distributed environment. The `Variable()` constructor or `get_variable()`
    automatically adds new variables to the graph collection
    `GraphKeys.GLOBAL_VARIABLES`. This convenience function returns the
    contents of that collection.

    An alternative to global variables are local variables. See
    @{tf.local_variables}

    Returns:
      A list of `Variable` objects.
    """
    return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
Example 3: local_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def local_variables():
    """Returns local variables.

    Local variables are per-process variables, usually not saved/restored to
    checkpoint and used for temporary or intermediate values. For example,
    they can be used as counters for metrics computations or the number of
    epochs this machine has read data.
    The `tf.contrib.framework.local_variable()` function automatically adds the
    new variable to `GraphKeys.LOCAL_VARIABLES`. This convenience function
    returns the contents of that collection.

    An alternative to local variables are global variables. See
    @{tf.global_variables}

    Returns:
      A list of local `Variable` objects.
    """
    return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
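The helper mentioned in this docstring can be exercised directly. A small sketch follows (it assumes TF 1.x with tf.contrib available; the variable name is illustrative):

import tensorflow as tf

# local_variable() registers the variable in GraphKeys.LOCAL_VARIABLES only,
# so it shows up in tf.local_variables() but not in tf.global_variables().
records_read = tf.contrib.framework.local_variable(0, name='records_read')

assert records_read in tf.local_variables()
assert records_read not in tf.global_variables()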
Example 4: global_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def global_variables():
    """Returns global variables.

    Global variables are variables that are shared across machines in a
    distributed environment. The `Variable()` constructor or `get_variable()`
    automatically adds new variables to the graph collection
    `GraphKeys.GLOBAL_VARIABLES`. This convenience function returns the
    contents of that collection.

    An alternative to global variables are local variables. See
    [`tf.local_variables()`](../../api_docs/python/state_ops.md#local_variables)

    Returns:
      A list of `Variable` objects.
    """
    return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
Example 5: local_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def local_variables():
    """Returns local variables.

    Local variables are per-process variables, usually not saved/restored to
    checkpoint and used for temporary or intermediate values. For example,
    they can be used as counters for metrics computations or the number of
    epochs this machine has read data.
    The `local_variable()` function automatically adds the new variable to
    `GraphKeys.LOCAL_VARIABLES`. This convenience function returns the
    contents of that collection.

    An alternative to local variables are global variables. See
    [`tf.global_variables()`](../../api_docs/python/state_ops.md#global_variables)

    Returns:
      A list of local `Variable` objects.
    """
    return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES)
Example 6: _test_streaming_sparse_precision_at_top_k
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def _test_streaming_sparse_precision_at_top_k(self,
                                               top_k_predictions,
                                               labels,
                                               expected,
                                               class_id=None,
                                               weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
        if weights is not None:
            weights = tf.constant(weights, tf.float32)
        metric, update = metrics.streaming_sparse_precision_at_top_k(
            top_k_predictions=tf.constant(top_k_predictions, tf.int32),
            labels=labels, class_id=class_id, weights=weights)

        # Fails without initialized vars.
        self.assertRaises(tf.OpError, metric.eval)
        self.assertRaises(tf.OpError, update.eval)
        tf.initialize_variables(tf.local_variables()).run()

        # Run per-step op and assert expected values.
        if math.isnan(expected):
            self.assertTrue(math.isnan(update.eval()))
            self.assertTrue(math.isnan(metric.eval()))
        else:
            self.assertEqual(expected, update.eval())
            self.assertEqual(expected, metric.eval())
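A side note on the initialization call in this test and the next: tf.initialize_variables was deprecated in TF 1.x in favour of tf.variables_initializer. Under the same test setup (a default session is active), the equivalent modern spellings would be the following sketch, which is not part of the original test:

# Initialize exactly the metric's local variables (modern spelling):
tf.variables_initializer(tf.local_variables()).run()
# Or initialize every local variable in the graph in one go:
tf.local_variables_initializer().run()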
Example 7: _test_streaming_sparse_average_precision_at_k
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def _test_streaming_sparse_average_precision_at_k(
        self, predictions, labels, k, expected, weights=None):
    with tf.Graph().as_default() as g, self.test_session(g):
        if weights is not None:
            weights = tf.constant(weights, tf.float32)
        predictions = tf.constant(predictions, tf.float32)
        metric, update = metrics.streaming_sparse_average_precision_at_k(
            predictions, labels, k, weights=weights)

        # Fails without initialized vars.
        self.assertRaises(tf.OpError, metric.eval)
        self.assertRaises(tf.OpError, update.eval)
        local_variables = tf.local_variables()
        tf.initialize_variables(local_variables).run()

        # Run per-step op and assert expected values.
        if math.isnan(expected):
            _assert_nan(self, update.eval())
            _assert_nan(self, metric.eval())
        else:
            self.assertAlmostEqual(expected, update.eval())
            self.assertAlmostEqual(expected, metric.eval())
Example 8: get_post_init_ops
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def get_post_init_ops(self):
    # Copy the initialized values of the variables on the parameter server
    # to the corresponding local copies of those variables.
    local_vars = tf.local_variables()
    local_var_by_name = dict(
        [(self._strip_port(v.name), v) for v in local_vars])
    post_init_ops = []
    for v in tf.global_variables():
        if v.name.startswith(PS_SHADOW_VAR_PREFIX + '/v0/'):
            prefix = self._strip_port(
                v.name[len(PS_SHADOW_VAR_PREFIX + '/v0'):])
            for i in range(self.benchmark_cnn.num_gpus):
                name = 'v%s%s' % (i, prefix)
                if name in local_var_by_name:
                    copy_to = local_var_by_name[name]
                    post_init_ops.append(copy_to.assign(v.read_value()))
    return post_init_ops
Example 9: savable_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def savable_variables(self):
    """Returns a list/dict of savable variables to pass to tf.train.Saver."""
    params = {}
    for v in tf.global_variables():
        assert (v.name.startswith(PS_SHADOW_VAR_PREFIX + '/v0/') or
                v.name == 'global_step:0')
        # We store variables in the checkpoint with the shadow variable prefix
        # removed so we can evaluate checkpoints in non-distributed replicated
        # mode. The checkpoints can also be loaded for training in
        # distributed_replicated mode.
        name = self._strip_port(self._remove_shadow_var_prefix_if_present(v.name))
        params[name] = v
    for v in tf.local_variables():
        # Non-trainable variables, such as batch norm moving averages, do not
        # have corresponding global shadow variables, so we add them here.
        # Trainable local variables have corresponding global shadow variables,
        # which were added in the global variable loop above.
        if v.name.startswith('v0/') and v not in tf.trainable_variables():
            params[self._strip_port(v.name)] = v
    return params
Example 10: _get_initial_sync_op
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def _get_initial_sync_op(self):
    """
    Get the op to copy-initialize all local variables from PS.
    """
    def strip_port(s):
        if s.endswith(':0'):
            return s[:-2]
        return s

    local_vars = tf.local_variables()
    local_var_by_name = dict([(strip_port(v.name), v) for v in local_vars])
    ops = []
    nr_shadow_vars = len(self._shadow_vars)
    for v in self._shadow_vars:
        vname = strip_port(v.name)
        for i in range(self.nr_gpu):
            name = 'tower%s/%s' % (i, vname)
            assert name in local_var_by_name, \
                "Shadow variable {} doesn't match a corresponding local variable!".format(v.name)
            copy_to = local_var_by_name[name]
            # logger.info("{} -> {}".format(v.name, copy_to.name))
            ops.append(copy_to.assign(v.read_value()))
    return tf.group(*ops, name='sync_{}_variables_from_ps'.format(nr_shadow_vars))
Example 11: _get_initial_sync_op
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def _get_initial_sync_op(self):
    """
    Get the op to copy-initialize all local variables from PS.
    """
    def strip_port(s):
        if s.endswith(':0'):
            return s[:-2]
        return s

    local_vars = tf.local_variables()
    local_var_by_name = {strip_port(v.name): v for v in local_vars}
    ops = []
    nr_shadow_vars = len(self._shadow_vars)
    for v in self._shadow_vars:
        vname = strip_port(v.name)
        for i in range(self.nr_gpu):
            name = 'tower%s/%s' % (i, vname)
            assert name in local_var_by_name, \
                "Shadow variable {} doesn't match a corresponding local variable!".format(v.name)
            copy_to = local_var_by_name[name]
            # logger.info("{} -> {}".format(v.name, copy_to.name))
            ops.append(copy_to.assign(v.read_value()))
    return tf.group(*ops, name='sync_{}_variables_from_ps'.format(nr_shadow_vars))
Example 12: guarantee_initialized_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def guarantee_initialized_variables(session, variables=None):
    """Guarantee that all the specified variables are initialized.

    If a variable is already initialized, leave it alone. Otherwise, initialize it.
    If no variables are specified, checks all variables in the default graph.

    Args:
        variables (list[tf.Variable])
    """
    name_to_var = {v.op.name: v for v in tf.global_variables() + tf.local_variables()}
    # Note: tf.report_uninitialized_variables yields variable names; under
    # Python 3, session.run returns them as bytes, so they may need .decode()
    # before the dict lookup below.
    uninitialized_variables = list(name_to_var[name] for name in
                                   session.run(tf.report_uninitialized_variables(variables)))
    init_op = tf.variables_initializer(uninitialized_variables)
    session.run(init_op)
    return uninitialized_variables
Example 13: auc_roc
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def auc_roc(y_true, y_pred):
    """
    Defines an AUC ROC metric callback, inspired by
    https://github.com/keras-team/keras/issues/6050#issuecomment-329996505
    """
    import tensorflow as tf
    # Any TensorFlow metric works here; tf.metrics.auc returns the current
    # value and an op that updates the underlying accumulator variables.
    value, update_op = tf.metrics.auc(y_true, y_pred)
    # Find all (local) variables created for this metric.
    metric_vars = [i for i in tf.local_variables() if 'auc_roc' in i.name.split('/')[1]]
    # Add the metric variables to the GLOBAL_VARIABLES collection so that
    # they get initialized for a new session.
    for v in metric_vars:
        tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, v)
    # Force the metric value to update on every evaluation.
    with tf.control_dependencies([update_op]):
        value = tf.identity(value)
    return value
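In practice this function is passed to Keras via model.compile(..., metrics=[auc_roc]), as in the linked issue. The self-contained sketch below instead exercises it directly in a session; the toy labels and scores are illustrative assumptions, not part of the source example:

import tensorflow as tf

# Toy data: the positive examples get higher scores than the negatives.
labels = tf.constant([0., 1., 1., 0.])
preds = tf.constant([0.1, 0.8, 0.6, 0.4])
value = auc_roc(labels, preds)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())  # covers the AUC accumulator variables
    sess.run(value)          # the control dependency forces update_op to run here
    print(sess.run(value))   # close to 1.0: the toy scores rank positives above negatives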
Example 14: create_global_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def create_global_variables(local_optimizer_vars=[]):
    """Creates global variables for local variables on the graph.

    Skips local variables that are created for local optimization.

    Returns dictionaries for local-to-global and global-to-local
    variable mappings.
    """
    local_to_global = {}
    global_to_local = {}
    with tf.device('/job:ps/task:0'):
        for v in tf.local_variables():
            if v not in local_optimizer_vars:
                v_g = tf.get_variable('g/' + v.op.name,
                                      shape=v.shape,
                                      dtype=v.dtype,
                                      trainable=True,
                                      collections=[tf.GraphKeys.GLOBAL_VARIABLES,
                                                   tf.GraphKeys.TRAINABLE_VARIABLES])
                local_to_global[v] = v_g
                global_to_local[v_g] = v
    return local_to_global, global_to_local
Example 15: create_global_variables
# Required import: import tensorflow [as alias]
# Or: from tensorflow import local_variables [as alias]
def create_global_variables():
    """Creates global variables for local variables on the graph.

    Returns dictionaries for local-to-global and global-to-local
    variable mappings.
    """
    local_to_global = {}
    global_to_local = {}
    with tf.device('/job:ps/task:0'):
        for v in tf.local_variables():
            v_g = tf.get_variable('g/' + v.op.name,
                                  shape=v.shape,
                                  dtype=v.dtype,
                                  trainable=True,
                                  collections=[tf.GraphKeys.GLOBAL_VARIABLES,
                                               tf.GraphKeys.TRAINABLE_VARIABLES])
            local_to_global[v] = v_g
            global_to_local[v_g] = v
    return local_to_global, global_to_local
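One hedged sketch (not from the source) of how the returned mappings might be consumed: building an op that copies the parameter-server ("global") values back into the worker-local variables, analogous to the shadow-variable sync ops in examples 8, 10 and 11. Building the ops requires no cluster; running them would require a /job:ps task to exist.

# Hypothetical follow-up, assuming local variables already exist on the graph:
local_to_global, global_to_local = create_global_variables()

sync_from_ps = tf.group(*[
    local_v.assign(global_v.read_value())
    for global_v, local_v in global_to_local.items()
], name='sync_local_variables_from_ps')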