本文整理汇总了Python中tensorflow.variable_axis_size_partitioner方法的典型用法代码示例。如果您正苦于以下问题:Python tensorflow.variable_axis_size_partitioner方法的具体用法?Python tensorflow.variable_axis_size_partitioner怎么用?Python tensorflow.variable_axis_size_partitioner使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow
的用法示例。
在下文中一共展示了tensorflow.variable_axis_size_partitioner方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: testControlDepsNone
# Required module: import tensorflow [as alias]
# Or: from tensorflow import variable_axis_size_partitioner [as alias]
def testControlDepsNone(self):
  """Checks that reading a partitioned variable ignores scope control deps.

  A plain op created under `tf.control_dependencies([c])` picks up `c` as a
  control input, but the ops that read a partitioned variable created in the
  same scope must not — mirroring the behavior of reading a regular variable.
  """
  with self.test_session() as session:
    c = tf.constant(1.0)
    with tf.control_dependencies([c]):
      # d gets the control dependency.
      d = tf.constant(2.0)
      # Partitioned variables do not.
      var_x = tf.get_variable(
          "x",
          initializer=tf.ones_initializer([2]),
          partitioner=tf.variable_axis_size_partitioner(4))
      # Snapshot the graph so we can isolate exactly the ops added by the
      # read below.
      ops_before_read = session.graph.get_operations()
      var_x.as_tensor()  # Caches the ops for subsequent reads.
      reading_ops = [op for op in session.graph.get_operations()
                     if op not in ops_before_read]
    self.assertEqual([c.op], d.op.control_inputs)
    # Tests that no control dependencies are added to reading a partitioned
    # variable which is similar to reading a variable.
    for op in reading_ops:
      self.assertEqual([], op.control_inputs)
示例2: testConcat
# Required module: import tensorflow [as alias]
# Or: from tensorflow import variable_axis_size_partitioner [as alias]
def testConcat(self):
  """Checks that `_concat()` DOES inherit control deps from its scope.

  Unlike the cached read tested above, explicitly concatenating the shards
  inside a `tf.control_dependencies` scope should attach the scope's control
  input to at least one of the concat ops, and the concatenated value must
  equal the variable's `as_tensor()` view.
  """
  with self.test_session() as session:
    var_x = tf.get_variable(
        "x",
        initializer=tf.constant([1., 2.]),
        partitioner=tf.variable_axis_size_partitioner(4))
    c = tf.constant(1.0)
    with tf.control_dependencies([c]):
      # Snapshot the graph so we can isolate the ops created by _concat().
      ops_before_concat = session.graph.get_operations()
      value = var_x._concat()  # pylint: disable=protected-access
      concat_ops = [op for op in session.graph.get_operations()
                    if op not in ops_before_concat]
      concat_control_inputs = [ci for op in concat_ops
                               for ci in op.control_inputs]
      self.assertTrue(
          c.op in concat_control_inputs,
          "var_x._concat() should get control dependencies from its scope.")
    tf.global_variables_initializer().run()
    self.assertAllClose(value.eval(), var_x.as_tensor().eval())
示例3: _testVariableAxisSizePartitioner
# Required module: import tensorflow [as alias]
# Or: from tensorflow import variable_axis_size_partitioner [as alias]
def _testVariableAxisSizePartitioner(self, name, axis, max_shard_bytes,
                                     expected_axis_shards,
                                     expected_partitions,
                                     max_shards=None):
  """Helper: partitions a fixed (4, 8, 16, 32) float32 variable and checks.

  Args:
    name: Variable name to create under the "root" scope.
    axis: Axis along which the partitioner splits the variable.
    max_shard_bytes: Byte budget per shard passed to the partitioner.
    expected_axis_shards: Expected number of shards produced.
    expected_partitions: Expected per-axis partition counts.
    max_shards: Optional hard cap on the shard count (partitioner kwarg).
  """
  partitioner = tf.variable_axis_size_partitioner(
      axis=axis, max_shard_bytes=max_shard_bytes, max_shards=max_shards)
  with tf.variable_scope("root", partitioner=partitioner):
    v0 = tf.get_variable(name, dtype=tf.float32, shape=(4, 8, 16, 32))
    # Inspect partitioning via private accessors of PartitionedVariable.
    v0_list = v0._get_variable_list()  # pylint: disable=protected-access
    v0_part = v0._get_partitions()  # pylint: disable=protected-access
    self.assertEqual(len(v0_list), expected_axis_shards)
    self.assertAllEqual(v0_part, expected_partitions)
示例4: benchmark_create_1000_partitions_with_100_parameter_servers
# Required module: import tensorflow [as alias]
# Or: from tensorflow import variable_axis_size_partitioner [as alias]
def benchmark_create_1000_partitions_with_100_parameter_servers(self):
  """Benchmarks reading/concatenating 1000-way partitioned variables.

  Spins up a local cluster with 1 worker and 100 parameter servers, creates
  variables partitioned into 1000 shards spread across the PS tasks, and
  benchmarks reading each back as a single concatenated tensor.
  """
  workers, _ = create_local_cluster(num_workers=1, num_ps=100)
  worker_sessions = [tf.Session(w.target) for w in workers]
  worker = worker_sessions[0]
  partition_sizes = (1, 512, 1024*32, 1024*128)
  partitioned = []
  for partition_size in partition_sizes:
    # max_shard_bytes is 4, shape is 1000*partition_size float32s which should
    # partition into 1000 shards, each containing partition_size float32s.
    print("Building partitioned variable with %d floats per partition"
          % partition_size)
    with tf.device(tf.train.replica_device_setter(ps_tasks=100)):
      partitioned_ix = tf.get_variable(
          "partitioned_%d" % partition_size,
          shape=[1000 * partition_size],
          dtype=tf.float32,
          # Each partition to have exactly N float32s
          partitioner=tf.variable_axis_size_partitioner(
              max_shard_bytes=4 * partition_size))
      # Concatenates along axis 0
      partitioned.append(tf.convert_to_tensor(partitioned_ix))
  tf.global_variables_initializer().run(session=worker)
  for ix, partition_size in enumerate(partition_sizes):
    print("Running benchmark having partitions with %d floats"
          % partition_size)
    self.run_op_benchmark(
        worker,
        partitioned[ix],
        name=("read_concat_1000_partitions_from_"
              "100_parameter_servers_partsize_%d_floats" % partition_size))