本文整理汇总了Python中tensorflow.python.ops.data_flow_ops.dynamic_partition函数的典型用法代码示例。如果您正苦于以下问题:Python dynamic_partition函数的具体用法?Python dynamic_partition怎么用?Python dynamic_partition使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了dynamic_partition函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: lookup
def lookup(self, keys, name=None):
    """Looks up `keys` across all table shards and stitches the results.

    Args:
      keys: Keys to look up; expected to be a vector with the table's key
        dtype.  # NOTE(review): non-vector keys are not yet supported, see TODO.
      name: Optional name for the created ops.

    Returns:
      The looked-up values, with shape `[num_keys] + value_shape`.

    Raises:
      TypeError: If `keys` does not match the table's key dtype.
    """
    if keys.dtype != self._key_dtype:
        raise TypeError('Signature mismatch. Keys must be dtype %s, got %s.' %
                        (self._key_dtype, keys.dtype))
    self._check_keys(keys)
    num_shards = self._num_shards
    # Fast path: a single shard needs no partitioning or stitching.
    if num_shards == 1:
        return self._table_shards[0].lookup(keys, name=name)

    shard_indices = self._shard_indices(keys)
    # TODO(andreasst): support 'keys' that are not vectors
    key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
                                                 num_shards)
    value_shards = [
        shard.lookup(shard_keys, name=name)
        for shard, shard_keys in zip(self._table_shards, key_shards)
    ]

    # Recover the original key order: partition the flat positions the same
    # way the keys were partitioned, then stitch the per-shard values back.
    num_keys = keys.get_shape().dims[0]
    original_indices = math_ops.range(num_keys)
    partitioned_indices = data_flow_ops.dynamic_partition(
        original_indices, shard_indices, num_shards)
    result = data_flow_ops.dynamic_stitch(partitioned_indices, value_shards)
    result.set_shape(
        tensor_shape.TensorShape([num_keys]).concatenate(self._value_shape))
    return result
示例2: insert
def insert(self, keys, values, name=None):
    """Inserts `keys`/`values` pairs into the appropriate table shards.

    Args:
      keys: Keys to insert; expected to be a vector.  # see TODO below
      values: Values associated with `keys`.
      name: Optional name for the created ops.

    Returns:
      A single op that groups the per-shard insert ops.
    """
    num_shards = self._num_shards
    # Fast path: with one shard everything goes to the same table.
    if num_shards == 1:
        return self._table_shards[0].insert(keys, values, name=name)

    shard_indices = self._shard_indices(keys)
    # TODO(andreasst): support 'keys' that are not vectors
    key_shards = data_flow_ops.dynamic_partition(keys, shard_indices,
                                                 num_shards)
    value_shards = data_flow_ops.dynamic_partition(values, shard_indices,
                                                   num_shards)
    insert_ops = []
    for shard, shard_keys, shard_values in zip(self._table_shards, key_shards,
                                               value_shards):
        insert_ops.append(shard.insert(shard_keys, shard_values, name=name))
    return control_flow_ops.group(*insert_ops)
示例3: _make_per_class_queues
def _make_per_class_queues(tensor_list, labels, num_classes, queue_capacity,
                           threads_per_queue):
    """Creates per-class-queues based on data and labels.

    Args:
      tensor_list: List of data tensors; each must have a fully defined
        per-example shape (all dims after the batch dim are static).
      labels: Integer tensor of class labels in [0, num_classes), used to
        route each example to its class queue.
      num_classes: Number of classes, i.e. number of queues to create.
      queue_capacity: Capacity of each per-class FIFO queue.
      threads_per_queue: Number of enqueue threads registered per queue.

    Returns:
      A list of `num_classes` FIFOQueues, one per class.
    """
    # Create one queue per class.
    queues = []
    data_shapes = []
    data_dtypes = []
    for data_tensor in tensor_list:
        # Per-example shape: drop the batch dimension. The queue requires a
        # fully defined shape, hence the assertion.
        per_data_shape = data_tensor.get_shape().with_rank_at_least(1)[1:]
        per_data_shape.assert_is_fully_defined()
        data_shapes.append(per_data_shape)
        data_dtypes.append(data_tensor.dtype)
    for i in range(num_classes):
        q = data_flow_ops.FIFOQueue(
            capacity=queue_capacity, shapes=data_shapes, dtypes=data_dtypes,
            name="stratified_sample_class%d_queue" % i)
        logging_ops.scalar_summary(
            "queue/%s/stratified_sample_class%d" % (q.name, i), q.size())
        queues.append(q)
    # Partition tensors according to labels. `partitions` is a list of lists, of
    # size num_classes X len(tensor_list). The number of tensors in partition `i`
    # should be the same for all tensors.
    all_partitions = [data_flow_ops.dynamic_partition(data, labels, num_classes)
                      for data in tensor_list]
    partitions = [[cur_partition[i] for cur_partition in all_partitions]
                  for i in range(num_classes)]
    # Enqueue each tensor on the per-class-queue.
    for i in range(num_classes):
        # Fix: a stray trailing comma previously made `enqueue_op` a 1-tuple,
        # so QueueRunner received a list of tuples rather than a list of ops.
        enqueue_op = queues[i].enqueue_many(partitions[i])
        queue_runner.add_queue_runner(
            queue_runner.QueueRunner(queues[i],
                                     [enqueue_op] * threads_per_queue))
    return queues
示例4: _make_per_class_queues
def _make_per_class_queues(data, labels, num_classes, queue_capacity,
                           threads_per_queue):
    """Creates per-class-queues based on data and labels.

    Args:
      data: A single data tensor whose per-example shape (all dims after the
        batch dim) is fully defined.
      labels: Integer tensor of class labels in [0, num_classes).
      num_classes: Number of classes, i.e. number of queues to create.
      queue_capacity: Capacity of each per-class FIFO queue.
      threads_per_queue: Number of enqueue threads registered per queue.

    Returns:
      A list of `num_classes` FIFOQueues, one per class.
    """
    # Create one queue per class.
    queues = []
    # Per-example shape: drop the batch dimension; the queue requires it to
    # be fully defined.
    per_data_shape = data.get_shape().with_rank_at_least(1)[1:]
    per_data_shape.assert_is_fully_defined()
    for i in range(num_classes):
        q = data_flow_ops.FIFOQueue(capacity=queue_capacity,
                                    shapes=per_data_shape, dtypes=[data.dtype],
                                    name='stratified_sample_class%d_queue' % i)
        logging_ops.scalar_summary('queue/stratified_sample_class%d' % i,
                                   q.size())
        queues.append(q)
    # Partition tensors according to labels.
    partitions = data_flow_ops.dynamic_partition(data, labels, num_classes)
    # Enqueue each tensor on the per-class-queue.
    for i in range(num_classes):
        # Fix: a stray trailing comma previously made `enqueue_op` a 1-tuple,
        # so QueueRunner received a list of tuples rather than a list of ops.
        enqueue_op = queues[i].enqueue_many(partitions[i])
        queue_runner.add_queue_runner(queue_runner.QueueRunner(
            queues[i], [enqueue_op] * threads_per_queue))
    return queues
示例5: testScalarIndexOutOfRange
def testScalarIndexOutOfRange(self):
    """A scalar partition index outside [0, num_partitions) raises at run time."""
    bad_index = 17  # deliberately >= num_partitions
    with self.test_session() as sess:
        partitions = data_flow_ops.dynamic_partition(
            np.zeros(5), bad_index, num_partitions=7)
        # The error message names the offending scalar and the valid range.
        with self.assertRaisesOpError(r"partitions = 17 is not in \[0, 7\)"):
            sess.run(partitions)
示例6: testErrorIndexOutOfRange
def testErrorIndexOutOfRange(self):
    """An out-of-range entry in a vector of indices raises, naming its position."""
    with self.test_session() as sess:
        data = constant_op.constant(
            [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11], [12, 13, 14]])
        # Entry at position 2 (value 99) is outside [0, 4).
        indices = constant_op.constant([0, 2, 99, 2, 2])
        partitions = data_flow_ops.dynamic_partition(data, indices,
                                                     num_partitions=4)
        with self.assertRaisesOpError(
            r"partitions\[2\] = 99 is not in \[0, 4\)"):
            sess.run(partitions)
示例7: testCUBBug
def testCUBBug(self):
    """Regression test: 3072 elements split into 16 unevenly sized partitions."""
    x = constant_op.constant(np.random.randn(3072))
    # Number of elements routed to each of the 16 partitions; sums to 3072.
    counts = [189, 184, 184, 191, 192, 195, 195, 195,
              188, 195, 188, 202, 194, 194, 194, 192]
    inds = []
    for partition_id, count in enumerate(counts):
        inds.extend([partition_id] * count)
    self.assertEqual(len(inds), x.shape[0])
    partitioned = data_flow_ops.dynamic_partition(x, inds, 16)
    with self.test_session() as sess:
        res = sess.run(partitioned)
    # The last partition must receive exactly its 192 assigned elements.
    self.assertEqual(res[-1].shape[0], 192)
示例8: testEmptyPartitions
def testEmptyPartitions(self):
    """Partitioning empty data yields an empty tensor for every partition."""
    with self.test_session(use_gpu=True) as sess:
        data = constant_op.constant([], dtype=dtypes.float32)
        indices = constant_op.constant([], dtype=dtypes.int32)
        partitions = data_flow_ops.dynamic_partition(data, indices,
                                                     num_partitions=2)
        partition_vals = sess.run(partitions)
    # Both partitions exist and are empty.
    for vals in partition_vals:
        self.assertAllEqual([], vals)
示例9: testSimpleComplex
def testSimpleComplex(self):
    """complex64 data is routed by index, preserving relative order."""
    with self.test_session(use_gpu=True) as sess:
        data = constant_op.constant([1 + 2j, 3 + 4j, 5 + 6j, 7 + 8j],
                                    dtype=dtypes.complex64)
        indices = constant_op.constant([1, 0, 1, 0], dtype=dtypes.int32)
        partitions = data_flow_ops.dynamic_partition(data, indices,
                                                     num_partitions=2)
        partition_vals = sess.run(partitions)
    # Index-0 elements land in partition 0, index-1 in partition 1,
    # each keeping its original relative order.
    self.assertAllEqual([3 + 4j, 7 + 8j], partition_vals[0])
    self.assertAllEqual([1 + 2j, 5 + 6j], partition_vals[1])
示例10: scatter_update
def scatter_update(cls, factor, indices, values, sharding_func):
    """Helper function for doing sharded scatter update.

    Args:
      factor: List of variables holding the (possibly sharded) factor.
      indices: Row indices to update.
      values: New values for the selected rows.
      sharding_func: Maps `indices` to (shard assignments, within-shard ids).

    Returns:
      An op that performs all per-shard scatter updates.
    """
    assert isinstance(factor, list)
    # Unsharded case: one variable holds everything.
    if len(factor) == 1:
        with ops.colocate_with(factor[0]):
            # TODO(agarwal): assign instead of scatter update for full batch update.
            return state_ops.scatter_update(factor[0], indices, values).op

    num_shards = len(factor)
    # Map each index to its shard and to its position within that shard.
    assignments, new_ids = sharding_func(indices)
    assert assignments is not None
    assignments = math_ops.cast(assignments, dtypes.int32)
    sharded_ids = data_flow_ops.dynamic_partition(new_ids, assignments,
                                                  num_shards)
    sharded_values = data_flow_ops.dynamic_partition(values, assignments,
                                                     num_shards)
    updates = [
        state_ops.scatter_update(shard_var, shard_ids, shard_values)
        for shard_var, shard_ids, shard_values in zip(factor, sharded_ids,
                                                      sharded_values)
    ]
    return control_flow_ops.group(*updates)
示例11: _DynamicPartitionGrads
def _DynamicPartitionGrads(op, *grads):
    """Gradients for DynamicPartition.

    Args:
      op: The forward DynamicPartition op.
      *grads: One incoming gradient per output partition.

    Returns:
      [gradient w.r.t. data, None] — the integer partition indices get no
      gradient.
    """
    data = op.inputs[0]
    indices = op.inputs[1]
    num_partitions = op.get_attr("num_partitions")
    # Number every element of `indices` with its flat position, partition
    # those positions exactly as the forward op partitioned `data`, then
    # stitch the incoming gradients back into the original layout.
    prefix_shape = array_ops.shape(indices)
    flat_positions = math_ops.range(math_ops.reduce_prod(prefix_shape))
    original_indices = array_ops.reshape(flat_positions, prefix_shape)
    partitioned_indices = data_flow_ops.dynamic_partition(
        original_indices, indices, num_partitions)
    data_grad = data_flow_ops.dynamic_stitch(partitioned_indices, grads)
    data_grad = array_ops.reshape(data_grad, array_ops.shape(data))
    return [data_grad, None]
示例12: testEmptyDataTwoDimensional
def testEmptyDataTwoDimensional(self):
    """Partitions of 2-D data with an empty inner dimension keep their rank."""
    data_list = [[], []]
    indices_list = [0, 1]
    with self.test_session(use_gpu=True) as sess:
        data = constant_op.constant(data_list, dtype=dtypes.float32)
        indices = constant_op.constant(indices_list, dtype=dtypes.int32)
        partitions = data_flow_ops.dynamic_partition(
            data, indices, num_partitions=3)
        partition_vals = sess.run(partitions)
    # Partitions 0 and 1 each receive one (empty) row.
    self.assertAllEqual([[]], partition_vals[0])
    self.assertAllEqual([[]], partition_vals[1])
    # Partition 2 receives nothing: shape (0, 0).
    # Fix: `np.float` is a deprecated alias removed in NumPy 1.24; use the
    # explicit `np.float64`.
    self.assertAllEqual(np.array([], dtype=np.float64).reshape(0, 0),
                        partition_vals[2])
示例13: testHigherRankIndexOutOfRange
def testHigherRankIndexOutOfRange(self):
    """Every position of a rank-2 index tensor is range-checked individually."""
    with self.test_session() as sess:
        shape = (2, 3)
        indices = array_ops.placeholder(shape=shape, dtype=np.int32)
        data = np.zeros(shape + (5,))
        partitions = data_flow_ops.dynamic_partition(
            data, indices, num_partitions=7)
        for row in xrange(2):
            for col in xrange(3):
                # Poison exactly one position and expect the error message
                # to name that position.
                bad = np.zeros(shape, dtype=np.int32)
                bad[row, col] = 17
                expected = (r"partitions\[%d,%d\] = 17 is not in \[0, 7\)"
                            % (row, col))
                with self.assertRaisesOpError(expected):
                    sess.run(partitions, feed_dict={indices: bad})
示例14: testLargeOneDimensional
def testLargeOneDimensional(self):
    """100k elements split by parity land in the right partitions, in order."""
    num = 100000
    data_list = list(range(num))
    indices_list = [value % 2 for value in range(num)]
    evens = list(range(0, num, 2))
    odds = list(range(1, num, 2))
    with self.test_session(use_gpu=True) as sess:
        data = constant_op.constant(data_list, dtype=dtypes.float32)
        indices = constant_op.constant(indices_list, dtype=dtypes.int32)
        partitions = data_flow_ops.dynamic_partition(
            data, indices, num_partitions=2)
        partition_vals = sess.run(partitions)
    self.assertAllEqual(evens, partition_vals[0])
    self.assertAllEqual(odds, partition_vals[1])
示例15: testEmptyParts
def testEmptyParts(self):
    """Partitions with no assigned indices come back as empty tensors."""
    with self.session(use_gpu=True) as sess:
        data = constant_op.constant([1, 2, 3, 4], dtype=dtypes.float32)
        # Only partitions 1 and 3 are referenced; 0 and 2 stay empty.
        indices = constant_op.constant([1, 3, 1, 3], dtype=dtypes.int32)
        partitions = data_flow_ops.dynamic_partition(data, indices,
                                                     num_partitions=4)
        partition_vals = sess.run(partitions)
    expected = [[], [1, 3], [], [2, 4]]
    self.assertEqual(len(expected), len(partition_vals))
    for want, got in zip(expected, partition_vals):
        self.assertAllEqual(want, got)