This page collects typical usage examples of the `_concat_offset` function from `tensorflow.python.ops.gen_array_ops` in Python. If you have been wondering what exactly `_concat_offset` does and how to use it, the curated code examples below may help.
The following 10 code examples of `_concat_offset` are shown, sorted by popularity by default.
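Before diving in, it may help to see what the underlying `ConcatOffset` op computes: given a concat dimension and one shape vector per input, it returns one offset vector per input, marking where that input starts inside the concatenated result. The following is a minimal pure-Python sketch of that semantics, for intuition only; it is not the real implementation (which lives in C++ and additionally validates ranks and the non-concat dimensions):

def concat_offset(concat_dim, shapes):
  """Hypothetical pure-Python reference for the ConcatOffset op's output."""
  rank = len(shapes[0])
  dim = concat_dim % rank  # negative dims wrap around, as in Example 1
  offsets = []
  running = 0
  for shape in shapes:
    offset = [0] * rank
    offset[dim] = running  # all zeros except along the concat dimension
    offsets.append(offset)
    running += shape[dim]
  return offsets

# Matches the expected values in the basic tests below:
# concat_offset(1, [[2, 3, 5], [2, 7, 5], [2, 20, 5]])
# == [[0, 0, 0], [0, 3, 0], [0, 10, 0]]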
Example 1: testNegativeDim
def testNegativeDim(self):
  with self.test_session(use_gpu=True) as sess:
    cdim = constant_op.constant(-2, dtypes.int32)
    s0 = constant_op.constant([2, 3, 5], dtypes.int32)
    s1 = constant_op.constant([2, 7, 5], dtypes.int32)
    s2 = constant_op.constant([2, 20, 5], dtypes.int32)
    off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
    ans = sess.run(off)
    self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])

    cdim = constant_op.constant(-3, dtypes.int32)
    s0 = constant_op.constant([2, 3, 5], dtypes.int32)
    s1 = constant_op.constant([1, 3, 5], dtypes.int32)
    s2 = constant_op.constant([3, 3, 5], dtypes.int32)
    off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
    ans = sess.run(off)
    self.assertAllEqual(ans, [[0, 0, 0], [2, 0, 0], [3, 0, 0]])
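Note that a negative `concat_dim` is interpreted relative to the rank of the shape vectors: with rank-3 shapes, `-2` addresses axis 1 and `-3` addresses axis 0, which is exactly the modulo behavior in the reference sketch above.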
Example 2: testDimMismatch
def testDimMismatch(self):
  with self.test_session() as sess:
    cdim = tf.constant(1, tf.int32)
    s0 = tf.constant([2, 3, 5], tf.int32)
    s1 = tf.constant([2, 7, 5, 10], tf.int32)
    off = gen_array_ops._concat_offset(cdim, [s0, s1])
    with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                 r"should contain 3 elem"):
      sess.run(off)
Example 3: testConcatDimOutOfRange
def testConcatDimOutOfRange(self):
  with self.test_session() as sess:
    cdim = tf.constant(4, tf.int32)
    s0 = tf.constant([2, 3, 5], tf.int32)
    s1 = tf.constant([2, 7, 5], tf.int32)
    off = gen_array_ops._concat_offset(cdim, [s0, s1])
    with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                 r"Concat dim is out of range: 4 vs. 3"):
      sess.run(off)
Example 4: testNotVector
def testNotVector(self):
  with self.test_session() as sess:
    cdim = tf.constant(1, tf.int32)
    s0 = tf.constant([[2, 3, 5]], tf.int32)
    s1 = tf.constant([[2, 7, 5]], tf.int32)
    off = gen_array_ops._concat_offset(cdim, [s0, s1])
    with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
                                 r"should be a vector"):
      sess.run(off)
Example 5: testBasic
def testBasic(self):
  for use_gpu in [False, True]:
    with self.test_session(use_gpu=use_gpu) as sess:
      cdim = tf.constant(1, tf.int32)
      s0 = tf.constant([2, 3, 5], tf.int32)
      s1 = tf.constant([2, 7, 5], tf.int32)
      s2 = tf.constant([2, 20, 5], tf.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
      ans = sess.run(off)
      self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
Example 6: testBasic
def testBasic(self):
  with self.test_session() as sess:
    with self.test_scope():
      cdim = constant_op.constant(1, dtypes.int32)
      s0 = constant_op.constant([2, 3, 5], dtypes.int32)
      s1 = constant_op.constant([2, 7, 5], dtypes.int32)
      s2 = constant_op.constant([2, 20, 5], dtypes.int32)
      off = gen_array_ops._concat_offset(cdim, [s0, s1, s2])
      ans = sess.run(off)
      self.assertAllEqual(ans, [[0, 0, 0], [0, 3, 0], [0, 10, 0]])
Example 7: testSizeMismatch
def testSizeMismatch(self):
  with self.test_session() as sess:
    cdim = tf.constant(1, tf.int32)
    s0 = tf.constant([2, 3, 5], tf.int32)
    s1 = tf.constant([2, 7, 10], tf.int32)
    off = gen_array_ops._concat_offset(cdim, [s0, s1])
    with self.assertRaisesRegexp(
        tf.errors.InvalidArgumentError,
        r"All dimensions except 1 must match. Input 1 has shape \[2 7 10\] "
        r"and doesn't match input 0 with shape \[2 3 5\]."):
      sess.run(off)
Example 8: _ConcatGrad
def _ConcatGrad(op, grad):
  """Gradient for concat op."""

  def _CreateDenseMaskAndBegin(sizes, concat_dim):
    """Create variables for iteratively slicing a dense gradients tensor."""
    # Since shape is 1-D, shape_of_shape = [rank-of-inputs]
    shape_of_shape = array_ops.shape(sizes[0])
    # Make a vector of length equal to the input's dimensions,
    # with 0's everywhere and 1 in the concat dim position.
    # Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
    mask = array_ops.concat(0,
                            [array_ops.fill(
                                array_ops.expand_dims(concat_dim, 0), 0),
                             [1],
                             array_ops.fill(
                                 shape_of_shape - concat_dim - 1, 0)])
    begin = array_ops.fill(shape_of_shape, 0)
    return mask, begin

  # Degenerate concatenation, just return grad.
  if len(op.inputs) == 2:
    return [None, grad]

  concat_dim = op.inputs[0]
  out_grads = []
  if isinstance(grad, ops.Tensor):
    # Get the inputs' tensor shapes
    sizes = array_ops.shape_n(op.inputs[1:])
    # pylint: disable=protected-access
    offset = gen_array_ops._concat_offset(concat_dim, sizes)
    # pylint: enable=protected-access
    for (begin, size) in zip(offset, sizes):
      out_grads.append(array_ops.slice(grad, begin, size))
  elif isinstance(grad, ops.IndexedSlices):
    concat_dim_static = tensor_util.constant_value(concat_dim)
    if concat_dim_static is None:
      raise ValueError("Can only compute IndexedSlices gradient with "
                       "statically-known concat_dim")
    # Get the inputs' tensor shapes
    sizes = [array_ops.shape(x) for x in op.inputs[1:]]
    if concat_dim_static > 0:
      # IndexedSlices, concat_dim > 0. Each input gets IndexedSlices gradients
      # with all the indices, but with grad.values sliced accordingly. This
      # is like the Tensor case, except shape(grad.values)[0] is not equal to
      # shape(sizes[i])[0], since only a subset of the dim-0 values are stored.
      mask, begin = _CreateDenseMaskAndBegin(sizes, concat_dim)
      for size in sizes:
        new_values = array_ops.slice(
            grad.values,
            begin,
            array_ops.concat(0, [[-1], array_ops.slice(size, [1], [-1])]))
        out_grads.append(
            ops.IndexedSlices(new_values, grad.indices, size))
        # Lint complains begin = begin + ...
        begin = math_ops.add(begin, size * mask)
    else:
      # IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients
      # only for the relevant indices.
      start = constant_op.constant(0, dtype=grad.indices.dtype)
      for size in sizes:
        size_concat_dim = array_ops.gather(size, concat_dim)
        if size_concat_dim.dtype != grad.indices.dtype:
          size_concat_dim = math_ops.cast(size_concat_dim,
                                          dtype=grad.indices.dtype)
        end = start + size_concat_dim
        # Compute the 1-D Tensor of indices relevant for this input.
        indices_to_select = array_ops.squeeze(
            array_ops.where(math_ops.logical_and(grad.indices >= start,
                                                 grad.indices < end)),
            squeeze_dims=[1])
        new_indices = array_ops.gather(grad.indices, indices_to_select) - start
        new_values = array_ops.gather(grad.values, indices_to_select)
        out_grads.append(
            ops.IndexedSlices(new_values, new_indices, size))
        start = end
  else:
    raise TypeError("Expected Tensor or IndexedSlices, got %s" % type(grad))

  return [None] + out_grads
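Conceptually, the dense (`ops.Tensor`) branch above just cuts the upstream gradient back into the original input shapes, using `_concat_offset` to find where each piece begins; `_CreateDenseMaskAndBegin` plays the analogous role in the `IndexedSlices` branch by building a one-hot mask (for example `[0, 1, 0]` when `concat_dim` is 1 and the rank is 3) so that `begin` advances only along the concat axis. Here is a hypothetical NumPy sketch of the dense case, not part of TensorFlow, with shapes borrowed from the basic tests:

import numpy as np

# The gradient of concat w.r.t. each input is the matching slice of the
# upstream gradient along the concat dimension (axis 1 here).
a, b, c = np.ones((2, 3, 5)), np.ones((2, 7, 5)), np.ones((2, 20, 5))
grad = np.random.randn(2, 30, 5)  # upstream gradient of concat([a, b, c], axis=1)

pieces, start = [], 0
for x in (a, b, c):
  n = x.shape[1]
  pieces.append(grad[:, start:start + n, :])  # slice at the running offset
  start += n  # same bookkeeping as _concat_offset

assert [p.shape for p in pieces] == [a.shape, b.shape, c.shape]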
Example 9: _ConcatGradHelper
def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
  """Gradient for concat op.

  Args:
    op: An operation.
    grad: `Tensor` or `IndexedSlices` representing the gradients with respect
      to each output of the op.
    start_value_index: An integer index of the first value in the op.inputs.
    end_value_index: An integer index of the last value in the op.inputs.
    dim_index: An integer index of concat_dim or axis parameter in op.inputs.

  Returns:
    Tensors representing the partial gradients with respect to each input
    of the op.

  Raises:
    ValueError: if concat_dim/axis is not statically known.
  """

  def _CreateDenseMaskAndBegin(sizes, concat_dim):
    """Create variables for iteratively slicing a dense gradients tensor."""
    # Since shape is 1-D, shape_of_shape = [rank-of-inputs]
    shape_of_shape = array_ops.shape(sizes[0])
    # Make a vector of length equal to the input's dimensions,
    # with 0's everywhere and 1 in the concat dim position.
    # Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
    mask = array_ops.concat([
        array_ops.fill(array_ops.expand_dims(concat_dim, 0), 0), [1],
        array_ops.fill(shape_of_shape - concat_dim - 1, 0)
    ], 0)
    begin = array_ops.fill(shape_of_shape, 0)
    return mask, begin

  def _ExtractInputShapes(inputs):
    """Extract the shapes of a set of input tensors."""
    sizes = []
    fully_known = True
    for x in inputs:
      input_shape = array_ops.shape(x)
      if not isinstance(input_shape,
                        ops.Tensor) or input_shape.op.type != "Const":
        fully_known = False
        break
      else:
        sizes.append(input_shape)

    if fully_known:
      return sizes
    else:
      return array_ops.shape_n(inputs)

  # Degenerate concatenation, just return grad.
  if len(op.inputs) == 2:
    return grad + [None] if end_value_index <= dim_index else [None] + grad

  concat_dim = op.inputs[dim_index]
  input_values = op.inputs[start_value_index:end_value_index]
  # Using mod here for convenience since concat_dim is already verified
  # in concat implementation to be within the allowed [-rank, rank) range.
  non_neg_concat_dim = concat_dim % array_ops.rank(input_values[0])
  out_grads = []
  if isinstance(grad, ops.Tensor):
    # Get the inputs' tensor shapes
    sizes = _ExtractInputShapes(input_values)
    # The magic number of 16 was found through benchmarking a range of sizes
    # on CPUs and a Maxwell TitanX. A speedup was seen in a large majority of
    # cases when switching implementations at N=16, but it is possible that
    # there will be a small number of performance regressions.
    # pylint: disable=protected-access
    if len(sizes) > 16:
      # extract the size of each input along the concat dimension
      sizes = array_ops.squeeze(
          array_ops.slice(
              array_ops.stack(sizes, axis=1), [non_neg_concat_dim, 0],
              [1, -1]))
      out_grads = array_ops.split(grad, sizes, non_neg_concat_dim)
    else:
      offset = gen_array_ops._concat_offset(non_neg_concat_dim, sizes)
      for (begin, size) in zip(offset, sizes):
        out_grads.append(array_ops.slice(grad, begin, size))
    # pylint: enable=protected-access
  elif isinstance(grad, ops.IndexedSlices):
    concat_dim_static = tensor_util.constant_value(concat_dim)
    if concat_dim_static is None:
      raise ValueError("Can only compute IndexedSlices gradient with "
                       "statically-known concat_dim")
    if concat_dim_static < 0:
      rank = tensor_util.constant_value(array_ops.rank(input_values[0]))
      if rank is None:
        raise ValueError("Can only compute IndexedSlices gradient with "
                         "negative concat_dim when first value rank is "
                         "statically-known.")
      concat_dim_static %= rank
    # Get the inputs' tensor shapes
    sizes = [array_ops.shape(x) for x in input_values]
    if concat_dim_static > 0:
      # IndexedSlices, non_neg_concat_dim > 0. Each input gets IndexedSlices
      # gradients with all the indices, but with grad.values sliced
      # accordingly. This is like the Tensor case, except
      # shape(grad.values)[0] is not equal
# ... (the remainder of this example is omitted in the source) ...
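The `len(sizes) > 16` fast path above replaces one `slice` per input with a single `split` along the concat axis. Below is a hypothetical standalone illustration of that equivalence (shapes borrowed from Example 5; this snippet is not part of the example above):

import tensorflow as tf

# Hypothetical illustration of the N > 16 fast path: split the upstream
# gradient once along the concat axis instead of slicing per input.
grad = tf.ones([2, 30, 5])  # stands in for the gradient of the concat output
out_grads = tf.split(grad, [3, 7, 20], axis=1)
# -> three tensors of shapes [2, 3, 5], [2, 7, 5], and [2, 20, 5]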
Example 10: _ConcatGradHelper
def _ConcatGradHelper(op, grad, start_value_index, end_value_index, dim_index):
  """Gradient for concat op.

  Args:
    op: An operation.
    grad: `Tensor` or `IndexedSlices` representing the gradients with respect
      to each output of the op.
    start_value_index: An integer index of the first value in the op.inputs.
    end_value_index: An integer index of the last value in the op.inputs.
    dim_index: An integer index of concat_dim or axis parameter in op.inputs.

  Returns:
    Tensors representing the partial gradients with respect to each input
    of the op.

  Raises:
    ValueError: if concat_dim/axis is not statically known.
  """

  def _CreateDenseMaskAndBegin(sizes, concat_dim):
    """Create variables for iteratively slicing a dense gradients tensor."""
    # Since shape is 1-D, shape_of_shape = [rank-of-inputs]
    shape_of_shape = array_ops.shape(sizes[dim_index])
    # Make a vector of length equal to the input's dimensions,
    # with 0's everywhere and 1 in the concat dim position.
    # Note: Can't use sparse_to_dense since it isn't GPU-capable (for now)
    mask = array_ops.concat_v2(
        [array_ops.fill(
            array_ops.expand_dims(concat_dim, 0), 0),
         [1],
         array_ops.fill(
             shape_of_shape - concat_dim - 1, 0)],
        0)
    begin = array_ops.fill(shape_of_shape, 0)
    return mask, begin

  def _ExtractInputShapes(inputs):
    """Extract the shapes of a set of input tensors."""
    sizes = []
    fully_known = True
    for x in inputs:
      input_shape = array_ops.shape(x)
      if not isinstance(input_shape,
                        ops.Tensor) or input_shape.op.type != "Const":
        fully_known = False
        break
      else:
        sizes.append(input_shape)

    if fully_known:
      return sizes
    else:
      return array_ops.shape_n(inputs)

  # Degenerate concatenation, just return grad.
  if len(op.inputs) == 2:
    return grad + [None] if end_value_index <= dim_index else [None] + grad

  concat_dim = op.inputs[dim_index]
  input_values = op.inputs[start_value_index:end_value_index]
  out_grads = []
  if isinstance(grad, ops.Tensor):
    # Get the inputs' tensor shapes
    sizes = _ExtractInputShapes(input_values)
    # The following lines are to be enabled once ready:
    # if len(sizes) > 16:
    #   sizes = array_ops.squeeze(array_ops.slice(
    #       array_ops.pack(sizes, axis=1), [concat_dim, 0], [1, -1]))
    #   out_grads = array_ops.split_v(grad, sizes, concat_dim)
    # else:
    # pylint: disable=protected-access
    offset = gen_array_ops._concat_offset(concat_dim, sizes)
    # pylint: enable=protected-access
    for (begin, size) in zip(offset, sizes):
      out_grads.append(array_ops.slice(grad, begin, size))
  elif isinstance(grad, ops.IndexedSlices):
    concat_dim_static = tensor_util.constant_value(concat_dim)
    if concat_dim_static is None:
      raise ValueError("Can only compute IndexedSlices gradient with "
                       "statically-known concat_dim")
    # Get the inputs' tensor shapes
    sizes = [array_ops.shape(x) for x in input_values]
    if concat_dim_static > 0:
      # IndexedSlices, concat_dim > 0. Each input gets IndexedSlices gradients
      # with all the indices, but with grad.values sliced accordingly. This
      # is like the Tensor case, except shape(grad.values)[0] is not equal to
      # shape(sizes[i])[0], since only a subset of the dim-0 values are stored.
      mask, begin = _CreateDenseMaskAndBegin(sizes, concat_dim)
      for size in sizes:
        new_values = array_ops.slice(
            grad.values,
            begin,
            array_ops.concat_v2(
                [[-1], array_ops.slice(size, [1], [-1])], 0))
        out_grads.append(
            ops.IndexedSlices(new_values, grad.indices, size))
        # Lint complains begin = begin + ...
        begin = math_ops.add(begin, size * mask)
    else:
      # IndexedSlices, concat_dim == 0. Each input gets IndexedSlices gradients
# ... (the remainder of this example is omitted in the source) ...