This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.concat. If you are unsure how the concat function is used in Python, how to call it, or what real calls to it look like, the curated code examples below should help.
Fifteen code examples of the concat function are shown below, ordered by popularity by default.
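All of the snippets below are excerpted from TensorFlow's own source and test files, so they omit their import statements. As a rough orientation, a minimal standalone use of array_ops.concat with its current signature, concat(values, axis, name='concat'), might look like the sketch below; note that a few of the older examples further down still use the pre-1.0 argument order concat(axis, values).

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops

a = constant_op.constant(np.ones((2, 3), dtype=np.float32))
b = constant_op.constant(np.zeros((2, 3), dtype=np.float32))
rows = array_ops.concat([a, b], axis=0)  # shape (4, 3)
cols = array_ops.concat([a, b], axis=1)  # shape (2, 6)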
Example 1: testConcatTuple
def testConcatTuple(self):
  c1 = np.random.rand(4, 4)
  c2 = np.random.rand(4, 4)
  with self.test_session():
    concat_list_t = array_ops.concat(0, [c1, c2])
    concat_tuple_t = array_ops.concat(0, (c1, c2))
    self.assertAllEqual(concat_list_t.eval(), concat_tuple_t.eval())
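This test comes from a pre-1.0 version of TensorFlow, where the concat axis was passed first. Against a current version the same comparison would be written roughly as follows (a sketch with the values-first argument order):

import numpy as np

from tensorflow.python.ops import array_ops

c1 = np.random.rand(4, 4)
c2 = np.random.rand(4, 4)
concat_list_t = array_ops.concat([c1, c2], 0)   # values first, axis second
concat_tuple_t = array_ops.concat((c1, c2), 0)  # tuples are accepted as well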
Example 2: testConcatNoScalars
def testConcatNoScalars(self):
  with self.cached_session():
    scalar = constant_op.constant(7)
    dim = array_ops.placeholder(dtypes.int32)
    with self.assertRaisesRegexp(
        ValueError, r"Can't concatenate scalars \(use tf\.stack instead\)"):
      array_ops.concat([scalar, scalar, scalar], dim)
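Rank-0 tensors have no axis to concatenate along, which is why concat rejects them; as the error message suggests, stacking creates the needed axis. A minimal sketch:

from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops

scalar = constant_op.constant(7)
vector = array_ops.stack([scalar, scalar, scalar])  # shape (3,): stack adds the axis that concat needs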
Example 3: circular_pad
def circular_pad(input_, width, kernel_size):
  """Pad input_ for computing (circular) convolution.

  Args:
    input_: the input tensor
    width: the width of the tensor.
    kernel_size: the kernel size of the filter.

  Returns:
    a tensor whose width is (width + kernel_size - 1).
  """
  beginning = kernel_size // 2
  end = kernel_size - 1 - beginning

  tmp_up = array_ops.slice(input_, [0, width - beginning, 0, 0],
                           [-1, beginning, width, -1])
  tmp_down = array_ops.slice(input_, [0, 0, 0, 0], [-1, end, width, -1])
  tmp = array_ops.concat([tmp_up, input_, tmp_down], 1)

  new_width = width + kernel_size - 1
  tmp_left = array_ops.slice(tmp, [0, 0, width - beginning, 0],
                             [-1, new_width, beginning, -1])
  tmp_right = array_ops.slice(tmp, [0, 0, 0, 0], [-1, new_width, end, -1])

  final = array_ops.concat([tmp_left, tmp, tmp_right], 2)
  return final
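As a rough illustration of the intended shapes, assuming the circular_pad definition above and its array_ops import are in scope, and a 4-D input whose height and width both equal the width argument:

images = array_ops.ones([8, 32, 32, 3])                 # a batch of 32x32, 3-channel inputs
padded = circular_pad(images, width=32, kernel_size=5)
# padded has static shape [8, 36, 36, 3]: 32 + 5 - 1 along both spatial dimensions.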
Example 4: _testGradientsSimple
def _testGradientsSimple(self, dtype):
  # Test both positive and negative concat axis.
  # -2 and 1 correspond to the same axis for 3-dimensional tensors.
  for axis in [-2, 1]:
    with self.cached_session(use_gpu=True):
      inp = []
      inp_tensors = []
      for x in [1, 2, 6]:
        shape = [10, x, 2]
        t = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
        if dtype.is_complex:
          t += -1j * t
        inp.append(t)
        inp_tensors.append(
            constant_op.constant(
                t.flatten(),
                shape=shape,
                dtype=dtype))
      c = array_ops.concat(inp_tensors, axis)
      output_shape = [10, 9, 2]
      grad_inp = np.random.rand(*output_shape).astype(dtype.as_numpy_dtype)
      if dtype.is_complex:
        grad_inp += -1j * grad_inp
      grad_tensor = constant_op.constant(
          grad_inp.flatten(), shape=output_shape)
      grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
      concated_grad = array_ops.concat(grad, axis)
      result = self.evaluate(concated_grad)
      self.assertAllEqual(result, grad_inp)
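The check works because the gradient of concat simply slices the upstream gradient back into the shapes of the inputs, so re-concatenating those slices along the same axis must reproduce grad_inp exactly. A minimal eager-mode sketch of the same identity, using the public TF 2.x API rather than the internal test helpers:

import tensorflow as tf

a = tf.random.normal([10, 1, 2])
b = tf.random.normal([10, 2, 2])
upstream = tf.random.normal([10, 3, 2])
with tf.GradientTape() as tape:
  tape.watch([a, b])
  c = tf.concat([a, b], axis=1)
grads = tape.gradient(c, [a, b], output_gradients=upstream)
# grads[0] and grads[1] are slices of `upstream`; tf.concat(grads, axis=1) equals `upstream`.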
Example 5: _RunAndVerifyGradientsRandom
def _RunAndVerifyGradientsRandom(self):
  # Random dims of rank 5
  input_shape = np.random.randint(1, 5, size=5)
  # Random number of tensors
  num_tensors = np.random.randint(12, 20)
  # Random dim to concat on
  concat_dim = np.random.randint(5)
  concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)
  with self.cached_session(use_gpu=True):
    inp = []
    inp_tensors = []
    for x in concat_dim_sizes:
      shape = input_shape
      shape[concat_dim] = x
      t = np.random.rand(*shape).astype("f")
      inp.append(t)
      inp_tensors.append(
          constant_op.constant(t.flatten(), shape=shape,
                               dtype=dtypes.float32))
    c = array_ops.concat(inp_tensors, concat_dim)
    output_shape = input_shape
    output_shape[concat_dim] = concat_dim_sizes.sum()
    grad_inp = np.random.rand(*output_shape).astype("f")
    grad_tensor = constant_op.constant(grad_inp.flatten(), shape=output_shape)
    grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
    concated_grad = array_ops.concat(grad, concat_dim)
    result = self.evaluate(concated_grad)
    self.assertAllEqual(result, grad_inp)
Example 6: _BiasAddGradGrad
def _BiasAddGradGrad(op, received_grad):
  """Gradient for the BiasAddGrad op.

  Args:
    op: BiasAddGrad op for which we are calculating gradients.
    received_grad: The gradients passed to the BiasAddGrad op.

  Returns:
    A single gradient Tensor for the input to BiasAddGrad (which
    is the gradient of the bias term in BiasAdd).
  """
  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None
  shape = array_ops.shape(op.inputs[0])
  rank = array_ops.rank(op.inputs[0])
  bias_shape = array_ops.shape(received_grad)

  if data_format == b"NCHW":
    expanded_shape = array_ops.concat([
        array_ops.ones_like(shape[:-3]), bias_shape,
        array_ops.ones_like(shape[-2:])
    ], 0)
    tile_mults = array_ops.concat([shape[:-3], [1], shape[-2:]], 0)
  else:
    expanded_shape = array_ops.concat(
        [array_ops.ones_like(shape[:-1]), bias_shape], 0)
    tile_mults = array_ops.concat([shape[:-1], [1]], 0)

  expanded_grad = array_ops.reshape(received_grad, expanded_shape)
  return array_ops.tile(expanded_grad, tile_mults)
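The reshape-then-tile pattern broadcasts the incoming bias gradient back over every position that the original BiasAddGrad reduced over: for an NHWC input of shape [n, h, w, c] the two concat calls build expanded_shape = [1, 1, 1, c] and tile_mults = [n, h, w, 1]. A small shape-only sketch of the same idea, with hypothetical sizes and the public API:

import tensorflow as tf

received_grad = tf.random.normal([16])        # gradient w.r.t. a bias with 16 channels
input_shape = tf.constant([8, 32, 32, 16])    # shape of the original NHWC BiasAdd input

expanded_shape = tf.concat([tf.ones_like(input_shape[:-1]), tf.shape(received_grad)], 0)  # [1, 1, 1, 16]
tile_mults = tf.concat([input_shape[:-1], [1]], 0)                                        # [8, 32, 32, 1]
grad_grad = tf.tile(tf.reshape(received_grad, expanded_shape), tile_mults)
# grad_grad has shape [8, 32, 32, 16]: the bias gradient broadcast back to the input shape.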
Example 7: _concat_along_batch_dim
def _concat_along_batch_dim(tensor_list):
  """Concatenate tensors along batch (first) dimension.

  Args:
    tensor_list: list of Tensors or list of tuples of Tensors.

  Returns:
    Tensor or tuple of Tensors.

  Raises:
    ValueError: If `tensor_list` is empty.
  """
  if not tensor_list:
    raise ValueError(
        "Cannot concatenate Tensors if there are no Tensors to concatenate.")

  if isinstance(tensor_list[0], (tuple, list)):
    # [(tensor1a, tensor1b),
    #  (tensor2a, tensor2b), ...] --> (tensor_a, tensor_b)
    return tuple(
        array_ops.concat(tensors, axis=0) for tensors in zip(*tensor_list))
  else:
    # [tensor1, tensor2] --> tensor
    return array_ops.concat(tensor_list, axis=0)
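A quick sketch of the two accepted input forms, assuming _concat_along_batch_dim above and its array_ops import are in scope (the tensors here are hypothetical):

import tensorflow as tf

x1, x2 = tf.zeros([2, 3]), tf.ones([4, 3])
batch = _concat_along_batch_dim([x1, x2])               # a single Tensor of shape [6, 3]
pairs = _concat_along_batch_dim([(x1, x1), (x2, x2)])   # a tuple of two [6, 3] Tensors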
Example 8: sample
def sample(self, n, seed=None, name="sample"):
"""Generate `n` samples.
Args:
n: scalar. Number of samples to draw from each distribution.
seed: Python integer seed for RNG.
name: name to give to the op.
Returns:
samples: a `Tensor` of shape `(n,) + self.batch_shape` with values of type
`self.dtype`.
"""
with ops.name_scope(self.name):
with ops.op_scope([self.p, n], name):
n = ops.convert_to_tensor(n, name="n")
p_2d = array_ops.reshape(self.p, array_ops.pack([-1, 1]))
q_2d = 1. - p_2d
probs = array_ops.concat(1, [q_2d, p_2d])
samples = random_ops.multinomial(math_ops.log(probs), n, seed=seed)
ret = array_ops.reshape(
array_ops.transpose(samples),
array_ops.concat(0,
[array_ops.expand_dims(n, 0), self.batch_shape()]))
ret.set_shape(tensor_shape.vector(tensor_util.constant_value(n))
.concatenate(self.get_batch_shape()))
return math_ops.cast(ret, self.dtype)
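This method also predates TensorFlow 1.0: ops.op_scope and array_ops.pack were later replaced by ops.name_scope and array_ops.stack, and both concat calls pass the axis first. A rough modern-API equivalent of the core sampling logic, where tf.random.categorical plays the role of random_ops.multinomial and the probabilities are hypothetical:

import tensorflow as tf

p = tf.constant([0.2, 0.7])                      # per-distribution success probabilities
p_2d = tf.reshape(p, [-1, 1])
probs = tf.concat([1. - p_2d, p_2d], axis=1)     # column 0: P(X=0), column 1: P(X=1)
samples = tf.random.categorical(tf.math.log(probs), num_samples=5)  # shape [2, 5]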
Example 9: _forward
def _forward(self, x):
  y = x
  # Pad the event_ndims with a zeros vector. We need this because it lets
  # us infer the scale in the inverse function.
  if self._static_event_ndims == 0:
    y = array_ops.expand_dims(y, dim=-1)
    zeros = array_ops.zeros_like(y)
  else:
    shape = array_ops.concat(0, (array_ops.shape(x)[:-1], [1]))
    zeros = array_ops.zeros(shape, dtype=y.dtype)
  y = array_ops.concat(array_ops.rank(y) - 1, (y, zeros))

  # Set shape hints.
  if x.get_shape().ndims is not None:
    shape = x.get_shape().as_list()
    if self._static_event_ndims == 0:
      shape += [2]
    elif shape[-1] is not None:
      shape[-1] += 1
    shape = tensor_shape.TensorShape(shape)
    y.get_shape().assert_is_compatible_with(shape)
    y.set_shape(shape)

  # Since we only support event_ndims in [0, 1] and we do padding, we always
  # reduce over the last dimension, i.e., dim=-1 (which is the default).
  return nn_ops.softmax(y)
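Padding a zeros column and then applying softmax is the standard way of turning one logit per event into a two-class probability pair, since softmax([x, 0]) = [sigmoid(x), 1 - sigmoid(x)]; note that this snippet again uses the pre-1.0 concat(axis, values) order and the deprecated dim= keyword of expand_dims. A minimal sketch of the same padding with the current public API and a hypothetical input:

import tensorflow as tf

x = tf.constant([[0.3, -1.2], [2.0, 0.5]])            # logits, event_ndims == 1
pad_shape = tf.concat([tf.shape(x)[:-1], [1]], 0)     # one extra zero per event
y = tf.concat([x, tf.zeros(pad_shape, dtype=x.dtype)], axis=-1)
probs = tf.nn.softmax(y)                              # the zeros column acts as the implicit reference class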
Example 10: _get_sparse_tensors
def _get_sparse_tensors(self, inputs, weight_collections=None,
                        trainable=None):
  sparse_tensors = self.categorical_column._get_sparse_tensors(inputs)
  id_tensor = sparse_tensors.id_tensor
  weight_tensor = sparse_tensors.weight_tensor

  # Expands final dimension, so that embeddings are not combined during
  # embedding lookup.
  check_id_rank = check_ops.assert_equal(
      array_ops.rank(id_tensor), 2,
      data=[
          'Column {} expected ID tensor of rank 2. '.format(self.name),
          'id_tensor shape: ', array_ops.shape(id_tensor)])
  with ops.control_dependencies([check_id_rank]):
    id_tensor = sparse_ops.sparse_reshape(
        id_tensor,
        shape=array_ops.concat([id_tensor.dense_shape, [1]], axis=0))

  if weight_tensor is not None:
    check_weight_rank = check_ops.assert_equal(
        array_ops.rank(weight_tensor), 2,
        data=[
            'Column {} expected weight tensor of rank 2.'.format(self.name),
            'weight_tensor shape:', array_ops.shape(weight_tensor)])
    with ops.control_dependencies([check_weight_rank]):
      weight_tensor = sparse_ops.sparse_reshape(
          weight_tensor,
          shape=array_ops.concat([weight_tensor.dense_shape, [1]], axis=0))

  return fc._CategoricalColumn.IdWeightPair(id_tensor, weight_tensor)
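The sparse_reshape call only appends a trailing dimension of size 1 to the SparseTensor, so that a later embedding lookup treats every id individually instead of combining the ids within a row. A small sketch of that shape change with hypothetical values and the public API:

import tensorflow as tf

ids = tf.sparse.SparseTensor(indices=[[0, 0], [1, 0], [1, 1]],
                             values=[3, 7, 9],
                             dense_shape=[2, 2])
expanded = tf.sparse.reshape(ids, tf.concat([ids.dense_shape, [1]], axis=0))
# expanded.dense_shape is [2, 2, 1]; every id now occupies its own final dimension.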
Example 11: _to_dense
def _to_dense(self):
  num_cols = 0
  rows = []
  broadcasted_blocks = [operator.to_dense() for operator in self.operators]
  broadcasted_blocks = linear_operator_util.broadcast_matrix_batch_dims(
      broadcasted_blocks)
  for block in broadcasted_blocks:
    batch_row_shape = array_ops.shape(block)[:-1]

    zeros_to_pad_before_shape = array_ops.concat(
        [batch_row_shape, [num_cols]], axis=-1)
    zeros_to_pad_before = array_ops.zeros(
        shape=zeros_to_pad_before_shape, dtype=block.dtype)
    num_cols += array_ops.shape(block)[-1]
    zeros_to_pad_after_shape = array_ops.concat(
        [batch_row_shape,
         [self.domain_dimension_tensor() - num_cols]], axis=-1)
    zeros_to_pad_after = array_ops.zeros(
        shape=zeros_to_pad_after_shape, dtype=block.dtype)

    rows.append(array_ops.concat(
        [zeros_to_pad_before, block, zeros_to_pad_after], axis=-1))

  mat = array_ops.concat(rows, axis=-2)
  mat.set_shape(self.shape)
  return mat
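The loop pads each block with zeros on its left and right so that, once the padded rows are concatenated along the second-to-last axis, the blocks line up on the diagonal. A simplified, non-batched sketch of the same assembly with two hypothetical blocks:

import tensorflow as tf

a = tf.constant([[1., 2.], [3., 4.]])                # 2x2 block
b = tf.constant([[5.]])                              # 1x1 block
row_a = tf.concat([a, tf.zeros([2, 1])], axis=-1)    # pad A on the right
row_b = tf.concat([tf.zeros([1, 2]), b], axis=-1)    # pad B on the left
block_diag = tf.concat([row_a, row_b], axis=-2)
# [[1., 2., 0.],
#  [3., 4., 0.],
#  [0., 0., 5.]]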
Example 12: testZerosCacheDoesntLeakAcrossModes
def testZerosCacheDoesntLeakAcrossModes(self):
  with ops.Graph().as_default():
    t = random_ops.random_normal(shape=[100, 2])
    x = random_ops.random_normal(shape=[100, 4])
    dy = random_ops.random_normal(shape=[100, 4])
    with backprop.GradientTape() as gradient_tape:
      gradient_tape.watch(x)
      x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
      y1 = x1 ** 2.
      y = array_ops.concat([y1, t], axis=1)
    dx = gradient_tape.gradient(y, x, output_gradients=dy)

    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      sess.run(dx)

  t = random_ops.random_normal(shape=[100, 2])
  x = random_ops.random_normal(shape=[100, 4])
  dy = random_ops.random_normal(shape=[100, 4])
  with backprop.GradientTape() as gradient_tape:
    gradient_tape.watch(x)
    x1, _ = array_ops.split(x, num_or_size_splits=2, axis=1)
    y1 = x1 ** 2.
    y = array_ops.concat([y1, t], axis=1)
  dx = gradient_tape.gradient(y, x, output_gradients=dy)
Example 13: same_dynamic_shape
def same_dynamic_shape(a, b):
  """Returns whether a and b have the same dynamic shape.

  Args:
    a: `Tensor`
    b: `Tensor`

  Returns:
    `Boolean` `Tensor` representing if both tensors have the same shape.
  """
  a = ops.convert_to_tensor(a, name="a")
  b = ops.convert_to_tensor(b, name="b")

  # One of the shapes isn't fully defined, so we need to use the dynamic
  # shape.
  return control_flow_ops.cond(
      math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
      # Here we can't just do math_ops.equal(a.shape, b.shape), since
      # static shape inference may break the equality comparison between
      # shape(a) and shape(b) in math_ops.equal.
      lambda: math_ops.reduce_all(math_ops.equal(
          array_ops.concat((
              array_ops.shape(a),
              array_ops.shape(b)), 0),
          array_ops.concat((
              array_ops.shape(b),
              array_ops.shape(a)), 0))),
      lambda: constant_op.constant(False))
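A sketch of how the helper behaves, assuming same_dynamic_shape above and its imports are in scope (the tensors are hypothetical):

import tensorflow as tf

x = tf.zeros([2, 3])
y = tf.ones([2, 3])
z = tf.ones([4])
same_dynamic_shape(x, y)   # scalar bool Tensor containing True
same_dynamic_shape(x, z)   # ranks differ, so the cond falls through to False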
Example 14: _SparseDenseCwiseMulOrDivGrad
def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
  """Common code for SparseDenseCwise{Mul,Div} gradients."""
  x_indices = op.inputs[0]
  x_shape = op.inputs[2]
  y = op.inputs[3]

  y_shape = math_ops.to_int64(array_ops.shape(y))
  num_added_dims = array_ops.expand_dims(
      array_ops.size(x_shape) - array_ops.size(y_shape), 0)
  augmented_y_shape = array_ops.concat(
      [array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)

  scaling = x_shape // augmented_y_shape
  scaled_indices = x_indices // scaling
  scaled_indices = array_ops.slice(scaled_indices,
                                   array_ops.concat([[0], num_added_dims], 0),
                                   [-1, -1])
  dense_vals = array_ops.gather_nd(y, scaled_indices)

  if is_mul:
    dx = grad * dense_vals
    dy_val = grad * op.inputs[1]
  else:
    dx = grad / dense_vals
    dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
  # indices can repeat after scaling, so we can't use sparse_to_dense().
  dy = sparse_ops.sparse_add(
      array_ops.zeros_like(y),
      sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))

  # (sp_indices, sp_vals, sp_shape, dense)
  return (None, dx, None, dy)
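The concat here left-pads the dense operand's shape with ones so that it broadcasts against the sparse operand's shape; with a dense operand of shape [4] and a sparse operand of dense_shape [2, 3, 4], the augmented shape becomes [1, 1, 4]. A minimal sketch of that shape arithmetic with hypothetical shapes and the public API:

import tensorflow as tf

x_shape = tf.constant([2, 3, 4], dtype=tf.int64)          # dense_shape of the sparse operand
y_shape = tf.cast(tf.shape(tf.zeros([4])), tf.int64)      # shape of the dense operand
num_added_dims = tf.expand_dims(tf.size(x_shape) - tf.size(y_shape), 0)
augmented_y_shape = tf.concat([tf.ones(num_added_dims, tf.int64), y_shape], 0)
# augmented_y_shape is [1, 1, 4]; x_shape // augmented_y_shape then gives the per-axis
# scaling used to map the sparse indices onto the broadcast dense values.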
Example 15: quantiles_ready
def quantiles_ready():
  """The subgraph for when the quantiles are ready."""
  quantized_feature = quantile_ops.quantiles([sparse_column_values], [],
                                             [quantile_buckets], [])
  quantized_feature = math_ops.cast(quantized_feature[0], dtypes.int64)
  quantized_feature = array_ops.reshape(quantized_feature, [-1])
  example_indices, _ = array_ops.split(
      sparse_column_indices, num_or_size_splits=2, axis=1)
  example_indices = array_ops.squeeze(example_indices, [1])
  filtered_gradients = array_ops.gather(gradients, example_indices)
  filtered_hessians = array_ops.gather(hessians, example_indices)
  filtered_partition_ids = array_ops.gather(example_partition_ids,
                                            example_indices)
  unique_partitions, mapped_partitions = array_ops.unique(
      example_partition_ids)

  # Compute aggregate stats for each partition.
  per_partition_gradients = math_ops.unsorted_segment_sum(
      gradients, mapped_partitions, array_ops.size(unique_partitions))
  per_partition_hessians = math_ops.unsorted_segment_sum(
      hessians, mapped_partitions, array_ops.size(unique_partitions))

  # Prepend a bias feature per partition that accumulates the stats for all
  # examples in that partition.
  bias_feature_ids = array_ops.fill(
      array_ops.shape(unique_partitions), _BIAS_FEATURE_ID)
  bias_feature_ids = math_ops.cast(bias_feature_ids, dtypes.int64)
  partition_ids = array_ops.concat(
      [unique_partitions, filtered_partition_ids], 0)
  filtered_gradients = array_ops.concat(
      [per_partition_gradients, filtered_gradients], 0)
  filtered_hessians = array_ops.concat(
      [per_partition_hessians, filtered_hessians], 0)
  bucket_ids = array_ops.concat([bias_feature_ids, quantized_feature], 0)
  return partition_ids, bucket_ids, filtered_gradients, filtered_hessians
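The final four concat calls implement the bias-feature trick: one synthetic bucket id (_BIAS_FEATURE_ID) is prepended per partition so that the per-partition totals travel in the same flat tensors as the per-example statistics. A toy sketch of just that prepend step, with hypothetical values (including the bias id) and the public API:

import tensorflow as tf

unique_partitions = tf.constant([0, 1], dtype=tf.int64)
per_partition_gradients = tf.constant([10.0, 20.0])          # aggregated totals per partition
filtered_partition_ids = tf.constant([0, 0, 1], dtype=tf.int64)
filtered_gradients = tf.constant([3.0, 7.0, 20.0])           # per-example statistics
quantized_feature = tf.constant([5, 5, 8], dtype=tf.int64)
BIAS_FEATURE_ID = -1                                          # hypothetical stand-in for _BIAS_FEATURE_ID

bias_feature_ids = tf.fill(tf.shape(unique_partitions), tf.constant(BIAS_FEATURE_ID, tf.int64))
partition_ids = tf.concat([unique_partitions, filtered_partition_ids], 0)    # [0, 1, 0, 0, 1]
all_gradients = tf.concat([per_partition_gradients, filtered_gradients], 0)  # [10., 20., 3., 7., 20.]
bucket_ids = tf.concat([bias_feature_ids, quantized_feature], 0)             # [-1, -1, 5, 5, 8]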