This article collects typical usage examples of the transpose function from tensorflow.python.ops.array_ops in Python. If you are wondering what transpose does, how to use it, or what it looks like in real code, the curated examples below may help.
Fifteen code examples of the transpose function are listed below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
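Before the examples, here is a minimal sketch of the public `tf.transpose` API, which is backed by `array_ops.transpose`; it assumes TF 2.x eager execution and is only meant to show the `perm` semantics used throughout the examples below.

```python
import tensorflow as tf

x = tf.reshape(tf.range(24), [2, 3, 4])
# Without perm, all axes are reversed: shape becomes [4, 3, 2].
print(tf.transpose(x).shape)
# With perm, axis i of the result is axis perm[i] of the input:
# perm=[0, 2, 1] swaps the last two axes, giving shape [2, 4, 3].
print(tf.transpose(x, perm=[0, 2, 1]).shape)
```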
Example 1: make_inverse_update_ops
def make_inverse_update_ops(self):
"""Create and return update ops corresponding to registered computations."""
ops = []
num_inverses = len(self._inverses_by_damping)
matrix_power_registered = bool(self._matpower_by_exp_and_damping)
use_eig = (
self._eigendecomp or matrix_power_registered or
num_inverses >= EIGENVALUE_DECOMPOSITION_THRESHOLD)
if use_eig:
self.register_eigendecomp() # ensures self._eigendecomp is set
eigenvalues, eigenvectors = self._eigendecomp # pylint: disable=unpacking-non-sequence
for damping, inv in self._inverses_by_damping.items():
ops.append(
inv.assign(
math_ops.matmul(eigenvectors / (eigenvalues + damping),
array_ops.transpose(eigenvectors))))
for (exp, damping), matpower in self._matpower_by_exp_and_damping.items():
ops.append(
matpower.assign(
math_ops.matmul(eigenvectors *
(eigenvalues + damping)**exp,
array_ops.transpose(eigenvectors))))
# These ops share computation and should be run on a single device.
ops = [control_flow_ops.group(*ops)]
else:
for damping, inv in self._inverses_by_damping.items():
ops.append(inv.assign(utils.posdef_inv(self._cov, damping)))
return ops
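The two `assign` ops above rely on a standard eigendecomposition identity: if `cov = V diag(λ) Vᵀ`, then `(cov + d·I)⁻¹ = V diag(1/(λ+d)) Vᵀ`, and dividing the eigenvector columns by `(λ + d)` via broadcasting computes exactly that. A small numpy sketch (not the K-FAC code itself) with made-up data:

```python
import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 4))
cov = a @ a.T            # symmetric positive semidefinite, like self._cov
damping = 0.1

eigenvalues, eigenvectors = np.linalg.eigh(cov)
# Broadcasting divides each eigenvector column by its (eigenvalue + damping),
# so the product below is V diag(1 / (lambda + d)) V^T.
inv_from_eig = (eigenvectors / (eigenvalues + damping)) @ eigenvectors.T
inv_direct = np.linalg.inv(cov + damping * np.eye(4))
print(np.allclose(inv_from_eig, inv_direct))  # True
```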
Example 2: frames
def frames(signal, frame_length, frame_step, name=None):
"""Frame a signal into overlapping frames.
May be used in front of spectral functions.
For example:
```python
pcm = tf.placeholder(tf.float32, [None, 9152])
frames = tf.contrib.signal.frames(pcm, 512, 180)
magspec = tf.abs(tf.spectral.rfft(frames, [512]))
image = tf.expand_dims(magspec, 3)
```
Args:
signal: A `Tensor` of shape `[batch_size, signal_length]`.
frame_length: An `int32` or `int64` `Tensor`. The length of each frame.
frame_step: An `int32` or `int64` `Tensor`. The step between frames.
name: A name for the operation (optional).
Returns:
A `Tensor` of frames with shape `[batch_size, num_frames, frame_length]`.
Raises:
ValueError: if signal does not have rank 2.
"""
with ops.name_scope(name, "frames", [signal, frame_length, frame_step]):
signal = ops.convert_to_tensor(signal, name="signal")
frame_length = ops.convert_to_tensor(frame_length, name="frame_length")
frame_step = ops.convert_to_tensor(frame_step, name="frame_step")
signal_rank = signal.shape.ndims
if signal_rank != 2:
raise ValueError("expected signal to have rank 2 but was " + str(signal_rank))
signal_length = array_ops.shape(signal)[1]
num_frames = math_ops.ceil((signal_length - frame_length) / frame_step)
num_frames = 1 + math_ops.cast(num_frames, dtypes.int32)
pad_length = (num_frames - 1) * frame_step + frame_length
pad_signal = array_ops.pad(signal, [[0, 0], [0,
pad_length - signal_length]])
indices_frame = array_ops.expand_dims(math_ops.range(frame_length), 0)
indices_frames = array_ops.tile(indices_frame, [num_frames, 1])
indices_step = array_ops.expand_dims(
math_ops.range(num_frames) * frame_step, 1)
indices_steps = array_ops.tile(indices_step, [1, frame_length])
indices = indices_frames + indices_steps
# TODO(androbin): remove `transpose` when `gather` gets `axis` support
pad_signal = array_ops.transpose(pad_signal)
signal_frames = array_ops.gather(pad_signal, indices)
signal_frames = array_ops.transpose(signal_frames, perm=[2, 0, 1])
return signal_frames
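The index arithmetic above (a row of per-frame offsets plus a column of frame starts) is easier to see in numpy. A minimal sketch for a single, already padded signal, ignoring batching:

```python
import numpy as np

signal = np.arange(10)
frame_length, frame_step = 4, 2
num_frames = 1 + (len(signal) - frame_length) // frame_step

# Row i of `indices` selects frame i: offsets 0..frame_length-1 shifted by
# i * frame_step, mirroring indices_frames + indices_steps above.
indices = (np.arange(frame_length)[None, :] +
           np.arange(num_frames)[:, None] * frame_step)
print(signal[indices])
# [[0 1 2 3]
#  [2 3 4 5]
#  [4 5 6 7]
#  [6 7 8 9]]
```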
Example 3: functional_rnn
def functional_rnn(cell, inputs, sequence_length=None,
initial_state=None, dtype=None, time_major=False,
scope=None, use_tpu=False):
"""Same interface as `tf.nn.dynamic_rnn`."""
with variable_scope.variable_scope(scope or 'rnn'):
if not time_major:
inputs = nest.map_structure(
lambda t: array_ops.transpose(t, [1, 0, 2]), inputs)
inputs_flat = nest.flatten(inputs)
batch_size = array_ops.shape(inputs_flat[0])[1]
if initial_state is None:
initial_state = cell.zero_state(batch_size, dtype)
func_cell = _FunctionalRnnCell(cell, inputs, initial_state)
if sequence_length is not None:
max_length = math_ops.reduce_max(sequence_length)
else:
max_length = None
extended_acc_state, extended_final_state = recurrent.Recurrent(
theta=func_cell.theta,
state0=func_cell.extended_initial_state,
inputs=inputs,
cell_fn=func_cell.cell_step,
max_input_length=max_length,
use_tpu=use_tpu)
tf_output, tf_state = _PostProcessOutput(
extended_acc_state, extended_final_state, func_cell,
inputs_flat[0].shape[0], sequence_length)
if time_major:
tf_output = array_ops.transpose(tf_output, [1, 0, 2])
return tf_output, tf_state
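The two transposes in `functional_rnn` are the usual batch-major/time-major conversion: the recurrent loop works on `[time, batch, features]`, while callers usually pass `[batch, time, features]`. A quick numpy illustration of that permutation:

```python
import numpy as np

batch_major = np.zeros((32, 10, 8))                 # [batch, time, features]
time_major = np.transpose(batch_major, (1, 0, 2))   # [time, batch, features]
print(time_major.shape)                             # (10, 32, 8)
# Swapping two axes is its own inverse, so the same perm converts back.
print(np.transpose(time_major, (1, 0, 2)).shape)    # (32, 10, 8)
```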
Example 4: rot90
def rot90(image, k=1):
"""Rotate an image counter-clockwise by 90 degrees.
Args:
image: A 3-D tensor of shape `[height, width, channels].`
k: Number of times the image is rotated by 90 degrees.
Returns:
A rotated 3-D tensor of the same type and shape as `image`.
"""
image = ops.convert_to_tensor(image, name='image')
_Check3DImage(image, require_static=False)
k %= 4
if k == 0:
return image
elif k == 1:
return array_ops.transpose(
array_ops.reverse(image, [False, True, False]),
[1, 0, 2], name='rot90')
elif k == 2:
return array_ops.reverse(image, [True, True, False], name='rot90')
elif k == 3:
return array_ops.reverse(
array_ops.transpose(image, [1, 0, 2], name='rot90'),
[False, True, False])
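For the `k=1` branch, transposing height and width and then reversing one spatial axis is exactly a 90-degree counter-clockwise rotation. A numpy check of that equivalence (not the TF code path itself):

```python
import numpy as np

image = np.arange(6).reshape(2, 3, 1)   # [height, width, channels]
# Transpose height/width, then flip the new height axis.
rotated = np.transpose(image, (1, 0, 2))[::-1, :, :]
print(np.array_equal(rotated, np.rot90(image, k=1)))  # True
```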
Example 5: _GetVal
def _GetVal(use_xla):
with self.cached_session():
t0 = array_ops.placeholder(np.float32, shape=input_sizes)
t1 = constant_op.constant(filter_sizes, shape=[len(filter_sizes)])
t2 = array_ops.placeholder(np.float32, shape=output_sizes)
native_t0 = t0
native_t2 = t2
strides = [1, stride, stride, 1]
if use_xla:
if data_format == "NCHW":
# Transpose from NHWC input to NCHW
# Ex. [4, 5, 5, 48] to [4, 48, 5, 5]
native_t0 = array_ops.transpose(t0, [0, 3, 1, 2])
native_t2 = array_ops.transpose(t2, [0, 3, 1, 2])
strides = [1, 1, stride, stride]
with self.test_scope():
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
native_t0,
t1,
native_t2,
strides=strides,
padding=padding,
data_format=data_format)
else:
# For CPU, the format NCHW is not supported. Therefore we always use
# NHWC here.
backprop = nn_ops.depthwise_conv2d_native_backprop_filter(
native_t0, t1, native_t2, strides=strides, padding=padding)
ret = backprop.eval({t0: x0, t2: x2})
self.assertShapeEqual(ret, backprop)
return ret
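The `[0, 3, 1, 2]` permutation used for the XLA/NCHW path moves the channels axis in front of the spatial axes; `[0, 2, 3, 1]` is its inverse. Sketched in numpy with the shapes from the comment above:

```python
import numpy as np

x_nhwc = np.zeros((4, 5, 5, 48))               # [batch, height, width, channels]
x_nchw = np.transpose(x_nhwc, (0, 3, 1, 2))    # -> (4, 48, 5, 5)
x_back = np.transpose(x_nchw, (0, 2, 3, 1))    # inverse perm -> NHWC again
print(x_nchw.shape, x_back.shape)
```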
Example 6: cudnn_lstm
def cudnn_lstm(inputs, input_h, input_c, kernel, recurrent_kernel, bias, units):
inputs = array_ops.transpose(inputs, perm=(1, 0, 2))
input_h = array_ops.expand_dims(input_h, axis=0)
input_c = array_ops.expand_dims(input_c, axis=0)
params = _canonical_to_params(
weights=[
kernel[:, :units],
kernel[:, units:units * 2],
kernel[:, units * 2:units * 3],
kernel[:, units * 3:],
recurrent_kernel[:, :units],
recurrent_kernel[:, units:units * 2],
recurrent_kernel[:, units * 2:units * 3],
recurrent_kernel[:, units * 3:],
],
biases=[
bias[:units],
bias[units:units * 2],
bias[units * 2:units * 3],
bias[units * 3:units * 4],
bias[units * 4:units * 5],
bias[units * 5:units * 6],
bias[units * 6:units * 7],
bias[units * 7:],
],
shape=constant_op.constant([-1]))
outputs, h, c, _ = gen_cudnn_rnn_ops.cudnn_rnn(
inputs, input_h=input_h, input_c=input_c, params=params)
outputs = array_ops.transpose(outputs, perm=[1, 0, 2])
h = h[0]
c = c[0]
return outputs, [h, c], constant_op.constant(
'cudnn', dtype=dtypes.string, name='runtime')
Example 7: _ExtractImagePatchesGrad
def _ExtractImagePatchesGrad(op, grad):
batch_size, rows_in, cols_in, channels = [
dim.value for dim in op.inputs[0].shape.dims
]
input_bhwc = array_ops.shape(op.inputs[0])
batch_size = input_bhwc[0]
channels = input_bhwc[3]
# Create indices matrix for input tensor.
# Note that 0 is preserved for padding location,
# so indices for input start from 1 to 1 + rows_in * cols_in.
input_indices_num = 1 + rows_in * cols_in
input_idx = array_ops.reshape(math_ops.range(1, input_indices_num,
dtype=ops.dtypes.int64),
(1, rows_in, cols_in, 1))
input_idx_patched = gen_array_ops.extract_image_patches(
input_idx,
op.get_attr("ksizes"),
op.get_attr("strides"),
op.get_attr("rates"),
op.get_attr("padding"))
# Create indices matrix for output tensor.
_, rows_out, cols_out, _ = [dim.value for dim in op.outputs[0].shape.dims]
_, ksize_r, ksize_c, _ = op.get_attr("ksizes")
# Indices for output start from 0.
output_indices_num = rows_out * cols_out * ksize_r * ksize_c
output_idx = array_ops.reshape(math_ops.range(output_indices_num,
dtype=ops.dtypes.int64),
(1, rows_out, cols_out, ksize_r * ksize_c))
# Construct mapping table for indices: (input -> output).
idx_matrix = array_ops.concat(
[array_ops.expand_dims(input_idx_patched, axis=-1),
array_ops.expand_dims(output_idx, axis=-1)],
axis=-1)
idx_map = array_ops.reshape(idx_matrix, (-1, 2))
sp_shape = (input_indices_num, output_indices_num)
sp_mat_full = sparse_tensor.SparseTensor(
idx_map,
array_ops.ones([output_indices_num], dtype=grad.dtype),
sp_shape)
# Remove all padding locations [0, :].
sp_mat = sparse_ops.sparse_slice(sp_mat_full,
(1, 0),
(input_indices_num - 1, output_indices_num))
grad_expanded = array_ops.transpose(
array_ops.reshape(
grad, (batch_size, rows_out, cols_out, ksize_r, ksize_c, channels)),
(1, 2, 3, 4, 0, 5))
grad_flat = array_ops.reshape(grad_expanded, (-1, batch_size * channels))
jac = sparse_ops.sparse_tensor_dense_matmul(sp_mat, grad_flat)
grad_out = array_ops.reshape(jac, (rows_in, cols_in, batch_size, channels))
grad_out = array_ops.transpose(grad_out, (2, 0, 1, 3))
return [grad_out]
Example 8: separable_lstm
def separable_lstm(images, num_filters_out,
kernel_size=None, nhidden=None, scope=None):
"""Run bidirectional LSTMs first horizontally then vertically.
Args:
images: (num_images, height, width, depth) tensor
num_filters_out: output layer depth
kernel_size: A list of length 2 holding the [kernel_height, kernel_width] of
the pooling. Can be an int if both values are the same. Set to None to skip
using blocks.
nhidden: hidden layer depth
scope: optional scope name
Returns:
(num_images, height/kernel_height, width/kernel_width,
num_filters_out) tensor
"""
with variable_scope.variable_scope(scope, "SeparableLstm", [images]):
if nhidden is None:
nhidden = num_filters_out
if kernel_size is not None:
images = get_blocks(images, kernel_size)
hidden = horizontal_lstm(images, nhidden)
with variable_scope.variable_scope("vertical"):
transposed = array_ops.transpose(hidden, [0, 2, 1, 3])
output_transposed = horizontal_lstm(transposed, num_filters_out)
output = array_ops.transpose(output_transposed, [0, 2, 1, 3])
return output
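The `[0, 2, 1, 3]` permutation swaps the height and width axes, so the same `horizontal_lstm` effectively scans columns instead of rows; applying it a second time restores the original layout. A numpy sketch:

```python
import numpy as np

images = np.zeros((8, 32, 64, 3))                 # [batch, height, width, depth]
swapped = np.transpose(images, (0, 2, 1, 3))      # [batch, width, height, depth]
print(swapped.shape)                              # (8, 64, 32, 3)
# The permutation is its own inverse.
print(np.transpose(swapped, (0, 2, 1, 3)).shape)  # (8, 32, 64, 3)
```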
Example 9: make_inverse_update_ops
def make_inverse_update_ops(self):
"""Create and return update ops corresponding to registered computations."""
ops = super(InverseProvidingFactor, self).make_inverse_update_ops()
num_inverses = len(self._inverses_by_damping)
matrix_power_registered = bool(self._matpower_by_exp_and_damping)
use_eig = (self._eigendecomp or matrix_power_registered or
num_inverses >= EIGENVALUE_DECOMPOSITION_THRESHOLD)
if use_eig:
self.register_eigendecomp() # ensures self._eigendecomp is set
eigenvalues, eigenvectors = self._eigendecomp # pylint: disable=unpacking-non-sequence
# The matrix self._cov is positive semidefinite by construction, but the
# numerical eigenvalues could be negative due to numerical errors, so here
# we clip them to be at least EIGENVALUE_CLIPPING_THRESHOLD.
clipped_eigenvalues = math_ops.maximum(eigenvalues,
EIGENVALUE_CLIPPING_THRESHOLD)
for damping, inv in self._inverses_by_damping.items():
ops.append(
inv.assign(
math_ops.matmul(eigenvectors / (clipped_eigenvalues + damping),
array_ops.transpose(eigenvectors))))
for (exp, damping), matpower in self._matpower_by_exp_and_damping.items():
ops.append(
matpower.assign(
math_ops.matmul(eigenvectors * (clipped_eigenvalues + damping)**
exp, array_ops.transpose(eigenvectors))))
else:
for damping, inv in self._inverses_by_damping.items():
ops.append(inv.assign(utils.posdef_inv(self._cov, damping)))
return ops
Example 10: build_graph
def build_graph(device, input_shape, perm, datatype, num_iters):
"""builds a graph containing a sequence of conv2d operations.
Args:
device: String, the device to run on.
input_shape: Shape of the input tensor.
perm: A list of ints with the same length as input tensor's dimension.
datatype: numpy data type of the input tensor.
num_iters: number of iterations to run transpose.
Returns:
An array of tensors to run()
"""
with ops.device("/%s:0" % device):
total_size = np.prod(input_shape)
inp = np.arange(1, total_size + 1, dtype=datatype).reshape(input_shape)
t = constant_op.constant(inp, shape=input_shape)
outputs = []
transpose_op = array_ops.transpose(t, perm)
outputs.append(transpose_op)
for _ in range(1, num_iters):
with ops.control_dependencies([transpose_op]):
transpose_op = array_ops.transpose(t, perm)
outputs.append(transpose_op)
return control_flow_ops.group(*outputs)
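A hypothetical driver for this benchmark graph, assuming TF 1.x-style graph mode; the shapes, perm, and device here are made up for illustration and `build_graph` is the function defined above:

```python
import numpy as np
import tensorflow.compat.v1 as tf  # graph-mode session APIs

with tf.Graph().as_default():
  run_op = build_graph("cpu", input_shape=[16, 64, 64, 3],
                       perm=[0, 3, 1, 2], datatype=np.float32, num_iters=10)
  with tf.Session() as sess:
    sess.run(run_op)  # runs num_iters chained transposes
```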
Example 11: reduce_to_final
def reduce_to_final(images, num_filters_out, nhidden=None, scope=None):
"""Reduce an image to a final state by running two LSTMs.
Args:
images: (num_images, height, width, depth) tensor
num_filters_out: output layer depth
nhidden: hidden layer depth (defaults to num_filters_out)
scope: optional scope name
Returns:
A (num_images, num_filters_out) batch.
"""
with variable_scope.variable_scope(scope, "ReduceToFinal", [images]):
nhidden = nhidden or num_filters_out
batch_size, height, width, depth = _shape(images)
transposed = array_ops.transpose(images, [1, 0, 2, 3])
reshaped = array_ops.reshape(transposed,
[height, batch_size * width, depth])
with variable_scope.variable_scope("reduce1"):
reduced = lstm1d.sequence_to_final(reshaped, nhidden)
transposed_hidden = array_ops.reshape(reduced,
[batch_size, width, nhidden])
hidden = array_ops.transpose(transposed_hidden, [1, 0, 2])
with variable_scope.variable_scope("reduce2"):
output = lstm1d.sequence_to_final(hidden, num_filters_out)
return output
Example 12: _SparseMatMul
def _SparseMatMul(t1, t2, transpose_a=False, transpose_b=False):
"""Helper function to create SparseMatMul op."""
assert t1 in is_sparse and t2 in is_sparse
t1_sparse = is_sparse[t1]
t2_sparse = is_sparse[t2]
if not t1_sparse and not t2_sparse:
return math_ops.matmul(t1, t2,
transpose_a=transpose_a,
transpose_b=transpose_b)
transpose_out = False
if not t1_sparse:
transpose_out = True
t1, t2 = t2, t1
t1_sparse, t2_sparse = t2_sparse, t1_sparse
assert t1_sparse
transpose_a, transpose_b = not transpose_b, not transpose_a
if transpose_b:
t2 = array_ops.transpose(t2)
transpose_b = False
m = math_ops.matmul(t1, t2,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=t1_sparse,
b_is_sparse=t2_sparse)
if transpose_out:
m = array_ops.transpose(m)
return m
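The operand swap above (exchange `t1` and `t2`, flip both transpose flags, transpose the result) is justified by the identity `(Bᵀ Aᵀ)ᵀ = A B`, which lets the sparse operand always sit in the first matmul position. A numpy check:

```python
import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((3, 4))
b = rng.standard_normal((4, 5))

# (B^T A^T)^T == A B, so swapping operands and transposing the result
# leaves the product unchanged.
print(np.allclose((b.T @ a.T).T, a @ b))  # True
```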
Example 13: call
def call(self, inputs):
if self.data_format == 'channels_first':
# Reshape to channels last
inputs = array_ops.transpose(inputs, (0, 2, 3, 1))
# Apply the actual ops.
outputs = nn.separable_conv2d(
inputs,
self.depthwise_kernel,
self.pointwise_kernel,
strides=(1,) + self.strides + (1,),
padding=self.padding.upper(),
rate=self.dilation_rate)
if self.data_format == 'channels_first':
# Reshape to channels first
outputs = array_ops.transpose(outputs, (0, 3, 1, 2))
if self.bias is not None:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
Example 14: _state_to_olabel_unique
def _state_to_olabel_unique(labels, num_labels, states, unique):
"""Sum state log probs to ilabel log probs using unique label indices."""
num_label_states = _get_dim(labels, 1) + 1
label_states = states[:, :, 1:num_label_states]
blank_states = states[:, :, num_label_states:]
unique_y, unique_idx = unique
mul_reduce = _sum_states(unique_idx, label_states)
num_frames = states.shape[0]
batch_size = states.shape[1]
num_states = num_label_states - 1
batch_state_major = array_ops.transpose(mul_reduce, perm=[1, 2, 0])
batch_state_major = array_ops.reshape(
batch_state_major, [batch_size * num_states, num_frames])
batch_offset = math_ops.range(batch_size, dtype=unique_y.dtype) * num_labels
indices = unique_y + array_ops.expand_dims(batch_offset, axis=-1)
indices = array_ops.reshape(indices, [-1, 1])
scatter = array_ops.scatter_nd(
indices=indices,
updates=batch_state_major,
shape=[batch_size * num_labels, num_frames])
scatter = array_ops.reshape(scatter, [batch_size, num_labels, num_frames])
scatter = array_ops.where(
math_ops.equal(scatter, 0.0),
array_ops.fill(array_ops.shape(scatter), math_ops.log(0.0)),
scatter)
label_olabels = array_ops.transpose(scatter, [2, 0, 1])
label_olabels = label_olabels[:, :, 1:]
blank_olabels = math_ops.reduce_logsumexp(
blank_states, axis=2, keepdims=True)
return array_ops.concat([blank_olabels, label_olabels], axis=-1)
Example 15: runFiniteDifferences
def runFiniteDifferences(self, shapes, dtypes=(dtypes_lib.float32, dtypes_lib.float64), scalarTest=False):
with self.test_session(use_gpu=False):
for shape in shapes:
for batch in False, True:
for dtype in dtypes:
if not scalarTest:
x = constant_op.constant(np.random.randn(shape[0], shape[1]), dtype)
tensor = math_ops.matmul(x, array_ops.transpose(x)) / shape[0]
else:
# This is designed to be a faster test for larger matrices.
x = constant_op.constant(np.random.randn(), dtype)
R = constant_op.constant(np.random.randn(shape[0], shape[1]), dtype)
e = math_ops.mul(R, x)
tensor = math_ops.matmul(e, array_ops.transpose(e)) / shape[0]
# Inner-most matrices in tensor are positive definite.
if batch:
tensor = array_ops.tile(array_ops.expand_dims(tensor, 0), [4, 1, 1])
y = linalg_ops.cholesky(tensor)
if scalarTest:
y = math_ops.reduce_mean(y)
error = gradient_checker.compute_gradient_error(x, x._shape_as_list(), y, y._shape_as_list())
tf_logging.info("error = %f", error)
if dtype == dtypes_lib.float64:
self.assertLess(error, 1e-5)
else:
self.assertLess(error, 3e-3)
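The `matmul(x, transpose(x))` construction in this test guarantees a symmetric positive semidefinite input, which is what `cholesky` requires; dividing by `shape[0]` only rescales it. A numpy sketch of the same construction:

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((5, 5))
tensor = x @ x.T / 5           # symmetric positive (semi)definite by construction
chol = np.linalg.cholesky(tensor)
print(np.allclose(chol @ chol.T, tensor))  # True
```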