This page collects typical usage examples of the tensorflow.transpose function in Python. If you are wondering how exactly the transpose function is used in practice, the curated code examples below should help.
Fifteen code examples of the transpose function are shown below, sorted by popularity by default.
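Before the examples, a quick refresher on the function itself. The snippet below is a minimal sketch against the TF 1.x graph API that all of these examples target: tf.transpose(a, perm) returns a tensor whose axis i is a's axis perm[i]; with perm omitted, all axes are reversed.

import numpy as np
import tensorflow as tf

x = tf.constant(np.arange(24).reshape(2, 3, 4))
y = tf.transpose(x)                  # perm defaults to [2, 1, 0] -> shape (4, 3, 2)
z = tf.transpose(x, perm=[0, 2, 1])  # swap the last two axes -> shape (2, 4, 3)
with tf.Session() as sess:
    print(sess.run(tf.shape(y)))     # [4 3 2]
    print(sess.run(tf.shape(z)))     # [2 4 3]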
Example 1: __init__
def __init__(
        self,
        layer=None,
        act=tf.identity,
        epsilon=1e-5,
        scale_init=tf.constant_initializer(1.0),
        offset_init=tf.constant_initializer(0.0),
        G=32,
        name='group_norm',
):
    Layer.__init__(self, name=name)
    self.inputs = layer.outputs
    print(" [TL] GroupNormLayer %s: epsilon:%f act:%s" % (self.name, epsilon, act.__name__))
    inputs_shape = get_shape(layer.outputs)
    G = tf.minimum(G, inputs_shape[-1])
    # [N, H, W, C] to [N, C, H, W]
    temp_input = tf.transpose(self.inputs, [0, 3, 1, 2])
    temp_input = tf.reshape(temp_input, [inputs_shape[0], G, inputs_shape[-1] // G, inputs_shape[1], inputs_shape[2]],
                            name='group_reshape1')
    with tf.variable_scope(name) as vs:
        mean, var = tf.nn.moments(temp_input, [2, 3, 4], keep_dims=True)
        scale = tf.get_variable('scale', shape=[1, inputs_shape[-1], 1, 1], initializer=scale_init, dtype=D_TYPE)
        offset = tf.get_variable('offset', shape=[1, inputs_shape[-1], 1, 1], initializer=offset_init, dtype=D_TYPE)
        temp_input = (temp_input - mean) / tf.sqrt(var + epsilon)
        temp_input = tf.reshape(temp_input, shape=[inputs_shape[0], inputs_shape[-1], inputs_shape[1], inputs_shape[2]],
                                name='group_reshape2')
        self.outputs = scale * temp_input + offset
        self.outputs = tf.transpose(self.outputs, [0, 2, 3, 1])
        self.outputs = act(self.outputs)
        variables = tf.get_collection(TF_GRAPHKEYS_VARIABLES, scope=vs.name)
    self.all_layers = list(layer.all_layers)
    self.all_params = list(layer.all_params)
    self.all_drop = dict(layer.all_drop)
    self.all_layers.extend([self.outputs])
    self.all_params.extend(variables)
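For reference, the moments/normalize/scale-shift sequence above computes group normalization (Wu & He, "Group Normalization") in NCHW layout: with the C channels split into G groups,

$$y = \gamma \cdot \frac{x - \mu_g}{\sqrt{\sigma_g^2 + \epsilon}} + \beta,$$

where $\mu_g$ and $\sigma_g^2$ are taken per group over the group's channels and the spatial dimensions, and $\gamma$, $\beta$ are the per-channel scale and offset variables.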
Example 2: _define_distance_to_clusters
def _define_distance_to_clusters(self, data):
    """Defines the Mahalanobis distance to the assigned Gaussian."""
    # TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
    # mean) from log probability function.
    self._all_scores = []
    for shard in data:
        all_scores = []
        shard = tf.expand_dims(shard, 0)
        for c in xrange(self._num_classes):
            if self._covariance_type == FULL_COVARIANCE:
                cov = self._covs[c, :, :]
            elif self._covariance_type == DIAG_COVARIANCE:
                cov = tf.diag(self._covs[c, :])
            inverse = tf.matrix_inverse(cov + self._min_var)
            inv_cov = tf.tile(
                tf.expand_dims(inverse, 0),
                tf.pack([self._num_examples, 1, 1]))
            diff = tf.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
            m_left = tf.batch_matmul(diff, inv_cov)
            all_scores.append(tf.sqrt(tf.batch_matmul(
                m_left, tf.transpose(diff, perm=[0, 2, 1]))))
        self._all_scores.append(tf.reshape(
            tf.concat(1, all_scores),
            tf.pack([self._num_examples, self._num_classes])))
    # Distance to the associated class.
    self._all_scores = tf.concat(0, self._all_scores)
    assignments = tf.concat(0, self.assignments())
    rows = tf.to_int64(tf.range(0, self._num_examples))
    indices = tf.concat(1, [tf.expand_dims(rows, 1),
                            tf.expand_dims(assignments, 1)])
    self._scores = tf.gather_nd(self._all_scores, indices)
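The quantity computed per example $x$ and cluster $c$ is the Mahalanobis distance

$$d_c(x) = \sqrt{(x - \mu_c)^\top\, \Sigma_c^{-1}\, (x - \mu_c)}.$$

The transposes arrange diff as [num_examples, 1, dims] batches, so the two batch_matmul calls produce one scalar distance per example.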
Example 3: build_predict
def build_predict(self, Xnew, full_cov=False):
    """
    Xnew is a data matrix, the points at which we want to predict.

    This method computes

        p(F* | Y)

    where F* are points on the GP at Xnew, and Y are noisy observations at X.
    """
    Kx = self.kern.K(self.X, Xnew)
    K = self.kern.K(self.X) + eye(self.num_data) * self.likelihood.variance
    L = tf.cholesky(K)
    A = tf.matrix_triangular_solve(L, Kx, lower=True)
    V = tf.matrix_triangular_solve(L, self.Y - self.mean_function(self.X))
    fmean = tf.matmul(tf.transpose(A), V) + self.mean_function(Xnew)
    if full_cov:
        fvar = self.kern.K(Xnew) - tf.matmul(tf.transpose(A), A)
        shape = tf.pack([1, 1, tf.shape(self.Y)[1]])
        fvar = tf.tile(tf.expand_dims(fvar, 2), shape)
    else:
        fvar = self.kern.Kdiag(Xnew) - tf.reduce_sum(tf.square(A), 0)
        fvar = tf.tile(tf.reshape(fvar, (-1, 1)), [1, self.Y.shape[1]])
    return fmean, fvar
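In math: with $LL^\top = K_{XX} + \sigma^2 I$, $A = L^{-1}K_{X*}$ and $V = L^{-1}(Y - m(X))$, the code returns the standard GP posterior

$$\mathbb{E}[F_*] = A^\top V + m(X_*) = K_{*X}(K_{XX} + \sigma^2 I)^{-1}(Y - m(X)) + m(X_*),$$
$$\mathrm{Cov}[F_*] = K_{**} - A^\top A = K_{**} - K_{*X}(K_{XX} + \sigma^2 I)^{-1}K_{X*},$$

using the two triangular solves in place of an explicit matrix inverse for numerical stability.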
Example 4: chebyshev5
def chebyshev5(self, x, L, Fout, K, regularization=False):
    N, M, Fin = x.get_shape()
    N, M, Fin = int(N), int(M), int(Fin)
    # Rescale Laplacian and store as a TF sparse tensor. Copy to not modify the shared L.
    L = scipy.sparse.csr_matrix(L)
    L = graph.rescale_L(L, lmax=2)
    L = L.tocoo()
    indices = np.column_stack((L.row, L.col))
    L = tf.SparseTensor(indices, L.data, L.shape)
    L = tf.sparse_reorder(L)
    # Transform to Chebyshev basis
    x0 = tf.transpose(x, perm=[1, 2, 0])  # M x Fin x N
    x0 = tf.reshape(x0, [M, Fin*N])  # M x Fin*N
    x = tf.expand_dims(x0, 0)  # 1 x M x Fin*N
    def concat(x, x_):
        x_ = tf.expand_dims(x_, 0)  # 1 x M x Fin*N
        return tf.concat(0, [x, x_])  # K x M x Fin*N
    if K > 1:
        x1 = tf.sparse_tensor_dense_matmul(L, x0)
        x = concat(x, x1)
    for k in range(2, K):
        x2 = 2 * tf.sparse_tensor_dense_matmul(L, x1) - x0  # M x Fin*N
        x = concat(x, x2)
        x0, x1 = x1, x2
    x = tf.reshape(x, [K, M, Fin, N])  # K x M x Fin x N
    x = tf.transpose(x, perm=[3, 1, 2, 0])  # N x M x Fin x K
    x = tf.reshape(x, [N*M, Fin*K])  # N*M x Fin*K
    # Filter: Fin*Fout filters of order K, i.e. one filterbank per feature pair.
    W = self._weight_variable([Fin*K, Fout], regularization=regularization)
    x = tf.matmul(x, W)  # N*M x Fout
    return tf.reshape(x, [N, M, Fout])  # N x M x Fout
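The loop builds the Chebyshev polynomial basis of the rescaled graph Laplacian applied to the signal (the ChebNet construction of Defferrard et al.):

$$\bar{x}_0 = x, \qquad \bar{x}_1 = \tilde{L}x, \qquad \bar{x}_k = 2\tilde{L}\,\bar{x}_{k-1} - \bar{x}_{k-2},$$

where $\tilde{L} = 2L/\lambda_{\max} - I$ is what graph.rescale_L produces, mapping the Laplacian's spectrum into $[-1, 1]$ so the recurrence is stable.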
Example 5: body
def body(self, features):
    hp = self.hparams
    block_fns = {
        "residual": residual_block,
        "bottleneck": bottleneck_block,
    }
    assert hp.block_fn in block_fns

    inputs = features["inputs"]

    data_format = "channels_last"
    if hp.use_nchw:
        # Convert from channels_last (NHWC) to channels_first (NCHW). This
        # provides a large performance boost on GPU.
        inputs = tf.transpose(inputs, [0, 3, 1, 2])
        data_format = "channels_first"

    out = resnet_v2(
        inputs,
        block_fns[hp.block_fn],
        hp.layer_sizes,
        hp.filter_sizes,
        data_format,
        is_training=hp.mode == tf.estimator.ModeKeys.TRAIN)

    if hp.use_nchw:
        out = tf.transpose(out, [0, 2, 3, 1])
    return out
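Note that the output perm [0, 2, 3, 1] is exactly the inverse permutation of the input perm [0, 3, 1, 2]. For any perm, the inverse can be recovered with np.argsort; a small illustrative snippet, not part of the original model:

import numpy as np

perm = [0, 3, 1, 2]                # NHWC -> NCHW
inv_perm = list(np.argsort(perm))  # [0, 2, 3, 1]: NCHW -> NHWC
assert [perm[i] for i in inv_perm] == [0, 1, 2, 3]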
Example 6: bboxes_jaccard
def bboxes_jaccard(bbox_ref, bboxes, name=None):
    """Compute jaccard score between a reference box and a collection
    of bounding boxes.

    Args:
      bbox_ref: (N, 4) or (4,) Tensor with reference bounding box(es).
      bboxes: (N, 4) Tensor, collection of bounding boxes.

    Return:
      (N,) Tensor with Jaccard scores.
    """
    with tf.name_scope(name, 'bboxes_jaccard'):
        # Should be more efficient to first transpose.
        bboxes = tf.transpose(bboxes)
        bbox_ref = tf.transpose(bbox_ref)
        # Intersection bbox and volume.
        int_ymin = tf.maximum(bboxes[0], bbox_ref[0])
        int_xmin = tf.maximum(bboxes[1], bbox_ref[1])
        int_ymax = tf.minimum(bboxes[2], bbox_ref[2])
        int_xmax = tf.minimum(bboxes[3], bbox_ref[3])
        h = tf.maximum(int_ymax - int_ymin, 0.)
        w = tf.maximum(int_xmax - int_xmin, 0.)
        # Volumes.
        inter_vol = h * w
        union_vol = -inter_vol \
            + (bboxes[2] - bboxes[0]) * (bboxes[3] - bboxes[1]) \
            + (bbox_ref[2] - bbox_ref[0]) * (bbox_ref[3] - bbox_ref[1])
        jaccard = tfe_math.safe_divide(inter_vol, union_vol, 'jaccard')
        return jaccard
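The Jaccard (IoU) score computed above is, with the union expanded by inclusion-exclusion,

$$J(A, B) = \frac{\mathrm{area}(A \cap B)}{\mathrm{area}(A) + \mathrm{area}(B) - \mathrm{area}(A \cap B)},$$

which is why union_vol is assembled as the sum of the two box areas minus inter_vol. The transposes turn the (N, 4) box tensors into four coordinate rows so each coordinate can be addressed as bboxes[i].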
Example 7: soft_alignment
def soft_alignment(U_AP, raw_question_rep, raw_answer_rep, tokens_question_non_zero, tokens_answer_non_zero):
    """Calculate the AP soft-alignment matrix (in a batch-friendly fashion)

    :param U_AP: the AP similarity matrix (to be learned)
    :param raw_question_rep:
    :param raw_answer_rep:
    :param tokens_question_non_zero:
    :param tokens_answer_non_zero:
    :return:
    """
    answer_transposed = tf.transpose(raw_answer_rep, [0, 2, 1])

    # Unfortunately, there is no clean way in TF to multiply a 3d tensor with a 2d tensor. We need to perform some
    # reshaping. Compare solution 2 on
    # http://stackoverflow.com/questions/38235555/tensorflow-matmul-of-input-matrix-with-batch-data
    raw_question_rep_flat = tf.reshape(raw_question_rep, [-1, tf.shape(raw_question_rep)[2]])
    QU_flat = tf.matmul(raw_question_rep_flat, U_AP)
    QU = tf.reshape(QU_flat, [-1, tf.shape(raw_question_rep)[1], tf.shape(raw_question_rep)[2]])
    QUA = tf.batch_matmul(QU, answer_transposed)
    G = tf.nn.tanh(QUA)

    # We are now removing all the fields of G that belong to zero padding. To achieve this, we are determining these
    # fields and adding a value of -2 to all of them (which is guaranteed to result in a smaller number than the
    # minimum of G, which is -1).
    additions_G_question = tf.transpose(
        tf.reshape((tokens_question_non_zero - 1) * 2, [-1, 1, tf.shape(tokens_question_non_zero)[1]]),
        [0, 2, 1]
    )
    additions_G_answer = tf.reshape((tokens_answer_non_zero - 1) * 2, [-1, 1, tf.shape(tokens_answer_non_zero)[1]])

    # G_non_zero contains values of less than -1 for all fields which have a relation to zero-padded token positions.
    G_non_zero = G + additions_G_question + additions_G_answer

    return G_non_zero
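Since TF 1.0, tf.einsum (and tf.matmul's implicit batching, which replaced tf.batch_matmul) can express the 3-D x 2-D product without the reshape round trip. A sketch of an equivalent formulation, not taken from the original repository:

# QU[b, q, e] = sum_d raw_question_rep[b, q, d] * U_AP[d, e]
QU = tf.einsum('bqd,de->bqe', raw_question_rep, U_AP)
# tf.matmul batches over the leading dimension, replacing tf.batch_matmul
G = tf.nn.tanh(tf.matmul(QU, raw_answer_rep, transpose_b=True))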
Example 8: compute_pairwise_distances
def compute_pairwise_distances(x, y):
    """Computes the squared pairwise Euclidean distances between x and y.

    Args:
      x: a tensor of shape [num_x_samples, num_features]
      y: a tensor of shape [num_y_samples, num_features]

    Returns:
      a distance matrix of dimensions [num_x_samples, num_y_samples].

    Raises:
      ValueError: if the inputs do not match the specified dimensions.
    """
    if not len(x.get_shape()) == len(y.get_shape()) == 2:
        raise ValueError('Both inputs should be matrices.')

    if x.get_shape().as_list()[1] != y.get_shape().as_list()[1]:
        raise ValueError('The number of features should be the same.')

    norm = lambda x: tf.reduce_sum(tf.square(x), 1)

    # By making the `inner' dimensions of the two matrices equal to 1 using
    # broadcasting, we are essentially subtracting every pair of rows of x and y.
    # x will be num_samples x num_features x 1,
    # and y will be 1 x num_features x num_samples (after broadcasting).
    # After the subtraction we will get a
    # num_x_samples x num_features x num_y_samples matrix.
    # The resulting dist will be of shape num_y_samples x num_x_samples,
    # and thus we need to transpose it again.
    return tf.transpose(norm(tf.expand_dims(x, 2) - tf.transpose(y)))
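A quick NumPy sanity check of the broadcasting trick, with toy shapes and hypothetical data:

import numpy as np

x = np.random.rand(3, 5)  # num_x_samples x num_features
y = np.random.rand(4, 5)  # num_y_samples x num_features

# (3, 5, 1) - (5, 4) broadcasts to (3, 5, 4); summing squares over axis 1 gives (3, 4).
d = ((x[:, :, None] - y.T) ** 2).sum(axis=1)
ref = ((x[:, None, :] - y[None, :, :]) ** 2).sum(axis=2)
assert np.allclose(d, ref)  # d[i, j] == ||x_i - y_j||^2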
Example 9: inputs
def inputs(path):
    whole = read_csv(FLAGS.batch_size, path)
    # Stack the first max_sentence_len CSV columns into [max_len, batch],
    # then transpose to [batch, max_len].
    features = tf.transpose(tf.pack(whole[0:FLAGS.max_sentence_len]))
    label = tf.one_hot(
        tf.transpose(tf.pack(whole[FLAGS.max_sentence_len])),
        depth=2)
    return features, label
Example 10: _parser
def _parser(serialized_example):
    """Parses a single tf.Example into image and label tensors."""
    features = tf.parse_single_example(
        serialized_example,
        features={
            "image": tf.FixedLenFeature([], tf.string),
            "label": tf.FixedLenFeature([], tf.int64),
        })
    image = tf.decode_raw(features["image"], tf.uint8)
    # Initially reshaping to [H, W, C] does not work.
    image = tf.reshape(image, [NUM_CHANNEL, IMAGE_HEIGHT, IMAGE_WIDTH])
    # This is needed for `tf.image.resize_image_with_crop_or_pad`.
    image = tf.transpose(image, [1, 2, 0])
    image = tf.cast(image, dtype)
    label = tf.cast(features["label"], tf.int32)
    if data_aug:
        image = tf.image.resize_image_with_crop_or_pad(image, IMAGE_HEIGHT + 4,
                                                       IMAGE_WIDTH + 4)
        image = tf.random_crop(image, [IMAGE_HEIGHT, IMAGE_WIDTH, NUM_CHANNEL])
        image = tf.image.random_flip_left_right(image)
    if data_format == "channels_first":
        image = tf.transpose(image, [2, 0, 1])
    if div255:
        image /= 255.
    return image, label
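The reshape-to-[C, H, W]-then-transpose pattern exists because records in this style (presumably CIFAR-like binaries) store each image plane-major: all red bytes, then all green, then all blue. A toy NumPy illustration with hypothetical values:

import numpy as np

# A 1x2 RGB image stored plane-major: R-plane, G-plane, B-plane.
buf = np.array([10, 11, 20, 21, 30, 31], dtype=np.uint8)
chw = buf.reshape(3, 1, 2)    # [C, H, W]
hwc = chw.transpose(1, 2, 0)  # [H, W, C]
assert hwc[0, 0].tolist() == [10, 20, 30]  # pixel (0, 0) as (R, G, B)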
Example 11: lid_term
def lid_term(logits, batch_size=100):
    """Calculate LID loss term for a minibatch of logits

    :param logits:
    :return:
    """
    # y_pred = tf.nn.softmax(logits)
    y_pred = logits

    # calculate pairwise distance
    r = tf.reduce_sum(y_pred * y_pred, 1)
    # turn r into column vector
    r1 = tf.reshape(r, [-1, 1])
    # note: a ones matrix is added, keeping D strictly positive before the sqrt
    D = r1 - 2 * tf.matmul(y_pred, tf.transpose(y_pred)) + tf.transpose(r1) + \
        tf.ones([batch_size, batch_size])

    # find the k nearest neighbors
    D1 = -tf.sqrt(D)
    D2, _ = tf.nn.top_k(D1, k=21, sorted=True)
    D3 = -D2[:, 1:]  # drop the first column: each point's distance to itself

    # divide each row by its largest (k-th neighbor) distance
    m = tf.transpose(tf.multiply(tf.transpose(D3), 1.0 / D3[:, -1]))
    v_log = tf.reduce_sum(tf.log(m + 1e-9), axis=1)  # to avoid nan
    lids = -20 / v_log

    ## batch normalize lids
    # lids = tf.nn.l2_normalize(lids, dim=0, epsilon=1e-12)

    return lids
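The final expression is the maximum-likelihood estimator of local intrinsic dimensionality,

$$\widehat{\mathrm{LID}}(x) = -\left(\frac{1}{k}\sum_{i=1}^{k}\log\frac{r_i(x)}{r_k(x)}\right)^{-1},$$

where $r_i$ is the distance to the $i$-th nearest neighbor. Here $k = 20$ (top_k requests 21 values because the nearest "neighbor" is the point itself), so -20 / v_log is exactly this formula.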
Example 12: skew
def skew(inputs, scope="skew"):
    with tf.name_scope(scope):
        batch, height, width, channel = get_shape(inputs)  # [batch, height, width, channel]
        rows = tf.split(1, height, inputs)  # [batch, 1, width, channel]

        new_width = width + height - 1
        new_rows = []

        for idx, row in enumerate(rows):
            transposed_row = tf.transpose(tf.squeeze(row, [1]), [0, 2, 1])  # [batch, channel, width]
            squeezed_row = tf.reshape(transposed_row, [-1, width])  # [batch*channel, width]
            padded_row = tf.pad(squeezed_row, ((0, 0), (idx, height - 1 - idx)))  # [batch*channel, width*2-1]
            unsqueezed_row = tf.reshape(padded_row, [-1, channel, new_width])  # [batch, channel, width*2-1]
            untransposed_row = tf.transpose(unsqueezed_row, [0, 2, 1])  # [batch, width*2-1, channel]

            assert get_shape(untransposed_row) == [batch, new_width, channel], "wrong shape of skewed row"
            new_rows.append(untransposed_row)

        outputs = tf.pack(new_rows, axis=1, name="output")
        assert get_shape(outputs) == [None, height, new_width, channel], "wrong shape of skewed output"

        logger.debug('[skew] %s : %s %s -> %s %s'
                     % (scope, inputs.name, inputs.get_shape(), outputs.name, outputs.get_shape()))
        return outputs
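The skew shifts row i right by i positions, so image diagonals become columns of the output; this is the input preparation used by the Diagonal BiLSTM in PixelRNN. A toy single-channel NumPy version of the same operation:

import numpy as np

x = np.arange(9).reshape(3, 3)  # one 3x3 feature map
h, w = x.shape
skewed = np.zeros((h, w + h - 1), dtype=x.dtype)
for i in range(h):
    skewed[i, i:i + w] = x[i]   # row i shifted right by i
print(skewed)
# [[0 1 2 0 0]
#  [0 3 4 5 0]
#  [0 0 6 7 8]]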
Example 13: _multichannel_image_summary
def _multichannel_image_summary(name, images, perm=[0, 3, 1, 2], max_summary_images=16):
    _min = tf.reduce_min(images)
    _max = tf.reduce_max(images)
    # Min-max normalize to [0, 255] (the original subtracted with tf.add, a typo).
    _ = tf.mul(tf.div(tf.sub(images, _min), tf.sub(_max, _min)), 255.0)
    _ = tf.transpose(_, perm=perm)  # e.g. NHWC -> NCHW
    shape = _.get_shape().as_list()
    # Reshape to [N*C, H, W, 1] so every channel is summarized as a grayscale image.
    # (The original transposed `_` a second time here, which scrambles the layout.)
    tf.image_summary(name,
                     tf.reshape(_, [reduce(lambda x, y: x * y, shape) / (shape[3] * shape[2]),
                                    shape[2], shape[3], 1]),
                     max_images=max_summary_images)
Example 14: channel_wise_fc_layer
def channel_wise_fc_layer(bottom, name, bias=True):
    """
    channel-wise fully connected layer
    """
    _, width, height, n_feat_map = bottom.get_shape().as_list()
    input_reshape = tf.reshape(bottom, [-1, width*height, n_feat_map])  # order='C'
    input_transpose = tf.transpose(input_reshape, [2, 0, 1])  # n_feat_map * batch * d

    with tf.variable_scope(name):
        W = tf.get_variable(
            "W",
            shape=[n_feat_map, width*height, width*height],  # n_feat_map * d * d_filter
            initializer=tf.truncated_normal_initializer(0., 0.005))
        output = tf.batch_matmul(input_transpose, W)  # n_feat_map * batch * d_filter

        if bias:
            b = tf.get_variable(
                "b",
                shape=width*height,
                initializer=tf.constant_initializer(0.))
            output = tf.nn.bias_add(output, b)

    output_transpose = tf.transpose(output, [1, 2, 0])  # batch * d_filter * n_feat_map
    output_reshape = tf.reshape(output_transpose, [-1, width, height, n_feat_map])
    return output_reshape
Example 15: runFiniteDifferences
def runFiniteDifferences(self, shapes, dtypes=(tf.float32, tf.float64),
                         scalarTest=False):
    with self.test_session(use_gpu=False):
        for shape in shapes:
            for batch in False, True:
                for dtype in dtypes:
                    if not scalarTest:
                        x = tf.constant(np.random.randn(shape[0], shape[1]), dtype)
                        tensor = tf.matmul(x, tf.transpose(x)) / shape[0]
                    else:
                        # This is designed to be a faster test for larger matrices.
                        x = tf.constant(np.random.randn(), dtype)
                        R = tf.constant(np.random.randn(shape[0], shape[1]), dtype)
                        e = tf.mul(R, x)
                        tensor = tf.matmul(e, tf.transpose(e)) / shape[0]
                    # Inner-most matrices in tensor are positive definite.
                    if batch:
                        tensor = tf.tile(tf.expand_dims(tensor, 0), [4, 1, 1])
                        op = tf.batch_cholesky
                    else:
                        op = tf.cholesky
                    if not scalarTest:
                        y = op(tensor)
                    else:
                        y = tf.reduce_mean(op(tensor))
                    error = tf.test.compute_gradient_error(x, x._shape_as_list(), y,
                                                           y._shape_as_list())
                    tf.logging.info("error = %f", error)
                    if dtype == tf.float64:
                        self.assertLess(error, 1e-5)
                    else:
                        self.assertLess(error, 3e-3)
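The test relies on $XX^\top$ being positive definite so that the Cholesky factorization exists: for any nonzero $v$,

$$v^\top XX^\top v = \|X^\top v\|^2 \ge 0,$$

with equality only when $X^\top v = 0$, which for a random Gaussian $X$ with at least as many columns as rows almost surely never happens.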