This article collects typical usage examples of the Python method tensorflow.keras.backend.expand_dims. If you have been wondering what backend.expand_dims does, how to use it, and what it looks like in real code, the curated samples below may help. You can also explore the other methods of the tensorflow.keras.backend module.
The following presents 15 code examples of backend.expand_dims, sorted by popularity by default.
Example 1: sequence_masking
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def sequence_masking(x, mask, mode=0, axis=None):
    """Conditionally mask a sequence tensor.
    mask: a 0/1 matrix shaped (batch_size, seq_len);
    mode: if 0, multiply x by the mask directly;
          if 1, subtract a large positive number at the padded positions.
    axis: the axis of the sequence dimension, 1 by default.
    """
    if mask is None or mode not in [0, 1]:
        return x
    else:
        if axis is None:
            axis = 1
        if axis == -1:
            axis = K.ndim(x) - 1
        assert axis > 0, 'axis must be greater than 0'
        for _ in range(axis - 1):
            mask = K.expand_dims(mask, 1)
        for _ in range(K.ndim(x) - K.ndim(mask) - axis + 1):
            mask = K.expand_dims(mask, K.ndim(mask))
        if mode == 0:
            return x * mask
        else:
            return x - (1 - mask) * 1e12
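A minimal usage sketch (shapes assumed for illustration), runnable eagerly under TF 2.x with K imported as above:

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.random.rand(2, 4, 8))                  # (batch_size, seq_len, hidden)
mask = K.constant([[1., 1., 1., 0.], [1., 1., 0., 0.]])  # (batch_size, seq_len)
y = sequence_masking(x, mask, mode=0, axis=1)            # padded positions are zeroed out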
Example 2: pool1d
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def pool1d(
    x,
    pool_size,
    strides=1,
    padding='valid',
    data_format=None,
    pool_mode='max'
):
    """Pooling over a sequence of vectors.
    """
    x = K.expand_dims(x, 1)
    x = K.pool2d(
        x,
        pool_size=(1, pool_size),
        strides=(1, strides),
        padding=padding,
        data_format=data_format,
        pool_mode=pool_mode
    )
    return x[:, 0]
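A minimal usage sketch (shapes assumed), showing max pooling over the time axis:

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.random.rand(2, 10, 8))  # (batch, steps, features)
y = pool1d(x, pool_size=2, strides=2)     # -> shape (2, 5, 8)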
Example 3: _rotation_matrix_zyz
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def _rotation_matrix_zyz(self, params):
    phi = params[0] * 2 * np.pi - np.pi
    theta = params[1] * 2 * np.pi - np.pi
    psi_t = params[2] * 2 * np.pi - np.pi
    loc_r = params[3:6] * 2 - 1  # translation, rescaled to [-1, 1]
    a1 = self._rotation_matrix_axis(2, psi_t)  # first rotate about the z axis by psi_t
    a2 = self._rotation_matrix_axis(1, theta)  # then about the y axis by theta
    a3 = self._rotation_matrix_axis(2, phi)    # finally about the z axis by phi
    rm = K.dot(K.dot(a3, a2), a1)
    rm = tf.transpose(rm)
    c = K.dot(-rm, K.expand_dims(loc_r))
    rm = K.flatten(rm)
    theta = K.concatenate([rm[:3], c[0], rm[3:6], c[1], rm[6:9], c[2]])
    return theta
Example 4: _mask_rotation_matrix_zyz
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def _mask_rotation_matrix_zyz(self, params):
    phi = params[0] * 2 * np.pi - np.pi
    theta = params[1] * 2 * np.pi - np.pi
    psi_t = params[2] * 2 * np.pi - np.pi
    loc_r = params[3:6] * 0  # the magnitude of the Fourier transform is translation-invariant
    a1 = self._rotation_matrix_axis(2, psi_t)
    a2 = self._rotation_matrix_axis(1, theta)
    a3 = self._rotation_matrix_axis(2, phi)
    rm = K.dot(K.dot(a3, a2), a1)
    rm = tf.transpose(rm)
    c = K.dot(-rm, K.expand_dims(loc_r))
    rm = K.flatten(rm)
    theta = K.concatenate([rm[:3], c[0], rm[3:6], c[1], rm[6:9], c[2]])
    return theta
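Both rotation examples rely on a helper self._rotation_matrix_axis(dim, theta) that is not shown here. A minimal sketch of what such a helper could look like, assuming it returns the 3x3 rotation matrix about the x (0), y (1), or z (2) axis for a scalar angle tensor:

import tensorflow as tf

def _rotation_matrix_axis(self, dim, theta):
    # hypothetical helper: 3x3 rotation matrix about axis `dim` (0=x, 1=y, 2=z)
    c, s = tf.cos(theta), tf.sin(theta)
    one, zero = tf.ones_like(theta), tf.zeros_like(theta)
    if dim == 0:
        rows = [[one, zero, zero], [zero, c, -s], [zero, s, c]]
    elif dim == 1:
        rows = [[c, zero, s], [zero, one, zero], [-s, zero, c]]
    else:
        rows = [[c, -s, zero], [s, c, zero], [zero, zero, one]]
    return tf.stack([tf.stack(r) for r in rows])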
Example 5: _batch_mgrid
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def _batch_mgrid(self, n_batch, *args, **kwargs):
    """
    Create a batch of orthogonal grids, similar to np.mgrid.

    Parameters
    ----------
    n_batch : int
        number of grids to create
    args : int
        number of points on each axis
    low : float
        minimum coordinate value
    high : float
        maximum coordinate value

    Returns
    -------
    grids : tf.Tensor [n_batch, len(args), args[0], ...]
        batch of orthogonal grids
    """
    grid = self._mgrid(*args, **kwargs)
    grid = tf.expand_dims(grid, 0)
    grids = tf.tile(grid, [n_batch] + [1 for _ in range(len(args) + 1)])
    return grids
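The single-grid helper self._mgrid is not shown. A minimal sketch of such a helper, assuming it mirrors np.mgrid with coordinates normalized to [low, high]:

import tensorflow as tf

def _mgrid(self, *args, low=-1.0, high=1.0):
    # hypothetical helper: orthogonal coordinate grid of shape (len(args), args[0], ...)
    coords = [tf.linspace(float(low), float(high), n) for n in args]
    return tf.stack(tf.meshgrid(*coords, indexing='ij'))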
Example 6: _build_tf_cosine_similarity
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def _build_tf_cosine_similarity(max_rank=0, offset=1, eps=1e-12):
    # We build the graph (see utils.generic_utils.tf_recall_at_k for the original implementation):
    tf_db = K.placeholder(ndim=2, dtype=K.floatx())           # database representations (D x N)
    tf_labels = K.placeholder(ndim=1, dtype=K.floatx())       # and their labels
    tf_batch_query = K.placeholder(ndim=2, dtype=K.floatx())  # query batch (n x D), batched in case of memory issues
    batch_labels = K.placeholder(ndim=2, dtype=K.floatx())    # and their labels
    all_representations_T = K.expand_dims(tf_db, axis=0)             # 1 x D x N
    batch_representations = K.expand_dims(tf_batch_query, axis=0)    # 1 x n x D
    sim = K.batch_dot(batch_representations, all_representations_T)  # 1 x n x N
    sim = K.squeeze(sim, axis=0)                                     # n x N
    sim /= tf.linalg.norm(tf_batch_query, axis=1, keepdims=True) + eps
    sim /= tf.linalg.norm(tf_db, axis=0, keepdims=True) + eps
    if max_rank > 0:  # computing r@K or mAP@K
        index_ranking = tf.nn.top_k(sim, k=max_rank + offset).indices
    else:
        index_ranking = tf.contrib.framework.argsort(sim, axis=-1, direction='DESCENDING', stable=True)
    top_k = index_ranking[:, offset:]
    tf_ranking = tf.gather(tf_labels, top_k)
    return tf_db, tf_labels, tf_batch_query, batch_labels, tf_ranking
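Note that this snippet mixes TF 1.x APIs (K.placeholder, tf.contrib), and tf.contrib was removed in TensorFlow 2.x. Under TF 2.x (or TF >= 1.14), tf.argsort provides the same ordering; a hedged one-line substitution:

index_ranking = tf.argsort(sim, axis=-1, direction='DESCENDING', stable=True)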
Example 7: call
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def call(self, x, **kwargs):
    assert isinstance(x, list)
    inp_a, inp_b = x
    last_state = K.expand_dims(inp_b[:, -1, :], 1)
    m = []
    for i in range(self.output_dim):
        outp_a = inp_a * self.W[i]
        outp_last = last_state * self.W[i]
        outp_a = K.l2_normalize(outp_a, -1)
        outp_last = K.l2_normalize(outp_last, -1)
        outp = K.batch_dot(outp_a, outp_last, axes=[2, 2])
        m.append(outp)
    if self.output_dim > 1:
        persp = K.concatenate(m, 2)
    else:
        persp = m[0]
    return [persp, persp]
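Reading the shapes, this layer performs a multi-perspective cosine-matching step: each of the output_dim perspectives reweights the features with its own vector self.W[i] before taking the cosine similarity between every timestep of inp_a and the last state of inp_b, so persp ends up with shape (batch, timesteps, output_dim).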
Example 8: _find_maxima
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def _find_maxima(x, coordinate_scale=1, confidence_scale=255.0):
    x = K.cast(x, K.floatx())
    col_max = K.max(x, axis=1)
    row_max = K.max(x, axis=2)
    maxima = K.max(col_max, 1)
    maxima = K.expand_dims(maxima, -2) / confidence_scale
    cols = K.cast(K.argmax(col_max, -2), K.floatx())
    rows = K.cast(K.argmax(row_max, -2), K.floatx())
    cols = K.expand_dims(cols, -2) * coordinate_scale
    rows = K.expand_dims(rows, -2) * coordinate_scale
    maxima = K.concatenate([cols, rows, maxima], -2)
    return maxima
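A minimal usage sketch (shapes assumed), locating the per-channel peak in a batch of heatmaps:

import numpy as np
from tensorflow.keras import backend as K

heatmaps = K.constant(np.random.rand(2, 64, 48, 17))  # (batch, rows, cols, channels)
peaks = _find_maxima(heatmaps)  # -> (2, 3, 17): column, row, and peak confidence per channel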
Example 9: _spectrogram_mono
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def _spectrogram_mono(self, x):
    '''x.shape : (None, 1, len_src).
    Returns a batch of mono power-spectrograms (a 4D tensor).'''
    x = K.permute_dimensions(x, [0, 2, 1])
    x = K.expand_dims(x, 3)  # add a dummy dimension (channel axis)
    subsample = (self.n_hop, 1)
    output_real = K.conv2d(
        x,
        self.dft_real_kernels,
        strides=subsample,
        padding=self.padding,
        data_format='channels_last',
    )
    output_imag = K.conv2d(
        x,
        self.dft_imag_kernels,
        strides=subsample,
        padding=self.padding,
        data_format='channels_last',
    )
    output = output_real ** 2 + output_imag ** 2
    # now the shape is (batch_sample, n_frame, 1, freq)
    if self.image_data_format == 'channels_last':
        output = K.permute_dimensions(output, [0, 3, 1, 2])
    else:
        output = K.permute_dimensions(output, [0, 2, 3, 1])
    return output
Example 10: relative_logits_1d
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def relative_logits_1d(self, q, rel_k, H, W, transpose_mask):
    rel_logits = tf.einsum('bhxyd,md->bhxym', q, rel_k)
    rel_logits = K.reshape(rel_logits, [-1, self.num_heads * H, W, 2 * W - 1])
    rel_logits = self.rel_to_abs(rel_logits)
    rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H, W, W])
    rel_logits = K.expand_dims(rel_logits, axis=3)
    rel_logits = K.tile(rel_logits, [1, 1, 1, H, 1, 1])
    rel_logits = K.permute_dimensions(rel_logits, transpose_mask)
    rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H * W, H * W])
    return rel_logits
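The helper self.rel_to_abs is not shown here. In relative self-attention implementations it is usually the padding-and-reshape ("skewing") trick that converts logits indexed by relative position, shape (B, heads, L, 2L-1), into absolute-position logits of shape (B, heads, L, L). A sketch of that standard trick, under the assumption that this class follows it:

import tensorflow as tf

def rel_to_abs(x):
    # x: (B, Nh, L, 2L-1) relative logits -> (B, Nh, L, L) absolute logits
    shape = tf.shape(x)
    b, nh, l = shape[0], shape[1], shape[2]
    x = tf.concat([x, tf.zeros((b, nh, l, 1), dtype=x.dtype)], axis=3)  # pad one column
    flat_x = tf.reshape(x, [b, nh, l * 2 * l])
    flat_x = tf.concat([flat_x, tf.zeros((b, nh, l - 1), dtype=x.dtype)], axis=2)
    final_x = tf.reshape(flat_x, [b, nh, l + 1, 2 * l - 1])
    return final_x[:, :, :l, l - 1:]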
Example 11: call
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def call(self, inputs):
    # To channels first (NCHW)
    x = tf.transpose(inputs[0], [0, 3, 1, 2])
    # Get weight and bias modulations.
    # Make sure w's shape is compatible with self.kernel.
    w = K.expand_dims(K.expand_dims(K.expand_dims(inputs[1], axis=1), axis=1), axis=-1)
    # Add a minibatch dimension to the weights.
    wo = K.expand_dims(self.kernel, axis=0)
    # Modulate.
    weights = wo * (w + 1)
    # Demodulate.
    if self.demod:
        d = K.sqrt(K.sum(K.square(weights), axis=[1, 2, 3], keepdims=True) + 1e-8)
        weights = weights / d
    # Reshape/scale input.
    x = tf.reshape(x, [1, -1, x.shape[2], x.shape[3]])  # Fused => reshape minibatch to convolution groups.
    w = tf.reshape(tf.transpose(weights, [1, 2, 3, 0, 4]),
                   [weights.shape[1], weights.shape[2], weights.shape[3], -1])
    x = tf.nn.conv2d(x, w,
                     strides=self.strides,
                     padding="SAME",
                     data_format="NCHW")
    # Reshape/scale output.
    x = tf.reshape(x, [-1, self.filters, x.shape[2], x.shape[3]])  # Fused => reshape convolution groups back to minibatch.
    x = tf.transpose(x, [0, 2, 3, 1])  # back to channels last (NHWC)
    return x
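This is the weight modulation/demodulation pattern popularized by StyleGAN2: each sample's style vector scales the convolution weights (modulation), the weights are then rescaled by their L2 norm over the input-channel and spatial axes (demodulation), and the per-sample convolutions are fused into a single grouped convolution by folding the batch into the channel dimension.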
Example 12: _single_matmul
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def _single_matmul(self, x, mult):
    x = K.expand_dims(x, -2)           # (..., 1, d_in)
    y = tf.matmul(x, mult)[..., 0, :]  # (..., d_out)
    return y
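A minimal standalone sketch (self dropped, shapes assumed) showing the effect: the matrix mult is applied along the last axis of x. It assumes a TF version where tf.matmul broadcasts batch dimensions (TF >= 1.14):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

def single_matmul(x, mult):
    x = K.expand_dims(x, -2)              # (..., 1, d_in)
    return tf.matmul(x, mult)[..., 0, :]  # (..., d_out)

x = K.constant(np.random.rand(2, 5, 4))  # (batch, steps, d_in)
mult = K.constant(np.random.rand(4, 3))  # (d_in, d_out)
y = single_matmul(x, mult)               # -> (2, 5, 3)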
Example 13: call
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def call(self, x):
    # get the new mean and count
    this_bs_int = K.shape(x)[0]
    new_mean, new_count = _mean_update(self.mean, self.count, x, self.cap)
    # update op
    updates = [(self.count, new_count), (self.mean, new_mean)]
    self.add_update(updates, x)
    # prep for broadcasting :(
    p = tf.concat((K.reshape(this_bs_int, (1,)), K.shape(self.mean)), 0)
    z = tf.ones(p)
    # the first few thousand samples should not matter much towards this cost
    return K.minimum(1., new_count / self.cap) * (z * K.expand_dims(new_mean, 0))
Example 14: call
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def call(self, inputs, **kwargs):
    """Student's t-distribution, the same as used in the t-SNE algorithm.
    q_ij = 1/(1+dist(x_i, u_j)^2), then normalize it.
    Arguments:
        inputs: the variable containing the data, shape=(n_samples, n_features)
    Returns:
        q: Student's t-distribution, or soft labels for each sample, shape=(n_samples, n_clusters)
    """
    q = 1.0 / (1.0 + (K.sum(K.square(K.expand_dims(inputs, axis=1) - self.clusters), axis=2) / self.alpha))
    q **= (self.alpha + 1.0) / 2.0
    q = K.transpose(K.transpose(q) / K.sum(q, axis=1))
    return q
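A minimal eager sketch (sizes assumed) of the same soft-assignment computation outside the layer:

import numpy as np
from tensorflow.keras import backend as K

inputs = K.constant(np.random.rand(4, 8))    # (n_samples, n_features)
clusters = K.constant(np.random.rand(3, 8))  # (n_clusters, n_features)
alpha = 1.0
q = 1.0 / (1.0 + K.sum(K.square(K.expand_dims(inputs, axis=1) - clusters), axis=2) / alpha)
q **= (alpha + 1.0) / 2.0
q = K.transpose(K.transpose(q) / K.sum(q, axis=1))  # each row of q now sums to 1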
Example 15: mi_loss
# Required import: from tensorflow.keras import backend [as alias]
# Alternatively: from tensorflow.keras.backend import expand_dims [as alias]
def mi_loss(self, y_true, y_pred):
    """Mutual information loss computed from the joint
    distribution matrix and the marginals.
    Arguments:
        y_true (tensor): not used, since this is unsupervised learning
        y_pred (tensor): stack of softmax predictions for
            the Siamese latent vectors (Z and Zbar)
    """
    size = self.args.batch_size
    n_labels = y_pred.shape[-1]
    # the first half of the batch is Z
    Z = y_pred[0: size, :]
    Z = K.expand_dims(Z, axis=2)
    # the second half is Zbar
    Zbar = y_pred[size: y_pred.shape[0], :]
    Zbar = K.expand_dims(Zbar, axis=1)
    # compute the joint distribution (Eq 10.3.2 & .3)
    P = K.batch_dot(Z, Zbar)
    P = K.sum(P, axis=0)
    # enforce a symmetric joint distribution (Eq 10.3.4)
    P = (P + K.transpose(P)) / 2.0
    # normalize the total probability to 1.0
    P = P / K.sum(P)
    # marginal distributions (Eq 10.3.5 & .6)
    Pi = K.expand_dims(K.sum(P, axis=1), axis=1)
    Pj = K.expand_dims(K.sum(P, axis=0), axis=0)
    Pi = K.repeat_elements(Pi, rep=n_labels, axis=1)
    Pj = K.repeat_elements(Pj, rep=n_labels, axis=0)
    P = K.clip(P, K.epsilon(), np.finfo(float).max)
    Pi = K.clip(Pi, K.epsilon(), np.finfo(float).max)
    Pj = K.clip(Pj, K.epsilon(), np.finfo(float).max)
    # negative MI loss (Eq 10.3.7)
    neg_mi = K.sum((P * (K.log(Pi) + K.log(Pj) - K.log(P))))
    # each head contributes 1/n_heads to the total loss
    return neg_mi / self.args.heads
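In other words, neg_mi is the negative of the mutual information I(Z; Zbar) = sum_ij P_ij * log(P_ij / (P_i * P_j)) computed from the joint distribution P and its marginals Pi, Pj: minimizing this loss maximizes the mutual information between the two heads' soft cluster assignments, with each of the self.args.heads heads contributing an equal share.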