This article collects typical usage examples of the Python method tensorflow.keras.backend.shape. If you have been wondering what backend.shape does and how to use it, the curated code samples below may help. You can also explore other members of the containing module, tensorflow.keras.backend.
The following presents 15 code examples of the backend.shape method, listed roughly in order of popularity.
Example 1: split_heads_2d
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def split_heads_2d(self, ip):
    tensor_shape = K.shape(ip)
    # batch, height, width, channels for axis = -1
    tensor_shape = [tensor_shape[i] for i in range(len(self._shape))]
    batch = tensor_shape[0]
    height = tensor_shape[1]
    width = tensor_shape[2]
    channels = tensor_shape[3]
    # Save the spatial tensor dimensions
    self._batch = batch
    self._height = height
    self._width = width
    ret_shape = K.stack([batch, height, width, self.num_heads, channels // self.num_heads])
    split = K.reshape(ip, ret_shape)
    transpose_axes = (0, 3, 1, 2, 4)
    split = K.permute_dimensions(split, transpose_axes)
    return split
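To see the same reshape/transpose bookkeeping outside the layer class, here is a minimal standalone sketch; the input tensor and num_heads value are hypothetical, not taken from the original layer:

import tensorflow as tf
from tensorflow.keras import backend as K

ip = tf.zeros((2, 8, 8, 16))  # hypothetical input: [batch, height, width, channels]
num_heads = 4

s = K.shape(ip)
batch, height, width, channels = s[0], s[1], s[2], s[3]
# Fold channels into (num_heads, channels // num_heads), then move heads forward
ret_shape = K.stack([batch, height, width, num_heads, channels // num_heads])
split = K.permute_dimensions(K.reshape(ip, ret_shape), (0, 3, 1, 2, 4))
print(split.shape)  # (2, 4, 8, 8, 4) -> [batch, num_heads, height, width, depth_per_head]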
Example 2: relative_logits
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def relative_logits(self, q):
    shape = K.shape(q)
    # [batch, num_heads, H, W, depth_v]
    shape = [shape[i] for i in range(5)]
    height = shape[2]
    width = shape[3]
    rel_logits_w = self.relative_logits_1d(q, self.key_relative_w, height, width,
                                           transpose_mask=[0, 1, 2, 4, 3, 5])
    rel_logits_h = self.relative_logits_1d(
        K.permute_dimensions(q, [0, 1, 3, 2, 4]),
        self.key_relative_h, width, height,
        transpose_mask=[0, 1, 4, 2, 5, 3])
    return rel_logits_h, rel_logits_w
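The permutation [0, 1, 3, 2, 4] simply swaps the H and W axes, so the same 1D relative-logits routine can be reused along the other spatial dimension. A quick shape check, with a hypothetical query tensor:

import tensorflow as tf
from tensorflow.keras import backend as K

q = tf.zeros((2, 4, 8, 6, 16))  # hypothetical [batch, num_heads, H, W, depth]
q_t = K.permute_dimensions(q, [0, 1, 3, 2, 4])
print(q_t.shape)  # (2, 4, 6, 8, 16) -- H and W swapped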
Example 3: build
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def build(self, input_shape):
    assert isinstance(input_shape, list)
    F = input_shape[0][-1]
    if self.channels is None:
        self.channels = F
    self.kernel_emb = self.add_weight(shape=(F, self.channels),
                                      name='kernel_emb',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
    self.kernel_pool = self.add_weight(shape=(F, self.k),
                                       name='kernel_pool',
                                       initializer=self.kernel_initializer,
                                       regularizer=self.kernel_regularizer,
                                       constraint=self.kernel_constraint)
    super().build(input_shape)
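The same build() pattern, inferring F from the incoming shape and falling back to it when channels is unset, works in any custom layer. A self-contained sketch with a hypothetical TinyPool layer (single input instead of a list, simplified initializers):

import tensorflow as tf

class TinyPool(tf.keras.layers.Layer):
    def __init__(self, k, channels=None):
        super().__init__()
        self.k = k
        self.channels = channels

    def build(self, input_shape):
        F = input_shape[-1]  # feature dimension only known at build time
        if self.channels is None:
            self.channels = F
        self.kernel_emb = self.add_weight(shape=(F, self.channels), name='kernel_emb',
                                          initializer='glorot_uniform')
        self.kernel_pool = self.add_weight(shape=(F, self.k), name='kernel_pool',
                                           initializer='glorot_uniform')
        super().build(input_shape)

    def call(self, x):
        return tf.matmul(x, self.kernel_emb), tf.matmul(x, self.kernel_pool)

out_emb, out_pool = TinyPool(k=3)(tf.zeros((5, 7)))
print(out_emb.shape, out_pool.shape)  # (5, 7) (5, 3)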
Example 4: loss
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def loss(self, y_true, y_pred):
    # get the discriminator scores for the true and fake images
    disc_true = self.disc(y_true)
    disc_pred = self.disc(y_pred)
    # sample x_hat by sampling along the line between true and pred
    # z = tf.placeholder(tf.float32, shape=[None, 1])
    # shp = y_true.get_shape()[0]
    # WARNING: this should really be shape=[batch_size, 1],
    # but self.batch_size does not work here, since it's not None!
    alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])
    diff = y_pred - y_true
    interp = y_true + alpha * diff
    # take the gradient of D(x_hat)
    gradients = K.gradients(self.disc(interp), [interp])[0]
    grad_pen = K.mean(K.square(K.sqrt(K.sum(K.square(gradients), axis=1)) - 1))
    # compute the WGAN-GP loss
    return (K.mean(disc_pred) - K.mean(disc_true)) + self.lambda_gp * grad_pen
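The key detail above is that alpha is drawn once per sample with shape [batch, 1, 1, 1], so broadcasting interpolates every image in the batch independently. A minimal sketch of just the interpolation step, with hypothetical tensors standing in for the real and generated batches:

import tensorflow as tf
from tensorflow.keras import backend as K

y_true = tf.zeros((8, 32, 32, 3))  # hypothetical batch of real images
y_pred = tf.ones((8, 32, 32, 3))   # hypothetical batch of generated images

alpha = K.random_uniform(shape=[K.shape(y_pred)[0], 1, 1, 1])  # one draw per sample
interp = y_true + alpha * (y_pred - y_true)
print(interp.shape)  # (8, 32, 32, 3)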
Example 5: _single_batch_trf
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def _single_batch_trf(self, vol):
    # vol should be vol_shape + [nb_features]
    # self.trf should be vol_shape + [nb_features] + [ndims]
    vol_shape = vol.shape.as_list()
    nb_input_dims = vol_shape[-1]
    # this is inefficient...
    new_vols = [None] * self.output_features
    for j in range(self.output_features):
        new_vols[j] = tf.zeros(vol_shape[:-1], dtype=tf.float32)
        for i in range(nb_input_dims):
            trf_vol = transform(vol[..., i], self.trf[..., i, j, :] * self.trf_mult,
                                interp_method=self.interp_method)
            trf_vol = tf.reshape(trf_vol, vol_shape[:-1])
            new_vols[j] += trf_vol * self.mult[..., i, j]
        if self.use_bias:
            new_vols[j] += self.bias[..., j]
    return tf.stack(new_vols, -1)
Example 6: build
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def build(self, input_shape):
    # Create the running mean and count.
    # These are weights because plain variables do not get saved with the model,
    # and we'd like these numbers to be stored when the model is saved.
    # They must, however, be marked untrainable.
    self.mean = self.add_weight(name='mean',
                                shape=input_shape[1:],
                                initializer='zeros',
                                trainable=False)
    self.count = self.add_weight(name='count',
                                 shape=[1],
                                 initializer='zeros',
                                 trainable=False)
    # self.mean = K.zeros(input_shape[1:], name='mean')
    # self.count = K.variable(0.0, name='count')
    super(MeanStream, self).build(input_shape)  # Be sure to call this somewhere!
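The point of add_weight(trainable=False), as opposed to a bare variable, is that the state is tracked by the layer (and so serialized with the model) while staying out of the trainable parameters. A quick check on a bare Layer instance, used here only as a hypothetical host for the weight:

import tensorflow as tf

layer = tf.keras.layers.Layer()
mean = layer.add_weight(name='mean', shape=(3,), initializer='zeros', trainable=False)
print(len(layer.weights), len(layer.trainable_weights))  # 1 0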
Example 7: _mean_update
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def _mean_update(pre_mean, pre_count, x, pre_cap=None):
    # compute this batch's stats
    this_sum = tf.reduce_sum(x, 0)
    this_bs = tf.cast(K.shape(x)[0], 'float32')  # this batch size
    # increase the count and compute the blending weight
    new_count = pre_count + this_bs
    alpha = this_bs / K.minimum(new_count, pre_cap)
    # compute the new mean; note that once we reach the cap (e.g. 1000),
    # the 'previous mean' matters less and less
    new_mean = pre_mean * (1 - alpha) + (this_sum / this_bs) * alpha
    return (new_mean, new_count)
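To make the update rule concrete, a small numeric sketch in plain Python (all values hypothetical). Once new_count reaches the cap, alpha stays pinned at this_bs / cap, so older batches are gradually forgotten rather than weighted exactly:

pre_mean, pre_count, cap = 10.0, 90.0, 100.0
batch = [20.0] * 10                      # hypothetical batch of 10 samples

this_bs = len(batch)
new_count = pre_count + this_bs          # 100.0
alpha = this_bs / min(new_count, cap)    # 10 / 100 = 0.1
new_mean = pre_mean * (1 - alpha) + (sum(batch) / this_bs) * alpha
print(new_mean)                          # 10*0.9 + 20*0.1 = 11.0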
Example 8: _generate_bert_mask
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def _generate_bert_mask(self, inputs):
    def _numpy_generate_contiguous_mask(array):
        mask = np.random.random(array.shape) < (1 / self.avg_seq_len)
        mask = np.cumsum(mask, 1)
        seqvals = np.max(mask)
        # increase the probability because there are fewer sequences
        mask_prob = self.percentage * array.shape[1] / seqvals
        vals_to_mask = np.arange(seqvals)[np.random.random((seqvals,)) < mask_prob]
        indices_to_mask = np.isin(mask, vals_to_mask)
        mask[indices_to_mask] = 1
        mask[~indices_to_mask] = 0
        return np.asarray(mask, np.bool)

    bert_mask = tf.py_func(_numpy_generate_contiguous_mask, [inputs], tf.bool)
    bert_mask.set_shape(inputs.shape)
    return bert_mask
Example 9: call
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def call(self, inputs):
    """
    Args:
        sequence: tf.Tensor[int32] - Amino acid sequence,
            a padded tensor with shape [batch_size, MAX_PROTEIN_LENGTH]
        protein_length: tf.Tensor[int32] - Length of each protein in the sequence,
            a tensor with shape [batch_size]
    Output:
        amino_acid_probs: tf.Tensor[float32] - Probability of each type of amino acid,
            a tensor with shape [batch_size, MAX_PROTEIN_LENGTH, n_symbols]
    """
    sequence = inputs['primary']
    protein_length = inputs['protein_length']
    sequence_mask = rk.utils.convert_sequence_length_to_sequence_mask(
        sequence, protein_length)
    masked_sequence, bert_mask = self.bert_mask(sequence, sequence_mask)
    inputs['original_sequence'] = sequence
    inputs['primary'] = masked_sequence
    inputs['bert_mask'] = bert_mask
    return inputs
Example 10: convert_sequence_vocab
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def convert_sequence_vocab(self, sequence):
    PFAM_TO_BEPLER_ENCODED = {encoding: UNIPROT_BEPLER.get(aa, 20)
                              for aa, encoding in PFAM_VOCAB.items()}
    PFAM_TO_BEPLER_ENCODED[PFAM_VOCAB['<PAD>']] = 0

    def to_uniprot_bepler(seq):
        new_seq = np.zeros_like(seq)
        for pfam_encoding, uniprot_encoding in PFAM_TO_BEPLER_ENCODED.items():
            new_seq[seq == pfam_encoding] = uniprot_encoding
        return new_seq

    new_sequence = tf.py_func(to_uniprot_bepler, [sequence], sequence.dtype)
    new_sequence.set_shape(sequence.shape)
    return new_sequence
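The boolean-mask remapping trick inside to_uniprot_bepler is easy to check in plain NumPy, here with a hypothetical 3-symbol table in place of PFAM_TO_BEPLER_ENCODED:

import numpy as np

table = {0: 0, 1: 7, 2: 4}  # hypothetical old-encoding -> new-encoding map

seq = np.array([2, 1, 0, 2])
new_seq = np.zeros_like(seq)
for old, new in table.items():
    new_seq[seq == old] = new  # rewrite every occurrence of `old` at once
print(new_seq)  # [4 7 0 4]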
Example 11: sampling
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def sampling(args):
    """Reparameterization trick: sample from an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)
    # Returns:
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    # K is the keras backend
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
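In a typical Keras VAE this function is attached through a Lambda layer so the stochastic sampling becomes part of the graph. A minimal sketch using the sampling function above, with hypothetical encoder dimensions:

from tensorflow.keras.layers import Input, Dense, Lambda

latent_dim = 2
x = Input(shape=(784,))
h = Dense(64, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)
# Wrap sampling so z is differentiable w.r.t. z_mean and z_log_var
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])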
Example 12: sampling
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def sampling(args):
    """Implements the reparameterization trick by sampling
    from a Gaussian with zero mean and std=1.

    Arguments:
        args (tensor): mean and log of variance of Q(z|X)
    Returns:
        sampled latent vector (tensor)
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example 13: sampling
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def sampling(args):
    """Reparameterization trick: sample from an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)
    # Returns:
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example 14: build
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def build(self, input_shape):
    assert len(input_shape) == 3
    assert input_shape[0] == input_shape[1]
    assert input_shape[0][:-1] == input_shape[2][:-1]
    input_dim, features_dim = input_shape[0][-1], input_shape[2][-1]
    if self.use_intermediate_layer:
        self.first_kernel = self.add_weight(
            shape=(features_dim, self.intermediate_dim),
            initializer="random_uniform", name='first_kernel')
        self.first_bias = self.add_weight(
            shape=(self.intermediate_dim,),
            initializer="random_uniform", name='first_bias')
    self.features_kernel = self.add_weight(
        shape=(features_dim, 1), initializer="random_uniform", name='kernel')
    self.features_bias = self.add_weight(
        shape=(1,), initializer=Constant(self.bias_initializer), name='bias')
    if self.use_dimension_bias:
        self.dimensions_bias = self.add_weight(
            shape=(input_dim,), initializer="random_uniform", name='dimension_bias')
    super(WeightedCombinationLayer, self).build(input_shape)
Example 15: gather_indexes
# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import shape [as alias]
def gather_indexes(A: tf.Tensor, B: tf.Tensor) -> tf.Tensor:
    """
    Args:
        A: a tensor with data
        B: an integer tensor with indexes
    Returns:
        `answer`, a tensor such that ``answer[i, j] = A[i, B[i, j]]``.
        In case `B` is one-dimensional, the output is ``answer[i] = A[i, B[i]]``.
    """
    are_indexes_one_dim = (kb.ndim(B) == 1)
    if are_indexes_one_dim:
        B = tf.expand_dims(B, -1)
    first_dim_indexes = tf.expand_dims(tf.range(tf.shape(B)[0]), -1)
    first_dim_indexes = tf.tile(first_dim_indexes, [1, tf.shape(B)[1]])
    indexes = tf.stack([first_dim_indexes, B], axis=-1)
    answer = tf.gather_nd(A, indexes)
    if are_indexes_one_dim:
        answer = answer[:, 0]
    return answer
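A quick usage check of gather_indexes in eager mode, with hypothetical values covering both the 2D and the 1D index case:

import tensorflow as tf

A = tf.constant([[10, 11, 12],
                 [20, 21, 22]])
B = tf.constant([[2, 0],
                 [1, 1]])
print(gather_indexes(A, B).numpy())                 # [[12 10]
                                                    #  [21 21]]
print(gather_indexes(A, tf.constant([2, 0])).numpy())  # [12 20]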