This article collects typical usage examples of the tensorflow.square method in Python. If you are wondering what tensorflow.square does or how to use it, the curated code samples below may help. You can also explore further usage examples from the tensorflow module.
The following lists 15 code examples of tensorflow.square, sorted by popularity by default.
Example 1: call
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def call(self, inputs, **kwargs):
    if K.ndim(inputs) != 3:
        raise ValueError(
            "Unexpected inputs dimensions %d, expect to be 3 dimensions"
            % (K.ndim(inputs)))
    concated_embeds_value = inputs
    square_of_sum = tf.square(tf.reduce_sum(
        concated_embeds_value, axis=1, keep_dims=True))
    sum_of_square = tf.reduce_sum(
        concated_embeds_value * concated_embeds_value, axis=1, keep_dims=True)
    cross_term = square_of_sum - sum_of_square
    cross_term = 0.5 * tf.reduce_sum(cross_term, axis=2, keep_dims=False)
    return cross_term
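For intuition, here is a minimal standalone sketch (not part of the original layer) of the same FM-style cross term, computed on a dummy [batch, field, embedding] tensor in TF1 graph mode; the shapes and random values are made up for illustration:
import numpy as np
import tensorflow as tf

embeds = tf.constant(np.random.rand(2, 3, 4), dtype=tf.float32)   # [batch, field, dim]
square_of_sum = tf.square(tf.reduce_sum(embeds, axis=1, keep_dims=True))
sum_of_square = tf.reduce_sum(embeds * embeds, axis=1, keep_dims=True)
cross_term = 0.5 * tf.reduce_sum(square_of_sum - sum_of_square, axis=2)  # [batch, 1]

with tf.Session() as sess:
    print(sess.run(cross_term).shape)  # (2, 1)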
Example 2: minibatch_stddev_layer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def minibatch_stddev_layer(x, group_size=4):
    with tf.variable_scope('MinibatchStddev'):
        group_size = tf.minimum(group_size, tf.shape(x)[0])     # Minibatch must be divisible by (or smaller than) group_size.
        s = x.shape                                             # [NCHW]  Input shape.
        y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]])   # [GMCHW] Split minibatch into M groups of size G.
        y = tf.cast(y, tf.float32)                              # [GMCHW] Cast to FP32.
        y -= tf.reduce_mean(y, axis=0, keep_dims=True)          # [GMCHW] Subtract mean over group.
        y = tf.reduce_mean(tf.square(y), axis=0)                # [MCHW]  Calc variance over group.
        y = tf.sqrt(y + 1e-8)                                   # [MCHW]  Calc stddev over group.
        y = tf.reduce_mean(y, axis=[1,2,3], keep_dims=True)     # [M111]  Take average over fmaps and pixels.
        y = tf.cast(y, x.dtype)                                 # [M111]  Cast back to original data type.
        y = tf.tile(y, [group_size, 1, s[2], s[3]])             # [N1HW]  Replicate over group and pixels.
        return tf.concat([x, y], axis=1)                        # [NCHW]  Append as new fmap.
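A hedged usage sketch, assuming TF1 graph mode and the function defined above: feeding a dummy [N, C, H, W] activation tensor through the layer should append exactly one extra feature map:
import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(8, 16, 4, 4), dtype=tf.float32)  # [NCHW]
y = minibatch_stddev_layer(x, group_size=4)

with tf.Session() as sess:
    print(sess.run(y).shape)  # (8, 17, 4, 4): one stddev feature map appended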
Example 3: set_input_shape
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def set_input_shape(self, input_shape):
    batch_size, dim = input_shape
    self.input_shape = [batch_size, dim]
    self.output_shape = [batch_size, self.num_hid]
    if self.init_mode == "norm":
        init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
        init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
                                                   keep_dims=True))
        init = init * self.init_scale
    elif self.init_mode == "uniform_unit_scaling":
        scale = np.sqrt(3. / dim)
        init = tf.random_uniform([dim, self.num_hid], dtype=tf.float32,
                                 minval=-scale, maxval=scale)
    else:
        raise ValueError(self.init_mode)
    self.W = PV(init)
    if self.use_bias:
        self.b = PV((np.zeros((self.num_hid,))
                     + self.init_b).astype('float32'))
Example 4: _compute_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the (encoded) predicted locations of objects.
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        code_size] representing the regression targets.
      weights: a float tensor of shape [batch_size, num_anchors].

    Returns:
      loss: a (scalar) tensor representing the value of the loss function
        or a float tensor of shape [batch_size, num_anchors]
    """
    weighted_diff = (prediction_tensor - target_tensor) * tf.expand_dims(
        weights, 2)
    square_diff = 0.5 * tf.square(weighted_diff)
    if self._anchorwise_output:
        return tf.reduce_sum(square_diff, 2)
    return tf.reduce_sum(square_diff)
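Below is a standalone sketch of the same weighted L2 localization loss outside the class, assuming anchorwise output; the shapes and random values are illustrative only:
import numpy as np
import tensorflow as tf

preds = tf.constant(np.random.rand(2, 5, 4), dtype=tf.float32)    # [batch, anchors, code_size]
targets = tf.constant(np.random.rand(2, 5, 4), dtype=tf.float32)
weights = tf.constant(np.ones((2, 5)), dtype=tf.float32)

weighted_diff = (preds - targets) * tf.expand_dims(weights, 2)
anchorwise_loss = tf.reduce_sum(0.5 * tf.square(weighted_diff), 2)  # [batch, anchors]

with tf.Session() as sess:
    print(sess.run(anchorwise_loss).shape)  # (2, 5)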
Example 5: diag_gaussian_log_likelihood
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def diag_gaussian_log_likelihood(z, mu=0.0, logvar=0.0):
    """Log-likelihood under a Gaussian distribution with diagonal covariance.

    Returns the log-likelihood for each dimension. One should sum the
    results for the log-likelihood under the full multidimensional model.

    Args:
      z: The value at which to compute the log-likelihood.
      mu: The mean of the Gaussian.
      logvar: The log variance of the Gaussian.

    Returns:
      The log-likelihood under the Gaussian model.
    """
    return -0.5 * (logvar + np.log(2 * np.pi) +
                   tf.square((z - mu) / tf.exp(0.5 * logvar)))
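A quick sanity-check sketch, assuming the function above is in scope: at z == mu with logvar == 0 the per-dimension log-likelihood reduces to -0.5 * log(2*pi) ≈ -0.919:
import numpy as np
import tensorflow as tf

ll = diag_gaussian_log_likelihood(tf.constant(0.0), mu=0.0, logvar=0.0)
with tf.Session() as sess:
    print(sess.run(ll))  # approximately -0.919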
Example 6: gaussian_pos_log_likelihood
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def gaussian_pos_log_likelihood(unused_mean, logvar, noise):
    """Gaussian log-likelihood function for a posterior in a VAE.

    Note: This function is specialized for a posterior distribution that has
    the form z = mean + sigma * noise.

    Args:
      unused_mean: ignored.
      logvar: The log variance of the distribution.
      noise: The noise used in the sampling of the posterior.

    Returns:
      The log-likelihood under the Gaussian model.
    """
    # ln N(z; mean, sigma) = - ln(sigma) - 0.5 ln 2pi - noise^2 / 2
    return -0.5 * (logvar + np.log(2 * np.pi) + tf.square(noise))
Example 7: log_prob_action
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def log_prob_action(self, action, logits,
                    sampling_dim, act_dim, act_type):
    """Calculate log-prob of action sampled from distribution."""
    if self.env_spec.is_discrete(act_type):
        act_log_prob = tf.reduce_sum(
            tf.one_hot(action, act_dim) * tf.nn.log_softmax(logits), -1)
    elif self.env_spec.is_box(act_type):
        # Integer division keeps the slice index an int under Python 3.
        means = logits[:, :sampling_dim // 2]
        std = logits[:, sampling_dim // 2:]
        act_log_prob = (- 0.5 * tf.log(2 * np.pi * tf.square(std))
                        - 0.5 * tf.square(action - means) / tf.square(std))
        act_log_prob = tf.reduce_sum(act_log_prob, -1)
    else:
        assert False
    return act_log_prob
Example 8: cv_squared
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def cv_squared(x):
    """The squared coefficient of variation of a sample.

    Useful as a loss to encourage a positive distribution to be more uniform.
    Epsilons added for numerical stability.
    Returns 0 for an empty Tensor.

    Args:
      x: a `Tensor`.

    Returns:
      a `Scalar`.
    """
    epsilon = 1e-10
    float_size = tf.to_float(tf.size(x)) + epsilon
    mean = tf.reduce_sum(x) / float_size
    variance = tf.reduce_sum(tf.square(x - mean)) / float_size
    return variance / (tf.square(mean) + epsilon)
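A hedged usage sketch with made-up inputs: a perfectly uniform load yields cv_squared close to 0, while a skewed load yields a clearly positive value, which is what makes it useful as a load-balancing loss:
import tensorflow as tf

uniform = tf.constant([1.0, 1.0, 1.0, 1.0])
skewed = tf.constant([4.0, 0.0, 0.0, 0.0])

with tf.Session() as sess:
    print(sess.run([cv_squared(uniform), cv_squared(skewed)]))  # roughly [0.0, 3.0]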
Example 9: vae
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def vae(x, name, z_size):
    """Simple variational autoencoder without discretization.

    Args:
      x: Input to the discretization bottleneck.
      name: Name for the bottleneck scope.
      z_size: Number of bits used to produce discrete code; discrete codes range
        from 1 to 2**z_size.

    Returns:
      Latent code z, KL loss, mu and log_sigma.
    """
    with tf.variable_scope(name):
        mu = tf.layers.dense(x, z_size, name="mu")
        log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
        shape = common_layers.shape_list(x)
        epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
        z = mu + tf.exp(log_sigma / 2) * epsilon
        kl = 0.5 * tf.reduce_mean(
            tf.exp(log_sigma) + tf.square(mu) - 1. - log_sigma, axis=-1)
        free_bits = z_size // 4
        kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
        return z, kl_loss, mu, log_sigma
Example 10: vq_nearest_neighbor
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def vq_nearest_neighbor(x, means, soft_em=False, num_samples=10):
    """Find the nearest element in means to elements in x."""
    bottleneck_size = common_layers.shape_list(means)[0]
    x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
    means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
    scalar_prod = tf.matmul(x, means, transpose_b=True)
    dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
    if soft_em:
        x_means_idx = tf.multinomial(-dist, num_samples=num_samples)
        x_means_hot = tf.one_hot(
            x_means_idx, depth=common_layers.shape_list(means)[0])
        x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
    else:
        x_means_idx = tf.argmax(-dist, axis=-1)
        x_means_hot = tf.one_hot(x_means_idx, bottleneck_size)
    x_means_hot_flat = tf.reshape(x_means_hot, [-1, bottleneck_size])
    x_means = tf.matmul(x_means_hot_flat, means)
    e_loss = tf.reduce_mean(tf.square(x - tf.stop_gradient(x_means)))
    return x_means_hot, e_loss
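For reference, a standalone sketch (independent of common_layers, with made-up shapes) of the pairwise squared-distance identity used above, ||x - m||^2 = ||x||^2 + ||m||^2 - 2 x·m, followed by a hard nearest-code lookup:
import numpy as np
import tensorflow as tf

x = tf.constant(np.random.rand(6, 8), dtype=tf.float32)      # [num_vectors, hidden_dim]
means = tf.constant(np.random.rand(4, 8), dtype=tf.float32)  # [codebook_size, hidden_dim]

x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * tf.matmul(x, means, transpose_b=True)
nearest_code = tf.argmax(-dist, axis=-1)                     # index of the closest mean per vector

with tf.Session() as sess:
    print(sess.run(nearest_code))  # six indices in [0, 4)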
Example 11: setup_critic_optimizer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def setup_critic_optimizer(self):
    logger.info('setting up critic optimizer')
    normalized_critic_target_tf = tf.clip_by_value(normalize(self.critic_target, self.ret_rms), self.return_range[0], self.return_range[1])
    self.critic_loss = tf.reduce_mean(tf.square(self.normalized_critic_tf - normalized_critic_target_tf))
    if self.critic_l2_reg > 0.:
        critic_reg_vars = [var for var in self.critic.trainable_vars if 'kernel' in var.name and 'output' not in var.name]
        for var in critic_reg_vars:
            logger.info(' regularizing: {}'.format(var.name))
        logger.info(' applying l2 regularization with {}'.format(self.critic_l2_reg))
        critic_reg = tc.layers.apply_regularization(
            tc.layers.l2_regularizer(self.critic_l2_reg),
            weights_list=critic_reg_vars
        )
        self.critic_loss += critic_reg
    critic_shapes = [var.get_shape().as_list() for var in self.critic.trainable_vars]
    critic_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in critic_shapes])
    logger.info(' critic shapes: {}'.format(critic_shapes))
    logger.info(' critic params: {}'.format(critic_nb_params))
    self.critic_grads = U.flatgrad(self.critic_loss, self.critic.trainable_vars, clip_norm=self.clip_norm)
    self.critic_optimizer = MpiAdam(var_list=self.critic.trainable_vars,
                                    beta1=0.9, beta2=0.999, epsilon=1e-08)
Example 12: compute_mfcc
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """
    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)
    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97*audio[:, :-1],
                       np.zeros((batch_size, 1000), dtype=np.float32)), 1)
    # 2. Windowing into frames of 320 samples, overlapping
    windowed = tf.stack([audio[:, i:i+400] for i in range(0, size-320, 160)], 1)
    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))
    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted, axis=2) + 1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters]*batch_size, dtype=np.float32)) + 1e-30
    # 5. Take the log, then the DCT
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:, :, :26]
    # 6. Amplify high frequencies (liftering)
    _, nframes, ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22/2.)*np.sin(np.pi*n/22)
    feat = lift*feat
    width = feat.get_shape().as_list()[1]
    # 7. And now stick the log energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy), (-1, width, 1)), feat[:, :, 1:]), axis=2)
    return feat
Example 13: contrastive_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def contrastive_loss(self, y, d, batch_size):
    tmp = y * tf.square(d)
    # tmp = tf.mul(y, tf.square(d))
    tmp2 = (1 - y) * tf.square(tf.maximum((1 - d), 0))
    return tf.reduce_sum(tmp + tmp2) / batch_size / 2
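A standalone toy sketch of the same contrastive loss (the labels and distances below are made up): y == 1 marks a similar pair, whose distance is penalized directly; y == 0 marks a dissimilar pair, penalized only when its distance falls below the margin of 1:
import tensorflow as tf

y = tf.constant([1.0, 0.0])   # pair labels
d = tf.constant([0.2, 0.3])   # predicted distances
batch_size = 2
loss = tf.reduce_sum(y * tf.square(d) +
                     (1 - y) * tf.square(tf.maximum(1 - d, 0))) / batch_size / 2

with tf.Session() as sess:
    print(sess.run(loss))  # (0.04 + 0.49) / 4 = 0.1325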
Example 14: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units, l2_reg_lambda, batch_size, trainableEmbeddings):
    # Placeholders for input, output and dropout
    self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
    self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
    self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0, name="l2_loss")
    # Embedding layer
    with tf.name_scope("embedding"):
        self.W = tf.Variable(
            tf.constant(0.0, shape=[vocab_size, embedding_size]),
            trainable=trainableEmbeddings, name="W")
        self.embedded_words1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        self.embedded_words2 = tf.nn.embedding_lookup(self.W, self.input_x2)
        print(self.embedded_words1)
    # Run each side of the Siamese network through a stacked RNN
    with tf.name_scope("output"):
        self.out1 = self.stackedRNN(self.embedded_words1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
        self.out2 = self.stackedRNN(self.embedded_words2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
        self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
        self.distance = tf.div(self.distance, tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)), tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
        self.distance = tf.reshape(self.distance, [-1], name="distance")
    with tf.name_scope("loss"):
        self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)
    #### Accuracy computation is outside of this class.
    with tf.name_scope("accuracy"):
        self.temp_sim = tf.subtract(tf.ones_like(self.distance), tf.rint(self.distance), name="temp_sim")  # auto threshold 0.5
        correct_predictions = tf.equal(self.temp_sim, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
Example 15: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import square [as alias]
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units, l2_reg_lambda, batch_size):
    # Placeholders for input, output and dropout
    self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
    self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
    self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0, name="l2_loss")
    # Embedding layer
    with tf.name_scope("embedding"):
        self.W = tf.Variable(
            tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
            trainable=True, name="W")
        self.embedded_chars1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        # self.embedded_chars_expanded1 = tf.expand_dims(self.embedded_chars1, -1)
        self.embedded_chars2 = tf.nn.embedding_lookup(self.W, self.input_x2)
        # self.embedded_chars_expanded2 = tf.expand_dims(self.embedded_chars2, -1)
    # Run each side of the Siamese network through a bidirectional RNN
    with tf.name_scope("output"):
        self.out1 = self.BiRNN(self.embedded_chars1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
        self.out2 = self.BiRNN(self.embedded_chars2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
        self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
        self.distance = tf.div(self.distance, tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)), tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
        self.distance = tf.reshape(self.distance, [-1], name="distance")
    with tf.name_scope("loss"):
        self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)
    #### Accuracy computation is outside of this class.
    with tf.name_scope("accuracy"):
        self.temp_sim = tf.subtract(tf.ones_like(self.distance), tf.rint(self.distance), name="temp_sim")  # auto threshold 0.5
        correct_predictions = tf.equal(self.temp_sim, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")