This article collects typical, real-world usage examples of the Python method tensorflow.sqrt. If you have been asking yourself how tensorflow.sqrt works, how to call it, or what it looks like in practice, the curated examples below should help. You can also explore further usage examples from the tensorflow module.
The following shows 15 code examples of tensorflow.sqrt, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
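Before the project examples, here is a minimal standalone sketch of tf.sqrt itself. It is not taken from any of the projects below and assumes TensorFlow 1.x, which all of the examples on this page target: tf.sqrt computes the element-wise square root of a tensor.

import tensorflow as tf

# Element-wise square root; the input must have a floating-point dtype.
x = tf.constant([1.0, 4.0, 9.0])
y = tf.sqrt(x)

with tf.Session() as sess:
    print(sess.run(y))  # [1. 2. 3.]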
Example 1: minibatch_stddev_layer
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def minibatch_stddev_layer(x, group_size=4):
    with tf.variable_scope('MinibatchStddev'):
        group_size = tf.minimum(group_size, tf.shape(x)[0])    # Minibatch must be divisible by (or smaller than) group_size.
        s = x.shape                                            # [NCHW]  Input shape.
        y = tf.reshape(x, [group_size, -1, s[1], s[2], s[3]])  # [GMCHW] Split minibatch into M groups of size G.
        y = tf.cast(y, tf.float32)                             # [GMCHW] Cast to FP32.
        y -= tf.reduce_mean(y, axis=0, keep_dims=True)         # [GMCHW] Subtract mean over group.
        y = tf.reduce_mean(tf.square(y), axis=0)               # [MCHW]  Calc variance over group.
        y = tf.sqrt(y + 1e-8)                                  # [MCHW]  Calc stddev over group.
        y = tf.reduce_mean(y, axis=[1, 2, 3], keep_dims=True)  # [M111]  Take average over fmaps and pixels.
        y = tf.cast(y, x.dtype)                                # [M111]  Cast back to original data type.
        y = tf.tile(y, [group_size, 1, s[2], s[3]])            # [N1HW]  Replicate over group and pixels.
        return tf.concat([x, y], axis=1)                       # [NCHW]  Append as new fmap.
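As a quick smoke test of the layer above (a hedged sketch, not from the original repository), the snippet below feeds a dummy NCHW batch through it and checks that exactly one statistics feature map is appended:

import numpy as np
import tensorflow as tf

# Batch of 8 NCHW feature maps: 16 channels, 4x4 spatial; group_size 4 divides 8.
x = tf.placeholder(tf.float32, [8, 16, 4, 4])
y = minibatch_stddev_layer(x, group_size=4)

with tf.Session() as sess:
    out = sess.run(y, {x: np.random.randn(8, 16, 4, 4).astype(np.float32)})
    print(out.shape)  # (8, 17, 4, 4) -- one extra stddev channel appended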
#----------------------------------------------------------------------------
# Generator network used in the paper.
Example 2: set_input_shape
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def set_input_shape(self, input_shape):
    batch_size, rows, cols, input_channels = input_shape
    kernel_shape = tuple(self.kernel_shape) + (input_channels,
                                               self.output_channels)
    assert len(kernel_shape) == 4
    assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
    init = tf.random_normal(kernel_shape, dtype=tf.float32)
    # Normalize each filter to (approximately) unit L2 norm.
    init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init),
                                               axis=(0, 1, 2)))
    self.kernels = tf.Variable(init)
    self.b = tf.Variable(
        np.zeros((self.output_channels,)).astype('float32'))
    input_shape = list(input_shape)
    input_shape[0] = 1
    dummy_batch = tf.zeros(input_shape)
    dummy_output = self.fprop(dummy_batch)
    output_shape = [int(e) for e in dummy_output.get_shape()]
    output_shape[0] = batch_size
    self.output_shape = tuple(output_shape)
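The tf.sqrt call above implements per-filter L2 normalization. A hedged numpy analogue (illustration only, not from the repository) makes the effect visible:

import numpy as np

# 3x3 kernels, 8 input channels, 16 output channels.
init = np.random.randn(3, 3, 8, 16).astype(np.float32)
init = init / np.sqrt(1e-7 + np.sum(np.square(init), axis=(0, 1, 2)))
# Each of the 16 filters now has (approximately) unit squared norm.
print(np.sum(np.square(init), axis=(0, 1, 2)))  # ~1.0 everywhere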
Example 3: set_input_shape
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def set_input_shape(self, input_shape):
    batch_size, dim = input_shape
    self.input_shape = [batch_size, dim]
    self.output_shape = [batch_size, self.num_hid]
    if self.init_mode == "norm":
        init = tf.random_normal([dim, self.num_hid], dtype=tf.float32)
        init = init / tf.sqrt(1e-7 + tf.reduce_sum(tf.square(init), axis=0,
                                                   keep_dims=True))
        init = init * self.init_scale
    elif self.init_mode == "uniform_unit_scaling":
        scale = np.sqrt(3. / dim)
        init = tf.random_uniform([dim, self.num_hid], dtype=tf.float32,
                                 minval=-scale, maxval=scale)
    else:
        raise ValueError(self.init_mode)
    self.W = PV(init)
    if self.use_bias:
        self.b = PV((np.zeros((self.num_hid,))
                     + self.init_b).astype('float32'))
Example 4: _std
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def _std(self):
    """Computes the current estimate of the standard deviation.

    Note that the standard deviation is not defined until at least two
    samples were seen.

    Returns:
      Tensor of the current standard deviation.
    """
    variance = tf.cond(
        self._count > 1,
        lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
        lambda: tf.ones_like(self._var_sum) * float('nan'))
    # The epsilon corrects for small negative variance values caused by
    # the algorithm. It was empirically chosen to work with all environments
    # tested.
    return tf.sqrt(variance + 1e-4)
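_std assumes the enclosing class keeps a running count and a running sum of squared deviations. The fragment below is a minimal standalone sketch of that bookkeeping using Welford's online update; the attribute names mirror the snippet, but the update logic itself is an assumption about the surrounding class, not code taken from it:

import tensorflow as tf

# Hypothetical running-statistics state matching self._count / self._var_sum.
count = tf.Variable(0.0)
mean = tf.Variable(0.0)
var_sum = tf.Variable(0.0)  # sum of squared deviations from the running mean

def update(x):
    # Welford's online update, one scalar sample at a time.
    new_count = count + 1.0
    delta = x - mean
    new_mean = mean + delta / new_count
    new_var_sum = var_sum + delta * (x - new_mean)
    return tf.group(count.assign(new_count),
                    mean.assign(new_mean),
                    var_sum.assign(new_var_sum))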
Example 5: _dist_to_opt
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def _dist_to_opt(self):
    """Distance to optimum.

    Returns:
      D_t ops
    """
    dist_to_opt_ops = []
    # Running average of the norm of the gradient
    self._grad_norm = tf.sqrt(self._grad_norm_squared)
    avg_op = self._moving_averager.apply([self._grad_norm,])
    dist_to_opt_ops.append(avg_op)
    with tf.control_dependencies([avg_op]):
        self._grad_norm_avg = self._moving_averager.average(self._grad_norm)
        # Single-iteration distance estimation; note that
        # self._grad_norm_avg is per variable.
        self._d_t = self._grad_norm_avg / self._grad_norm_squared_avg
    # Running average of the distance
    avg_op = self._moving_averager.apply([self._d_t])
    dist_to_opt_ops.append(avg_op)
    with tf.control_dependencies([avg_op]):
        self._dist_to_opt_avg = tf.identity(
            self._moving_averager.average(self._d_t))
        if self._sparsity_debias:
            self._dist_to_opt_avg /= tf.sqrt(self._sparsity_avg)
    return dist_to_opt_ops  # D_t
Example 6: xception_exit
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def xception_exit(inputs):
    """Xception exit flow."""
    with tf.variable_scope("xception_exit"):
        x = inputs
        x_shape = x.get_shape().as_list()
        x_depth = x_shape[3]  # Hoisted so both branches below can use it.
        if x_shape[1] is None or x_shape[2] is None:
            length_float = tf.to_float(tf.shape(x)[1])
            length_float *= tf.to_float(tf.shape(x)[2])
            spatial_dim_float = tf.sqrt(length_float)
            spatial_dim = tf.to_int32(spatial_dim_float)
            x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
        elif x_shape[1] != x_shape[2]:
            spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2])))
            if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]:
                raise ValueError("Assumed inputs were square-able but they were "
                                 "not. Shape: %s" % x_shape)
            x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth])
        x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME")
        return tf.nn.relu(x)
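The elif branch turns a non-square spatial grid into a square one of the same total size. A quick worked illustration (hedged, not from the original project):

import math

# A 2 x 8 feature map has 16 spatial positions, which reshape to 4 x 4.
h, w = 2, 8
spatial_dim = int(math.sqrt(float(h * w)))
assert spatial_dim * spatial_dim == h * w  # 4 * 4 == 16, so the reshape is valid
print(spatial_dim)  # 4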
Example 7: embedding_matrix
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def embedding_matrix(vocab_size: int, dim: int,
                     name: str = None):
    with tf.name_scope(None, 'embedding-matrix'):
        # compute initialization parameters
        shape = (vocab_size - 1, dim)
        scale = tf.sqrt(1 / shape[0])

        # get or initialize embedding matrix
        w = tf.get_variable(
            name, shape,
            dtype=tf.float32,
            initializer=tf.random_uniform_initializer(
                minval=-scale, maxval=scale
            ),
            trainable=True
        )

        # 1st row should be zero and not be updated by backprop because of
        # zero padding.
        emb = tf.concat([
            tf.zeros((1, dim), dtype=tf.float32),
            w
        ], 0)
        return emb
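A hedged usage sketch (names and sizes are illustrative, not from the source project): index 0 acts as the padding token whose embedding stays frozen at zero, while rows 1..vocab_size-1 come from the trainable matrix.

emb = embedding_matrix(vocab_size=10000, dim=128, name='word-embedding')
tokens = tf.constant([[0, 42, 7]])             # 0 = padding index
vectors = tf.nn.embedding_lookup(emb, tokens)  # row 0 of emb is all zeros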
Example 8: conv2d
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def conv2d(self, input_, n_filters, k_size, padding='same'):
    if not self.cfg.weight_scale:
        return tf.layers.conv2d(input_, n_filters, k_size, padding=padding)
    # Runtime weight scaling: initialize with stddev 1, then multiply by the
    # He-init constant on every forward pass.
    n_feats_in = input_.get_shape().as_list()[-1]
    fan_in = k_size * k_size * n_feats_in
    c = tf.constant(np.sqrt(2. / fan_in), dtype=tf.float32)
    kernel_init = tf.random_normal_initializer(stddev=1.)
    w_shape = [k_size, k_size, n_feats_in, n_filters]
    w = tf.get_variable('kernel', shape=w_shape, initializer=kernel_init)
    w = c * w
    strides = [1, 1, 1, 1]
    net = tf.nn.conv2d(input_, w, strides, padding=padding.upper())
    b = tf.get_variable('bias', [n_filters],
                        initializer=tf.constant_initializer(0.))
    net = tf.nn.bias_add(net, b)
    return net
Example 9: get_loss
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def get_loss(predicted_transformation, batch_size, template_pointclouds_pl, source_pointclouds_pl):
    with tf.variable_scope('loss') as LossEvaluation:
        predicted_position = tf.slice(predicted_transformation, [0, 0], [batch_size, 3])
        predicted_quat = tf.slice(predicted_transformation, [0, 3], [batch_size, 4])

        # Normalize the predicted quaternion, guarding against division by zero.
        norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat), 1)
        norm_predicted_quat = tf.sqrt(norm_predicted_quat)
        norm_predicted_quat = tf.reshape(norm_predicted_quat, (batch_size, 1))
        const = tf.constant(0.0000001, shape=(batch_size, 1), dtype=tf.float32)
        norm_predicted_quat = tf.add(norm_predicted_quat, const)
        predicted_norm_quat = tf.divide(predicted_quat, norm_predicted_quat)

        transformed_predicted_point_cloud = helper.transformation_quat_tensor(
            source_pointclouds_pl, predicted_norm_quat, predicted_position)
        # loss = tf_util_loss.earth_mover(template_pointclouds_pl, transformed_predicted_point_cloud)
        loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_predicted_point_cloud)
    return loss
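The multi-step normalization above can be collapsed into one call. A hedged equivalent (assuming a TF version where tf.nn.l2_normalize takes axis; older releases call the argument dim), numerically similar but not bit-identical because the epsilon enters the formula differently:

# Normalizes each quaternion row to unit L2 norm with a built-in epsilon guard.
predicted_norm_quat = tf.nn.l2_normalize(predicted_quat, axis=1)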
Example 10: get_loss_b
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def get_loss_b(self, predicted_transformation, batch_size, template_pointclouds_pl, source_pointclouds_pl):
    with tf.variable_scope('loss') as LossEvaluation:
        predicted_position = tf.slice(predicted_transformation, [0, 0], [batch_size, 3])
        predicted_quat = tf.slice(predicted_transformation, [0, 3], [batch_size, 4])

        # Normalize the predicted quaternion, guarding against division by zero.
        norm_predicted_quat = tf.reduce_sum(tf.square(predicted_quat), 1)
        norm_predicted_quat = tf.sqrt(norm_predicted_quat)
        norm_predicted_quat = tf.reshape(norm_predicted_quat, (batch_size, 1))
        const = tf.constant(0.0000001, shape=(batch_size, 1), dtype=tf.float32)
        norm_predicted_quat = tf.add(norm_predicted_quat, const)
        predicted_norm_quat = tf.divide(predicted_quat, norm_predicted_quat)

        transformed_predicted_point_cloud = helper.transformation_quat_tensor(
            source_pointclouds_pl, predicted_norm_quat, predicted_position)
        # Use 1024 points to find the loss.
        # loss = tf_util_loss.earth_mover(template_pointclouds_pl, transformed_predicted_point_cloud)
        loss = tf_util_loss.chamfer(template_pointclouds_pl, transformed_predicted_point_cloud)
    return loss
Example 11: gelu
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def gelu(input_tensor):
    """Gaussian Error Linear Unit.

    This is a smoother version of the ReLU.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
      input_tensor: float Tensor to perform activation.

    Returns:
      `input_tensor` with the GELU activation applied.
    """
    cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
    return input_tensor * cdf
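For comparison, BERT-style code bases often use a tanh-based approximation of the same activation that avoids tf.erf. A hedged sketch of that approximation from the Hendrycks & Gimpel paper (not from this project):

import numpy as np
import tensorflow as tf

def gelu_approx(x):
    # Tanh approximation of the Gaussian CDF used in the exact gelu above.
    return 0.5 * x * (1.0 + tf.tanh(
        np.sqrt(2.0 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))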
Example 12: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units,
        l2_reg_lambda, batch_size, trainableEmbeddings):
    # Placeholders for input, output and dropout
    self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
    self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
    self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0, name="l2_loss")

    # Embedding layer
    with tf.name_scope("embedding"):
        self.W = tf.Variable(
            tf.constant(0.0, shape=[vocab_size, embedding_size]),
            trainable=trainableEmbeddings, name="W")
        self.embedded_words1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        self.embedded_words2 = tf.nn.embedding_lookup(self.W, self.input_x2)
        print(self.embedded_words1)

    # Run a stacked RNN over each side and compute the normalized distance.
    with tf.name_scope("output"):
        self.out1 = self.stackedRNN(self.embedded_words1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
        self.out2 = self.stackedRNN(self.embedded_words2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
        self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
        self.distance = tf.div(self.distance,
                               tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                                      tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
        self.distance = tf.reshape(self.distance, [-1], name="distance")

    with tf.name_scope("loss"):
        self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)

    #### Accuracy computation is outside of this class.
    with tf.name_scope("accuracy"):
        self.temp_sim = tf.subtract(tf.ones_like(self.distance), tf.rint(self.distance), name="temp_sim")  # auto threshold 0.5
        correct_predictions = tf.equal(self.temp_sim, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
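The distance above is the Euclidean distance between the two tower outputs, scaled by the sum of their norms; by the triangle inequality it always lies in [0, 1], which is what makes the rint-based 0.5 threshold in temp_sim meaningful. A hedged numpy check of that bound:

import numpy as np

o1, o2 = np.random.randn(4, 50), np.random.randn(4, 50)
d = np.sqrt(np.sum((o1 - o2) ** 2, axis=1))
d /= np.linalg.norm(o1, axis=1) + np.linalg.norm(o2, axis=1)
print(d.min() >= 0.0, d.max() <= 1.0)  # True True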
Example 13: __init__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def __init__(
        self, sequence_length, vocab_size, embedding_size, hidden_units,
        l2_reg_lambda, batch_size):
    # Placeholders for input, output and dropout
    self.input_x1 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x1")
    self.input_x2 = tf.placeholder(tf.int32, [None, sequence_length], name="input_x2")
    self.input_y = tf.placeholder(tf.float32, [None], name="input_y")
    self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    # Keeping track of l2 regularization loss (optional)
    l2_loss = tf.constant(0.0, name="l2_loss")

    # Embedding layer
    with tf.name_scope("embedding"):
        self.W = tf.Variable(
            tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
            trainable=True, name="W")
        self.embedded_chars1 = tf.nn.embedding_lookup(self.W, self.input_x1)
        self.embedded_chars2 = tf.nn.embedding_lookup(self.W, self.input_x2)

    # Run a bidirectional RNN over each side and compute the normalized distance.
    with tf.name_scope("output"):
        self.out1 = self.BiRNN(self.embedded_chars1, self.dropout_keep_prob, "side1", embedding_size, sequence_length, hidden_units)
        self.out2 = self.BiRNN(self.embedded_chars2, self.dropout_keep_prob, "side2", embedding_size, sequence_length, hidden_units)
        self.distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.out1, self.out2)), 1, keep_dims=True))
        self.distance = tf.div(self.distance,
                               tf.add(tf.sqrt(tf.reduce_sum(tf.square(self.out1), 1, keep_dims=True)),
                                      tf.sqrt(tf.reduce_sum(tf.square(self.out2), 1, keep_dims=True))))
        self.distance = tf.reshape(self.distance, [-1], name="distance")

    with tf.name_scope("loss"):
        self.loss = self.contrastive_loss(self.input_y, self.distance, batch_size)

    #### Accuracy computation is outside of this class.
    with tf.name_scope("accuracy"):
        self.temp_sim = tf.subtract(tf.ones_like(self.distance), tf.rint(self.distance), name="temp_sim")  # auto threshold 0.5
        correct_predictions = tf.equal(self.temp_sim, self.input_y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
Example 14: get_weight
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def get_weight(shape, gain=np.sqrt(2), use_wscale=False, fan_in=None):
    if fan_in is None:
        fan_in = np.prod(shape[:-1])
    std = gain / np.sqrt(fan_in)  # He init
    if use_wscale:
        # Equalized learning rate: store unit-variance weights, scale at runtime.
        wscale = tf.constant(np.float32(std), name='wscale')
        return tf.get_variable('weight', shape=shape,
                               initializer=tf.initializers.random_normal()) * wscale
    else:
        return tf.get_variable('weight', shape=shape,
                               initializer=tf.initializers.random_normal(0, std))
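Both branches produce effective weights with the same He-init standard deviation gain / sqrt(fan_in); use_wscale=True merely moves the scaling from initialization time to run time, so optimizers such as Adam see unit-variance parameters (ProGAN's "equalized learning rate" trick). A hedged sanity check of the targeted std:

import numpy as np

# For a 3x3 conv with 16 input channels: fan_in = 3 * 3 * 16 = 144.
gain, fan_in = np.sqrt(2), 144
print(gain / np.sqrt(fan_in))  # ~0.118, the std both branches aim for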
#----------------------------------------------------------------------------
# Fully-connected layer.
Example 15: dense
# Required module: import tensorflow [as alias]
# Or: from tensorflow import sqrt [as alias]
def dense(x, fmaps, gain=np.sqrt(2), use_wscale=False):
    if len(x.shape) > 2:
        # Flatten all trailing dimensions into one feature axis.
        x = tf.reshape(x, [-1, np.prod([d.value for d in x.shape[1:]])])
    w = get_weight([x.shape[1].value, fmaps], gain=gain, use_wscale=use_wscale)
    w = tf.cast(w, x.dtype)
    return tf.matmul(x, w)
#----------------------------------------------------------------------------
# Convolutional layer.