This article collects typical usage examples of Python's tensorflow.truncated_normal_initializer method. If you are unsure what tensorflow.truncated_normal_initializer does, how to call it, or what idiomatic usage looks like, the curated examples below should help; you can also explore other usage examples from the tensorflow module for context.
The following 15 code examples of tensorflow.truncated_normal_initializer are listed, ordered by popularity.
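Before the examples, a minimal sketch of the initializer itself. It samples from a normal distribution and re-draws any value that falls more than two standard deviations from the mean; the call below uses the TF 1.x API (under TensorFlow 2.x the same initializer is reachable as tf.compat.v1.truncated_normal_initializer):

import tensorflow as tf

# Values are drawn from N(mean, stddev^2), discarding and re-drawing
# any sample outside two standard deviations of the mean.
init = tf.truncated_normal_initializer(mean=0.0, stddev=0.1)
w = tf.get_variable('w', shape=[784, 256], initializer=init)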
Example 1: cifarnet_arg_scope
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def cifarnet_arg_scope(weight_decay=0.004):
    """Defines the default cifarnet argument scope.

    Args:
        weight_decay: The weight decay to use for regularizing the model.

    Returns:
        An `arg_scope` to use for the cifarnet model.
    """
    with slim.arg_scope(
        [slim.conv2d],
        weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
        activation_fn=tf.nn.relu):
        with slim.arg_scope(
            [slim.fully_connected],
            biases_initializer=tf.constant_initializer(0.1),
            weights_initializer=trunc_normal(0.04),
            weights_regularizer=slim.l2_regularizer(weight_decay),
            activation_fn=tf.nn.relu) as sc:
            return sc
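Note that `trunc_normal` is not defined in this snippet; in the slim model zoo it is typically a one-line alias, roughly (an assumption about the surrounding file):

# Presumed helper from the surrounding slim code:
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)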
Example 2: _variable_with_weight_decay
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.

    Returns:
        Variable Tensor
    """
    dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
    var = _variable_on_cpu(
        name,
        shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
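`_variable_on_cpu` is not shown here; in the TensorFlow CIFAR-10 tutorial, where this helper originates, it pins variable creation to the CPU, roughly as follows (a sketch, not the exact source):

def _variable_on_cpu(name, shape, initializer):
    """Create a Variable stored on CPU memory."""
    with tf.device('/cpu:0'):
        dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
        var = tf.get_variable(name, shape, initializer=initializer, dtype=dtype)
    return var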
Example 3: _variable_with_weight_decay
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
        name: name of the variable
        shape: list of ints
        stddev: standard deviation of a truncated Gaussian
        wd: add L2Loss weight decay multiplied by this float. If None, weight
            decay is not added for this Variable.

    Returns:
        Variable Tensor
    """
    var = _variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Example 4: _Deconv
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def _Deconv(self, net, out_filters, kernel_size, stride):
    shape = net.get_shape().as_list()
    in_filters = shape[3]
    kernel_shape = [kernel_size, kernel_size, out_filters, in_filters]
    weights = tf.get_variable(
        name='weights',
        shape=kernel_shape,
        dtype=tf.float32,
        initializer=tf.truncated_normal_initializer(stddev=0.01))
    out_height = shape[1] * stride
    out_width = shape[2] * stride
    batch_size = shape[0]
    output_shape = [batch_size, out_height, out_width, out_filters]
    net = tf.nn.conv2d_transpose(net, weights, output_shape,
                                 [1, stride, stride, 1], padding='SAME')
    # Capture batch_norm's return value so the normalization is actually applied.
    net = slim.batch_norm(net)
    return net
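A quick shape check, with hypothetical values: conv2d_transpose with stride 2 doubles the spatial dimensions.

# Hypothetical usage inside the model class (`model` is illustrative):
net = tf.placeholder(tf.float32, [8, 16, 16, 64])
net = model._Deconv(net, out_filters=32, kernel_size=4, stride=2)
# net now has shape [8, 32, 32, 32].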
Example 5: _deconvolutional_layer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def _deconvolutional_layer(input, is_training, filters):
    # Two stride-2 transposed convolutions: the output has 4x the spatial
    # size of the input (each layer doubles height and width).
    output = tf.layers.conv2d_transpose(
        input,
        filters=filters,
        kernel_size=(3, 3),
        strides=2,
        padding='same',
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001)
    )
    # output = tf.layers.batch_normalization(output, training=is_training)
    output = tf.layers.conv2d_transpose(
        output,
        filters=filters,
        kernel_size=(3, 3),
        strides=2,
        padding='same',
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001)
    )
    # output = tf.layers.batch_normalization(output, training=is_training)
    return output
Example 6: _convolutional_layer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def _convolutional_layer(input, filters, strides, is_training):
    """Constructs a conv2d layer followed by batch normalization and 2x2 max pooling."""
    x = tf.layers.conv2d(
        input,
        filters=filters,
        kernel_size=(3, 3),
        strides=strides,
        padding='same',
        activation=tf.nn.relu,
        kernel_initializer=tf.truncated_normal_initializer(stddev=0.1),
        kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001)
    )
    x = tf.layers.batch_normalization(x, training=is_training)
    output = tf.layers.max_pooling2d(x, 2, 2)
    return output
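The two helpers above compose into a small encoder-decoder; a hypothetical wiring (shapes in comments assume 64x64 inputs):

images = tf.placeholder(tf.float32, [None, 64, 64, 3])
is_training = tf.placeholder(tf.bool)
x = _convolutional_layer(images, filters=32, strides=1, is_training=is_training)  # -> 32x32
x = _convolutional_layer(x, filters=64, strides=1, is_training=is_training)       # -> 16x16
x = _deconvolutional_layer(x, is_training, filters=32)                            # -> 64x64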
Example 7: M_step
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def M_step(log_R, log_activation, vote, lambda_val=0.01):
    # `cl` is assumed to be the capslayer ops module from the surrounding project.
    R_shape = tf.shape(log_R)  # unused; kept from the original
    log_R = log_R + log_activation
    R_sum_i = cl.reduce_sum(tf.exp(log_R), axis=-3, keepdims=True)
    log_normalized_R = log_R - tf.reduce_logsumexp(log_R, axis=-3, keepdims=True)
    pose = cl.reduce_sum(vote * tf.exp(log_normalized_R), axis=-3, keepdims=True)
    log_var = tf.reduce_logsumexp(log_normalized_R + cl.log(tf.square(vote - pose)),
                                  axis=-3, keepdims=True)
    beta_v = tf.get_variable('beta_v',
                             shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
                             initializer=tf.truncated_normal_initializer(mean=15., stddev=3.))
    cost = R_sum_i * (beta_v + 0.5 * log_var)
    beta_a = tf.get_variable('beta_a',
                             shape=[1 for i in range(len(pose.shape) - 2)] + [pose.shape[-2], 1],
                             initializer=tf.truncated_normal_initializer(mean=100.0, stddev=10))
    cost_sum_h = cl.reduce_sum(cost, axis=-1, keepdims=True)
    logit = lambda_val * (beta_a - cost_sum_h)
    log_activation = tf.log_sigmoid(logit)
    return pose, log_var, log_activation
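For reference, these lines implement (in log space) the M-step of EM routing from Hinton et al.'s matrix-capsules paper; this is my reading of the code, with $R_{ij}$ the routing assignments and $V_{ij}$ the votes:

$$\mu_j = \frac{\sum_i R_{ij} V_{ij}}{\sum_i R_{ij}}, \qquad (\sigma_j^h)^2 = \frac{\sum_i R_{ij} (V_{ij}^h - \mu_j^h)^2}{\sum_i R_{ij}}$$

$$\mathrm{cost}_j^h = \Big(\beta_v + \tfrac{1}{2}\log (\sigma_j^h)^2\Big) \sum_i R_{ij}, \qquad a_j = \mathrm{sigmoid}\Big(\lambda\big(\beta_a - \sum_h \mathrm{cost}_j^h\big)\Big)$$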
Example 8: create_initializer
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def create_initializer(initializer_range=0.02):
    """Creates a `truncated_normal_initializer` with the given range."""
    return tf.truncated_normal_initializer(stddev=initializer_range)
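This is the pattern used in BERT-style code, where the returned initializer is passed straight to a layer; a hypothetical call (`hidden` and `num_labels` are illustrative placeholders):

logits = tf.layers.dense(
    hidden,
    units=num_labels,
    kernel_initializer=create_initializer(initializer_range=0.02))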
Example 9: embed
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def embed(inputs, vocab_size, num_units, zero_pad=True, scope="embedding", reuse=None):
    '''Embeds a given tensor.

    Args:
        inputs: A `Tensor` with type `int32` or `int64` containing the ids
            to be looked up in the lookup table.
        vocab_size: An int. Vocabulary size.
        num_units: An int. Number of embedding hidden units.
        zero_pad: A boolean. If True, all the values of the first row (id 0)
            are constant zeros.
        scope: Optional scope for `variable_scope`.
        reuse: Boolean, whether to reuse the weights of a previous layer
            by the same name.

    Returns:
        A `Tensor` with one more rank than the input's. The last dimension
        is `num_units`.
    '''
    with tf.variable_scope(scope, reuse=reuse):
        lookup_table = tf.get_variable('lookup_table',
                                       dtype=tf.float32,
                                       shape=[vocab_size, num_units],
                                       initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
        if zero_pad:
            # Zero out the row for id 0 so padding tokens embed to zeros.
            lookup_table = tf.concat((tf.zeros(shape=[1, num_units]),
                                      lookup_table[1:, :]), 0)
        outputs = tf.nn.embedding_lookup(lookup_table, inputs)
    return outputs
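A hypothetical call, embedding a batch of token ids into 128-dimensional vectors:

ids = tf.placeholder(tf.int32, [None, 50])         # [batch, seq_len]
emb = embed(ids, vocab_size=10000, num_units=128)  # -> [batch, seq_len, 128]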
Example 10: get_weight_variable
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def get_weight_variable(shape, regularizer):
    weights = tf.get_variable(
        "weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    # If a regularizer function is given, add its penalty to the 'losses' collection.
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights

# The forward-propagation function is defined next in the original source.
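A hypothetical call wiring in an L2 regularizer; its penalty then lands in the 'losses' collection alongside any other terms added there:

regularizer = tf.contrib.layers.l2_regularizer(0.0001)
weights = get_weight_variable([784, 500], regularizer)
regularization_loss = tf.add_n(tf.get_collection('losses'))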
Example 11: conv_relu
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def conv_relu(inputs, filters, k_size, stride, padding, scope_name):
    '''
    Applies a 2-D convolution followed by ReLU to `inputs`.
    '''
    with tf.compat.v1.variable_scope(scope_name, reuse=tf.compat.v1.AUTO_REUSE) as scope:
        in_channels = inputs.shape[-1]
        kernel = tf.compat.v1.get_variable('kernel',
                                           [k_size, k_size, in_channels, filters],
                                           initializer=tf.truncated_normal_initializer())
        biases = tf.compat.v1.get_variable('biases',
                                           [filters],
                                           initializer=tf.random_normal_initializer())
        conv = tf.nn.conv2d(inputs, kernel, strides=[1, stride, stride, 1], padding=padding)
        return tf.nn.relu(conv + biases, name=scope.name)
Example 12: fully_connected
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def fully_connected(inputs, out_dim, scope_name='fc'):
    '''
    A fully connected linear layer on inputs.
    '''
    with tf.compat.v1.variable_scope(scope_name, reuse=tf.compat.v1.AUTO_REUSE) as scope:
        in_dim = inputs.shape[-1]
        w = tf.compat.v1.get_variable('weights', [in_dim, out_dim],
                                      initializer=tf.truncated_normal_initializer())
        b = tf.compat.v1.get_variable('biases', [out_dim],
                                      initializer=tf.constant_initializer(0.0))
        out = tf.matmul(inputs, w) + b
    return out
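The two helpers above compose into a minimal classifier; a hypothetical forward pass:

images = tf.compat.v1.placeholder(tf.float32, [None, 28, 28, 1])
conv1 = conv_relu(images, filters=32, k_size=5, stride=1,
                  padding='SAME', scope_name='conv1')
flat = tf.reshape(conv1, [-1, 28 * 28 * 32])
logits = fully_connected(flat, out_dim=10, scope_name='fc1')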
Example 13: mobilenet_v1_arg_scope
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def mobilenet_v1_arg_scope(is_training=True,
                           weight_decay=0.00004,
                           stddev=0.09,
                           regularize_depthwise=False):
    """Defines the default MobilenetV1 arg scope.

    Args:
        is_training: Whether or not we're training the model.
        weight_decay: The weight decay to use for regularizing the model.
        stddev: The standard deviation of the truncated normal weight initializer.
        regularize_depthwise: Whether or not to apply regularization to the
            depthwise convolution weights.

    Returns:
        An `arg_scope` to use for the mobilenet v1 model.
    """
    batch_norm_params = {
        'is_training': is_training,
        'center': True,
        'scale': True,
        'decay': 0.9997,
        'epsilon': 0.001,
    }
    # Set weight_decay for weights in Conv and DepthSepConv layers.
    weights_init = tf.truncated_normal_initializer(stddev=stddev)
    regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
    if regularize_depthwise:
        depthwise_regularizer = regularizer
    else:
        depthwise_regularizer = None
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                        weights_initializer=weights_init,
                        activation_fn=tf.nn.relu6, normalizer_fn=slim.batch_norm):
        with slim.arg_scope([slim.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
                with slim.arg_scope([slim.separable_conv2d],
                                    weights_regularizer=depthwise_regularizer) as sc:
                    return sc
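A scope built this way is consumed with slim.arg_scope; a hypothetical call site, assuming the usual slim mobilenet_v1 network function is available:

with slim.arg_scope(mobilenet_v1_arg_scope(is_training=True)):
    logits, end_points = mobilenet_v1(images, num_classes=1001)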
Example 14: lenet_arg_scope
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def lenet_arg_scope(weight_decay=0.0):
    """Defines the default lenet argument scope.

    Args:
        weight_decay: The weight decay to use for regularizing the model.

    Returns:
        An `arg_scope` to use for the lenet model.
    """
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
        weights_regularizer=slim.l2_regularizer(weight_decay),
        weights_initializer=tf.truncated_normal_initializer(stddev=0.1),
        activation_fn=tf.nn.relu) as sc:
        return sc
Example 15: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import truncated_normal_initializer [as alias]
def __init__(self, image_size, num_channels, hidden_dim):
    self.image_size = image_size
    self.num_channels = num_channels
    self.hidden_dim = hidden_dim
    self.matrix_init = tf.truncated_normal_initializer(stddev=0.1)
    self.vector_init = tf.constant_initializer(0.0)
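Elsewhere in the class these two initializers would presumably feed tf.get_variable; a hypothetical sketch:

# Hypothetical weight/bias creation using the stored initializers:
w = tf.get_variable('w', [self.hidden_dim, self.hidden_dim],
                    initializer=self.matrix_init)
b = tf.get_variable('b', [self.hidden_dim],
                    initializer=self.vector_init)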