This article collects typical usage examples of the Python method tensorflow.compat.v1.truncated_normal_initializer. If you are wondering what exactly v1.truncated_normal_initializer does and how to use it, the curated code examples below may help. You can also explore usage examples of other methods in the module tensorflow.compat.v1.
The following 15 code examples of v1.truncated_normal_initializer are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
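Before the examples, here is a minimal self-contained sketch of the API itself (the variable name and shape are illustrative, not taken from any project below). tf.truncated_normal_initializer samples from a normal distribution with the given mean and stddev, re-drawing any value that falls more than two standard deviations from the mean, so initial weights contain no extreme outliers.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

# Initializer sampling from N(0.0, 0.02**2), truncated at two standard deviations.
init = tf.truncated_normal_initializer(mean=0.0, stddev=0.02)

# Illustrative variable; any shape works.
w = tf.get_variable('w', shape=[3, 3, 16, 32], initializer=init)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(w).std())  # close to 0.02, slightly smaller due to truncation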
Example 1: log_conv2d
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def log_conv2d(self, input_tensor, output_tensor, stride_height, stride_width,
               filters, initializer, use_bias):
  """Log a conv2d call."""
  if self.model == 'resnet50_v1.5':
    assert stride_height == stride_width, (
        '--ml_perf_compliance_logging does not support convolutions where '
        'the stride height is not equal to the stride width. '
        'stride_height=%d, stride_width=%d' % (stride_height, stride_width))
    if isinstance(initializer, tf.truncated_normal_initializer) or (
        isinstance(initializer, tf.variance_scaling_initializer) and
        initializer.distribution == 'truncated_normal'):
      initializer = tags.TRUNCATED_NORMAL
    elif (isinstance(initializer, tf.glorot_uniform_initializer) or
          initializer is None):
      initializer = 'glorot_uniform'
    resnet_log_helper.log_conv2d(input_tensor, output_tensor, stride_width,
                                 filters, initializer, use_bias)
Example 2: argscope
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def argscope(is_training=None, normalizer_fn=slim.layer_norm):
  """Default TF argscope used for convnet-based grasping models.

  Args:
    is_training: Whether this argscope is for training or inference.
    normalizer_fn: Which conv/fc normalizer to use.

  Returns:
    Dictionary of argument overrides.
  """
  with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
        weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
        activation_fn=tf.nn.relu,
        normalizer_fn=normalizer_fn):
      with slim.arg_scope(
          [slim.conv2d, slim.max_pool2d], stride=2, padding='VALID') as scope:
        return scope
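A hedged usage sketch for this argscope (the tf_slim import, input placeholder, and layer sizes are assumptions, not part of the original project): the returned scope is re-entered with slim.arg_scope, so every conv and fully connected layer built inside it picks up the truncated-normal weight initializer, ReLU activation, and the chosen normalizer.

import tensorflow.compat.v1 as tf
import tf_slim as slim  # assumption: the `slim` used by this example

inputs = tf.placeholder(tf.float32, [None, 64, 64, 3])  # illustrative input
with slim.arg_scope(argscope(is_training=True)):
  net = slim.conv2d(inputs, 32, [3, 3])  # stddev=0.01 init, relu, stride 2
  logits = slim.fully_connected(slim.flatten(net), 10)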
Example 3: _variable_with_weight_decay
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
      decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
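The `_variable_on_cpu` helper is not shown in this example; in the classic CIFAR-10 tutorial this snippet comes from, it is essentially `tf.get_variable` pinned to the CPU. A minimal sketch under that assumption (dtype fixed to float32 for brevity):

def _variable_on_cpu(name, shape, initializer):
  """Sketch: create a variable stored in CPU memory."""
  with tf.device('/cpu:0'):
    return tf.get_variable(name, shape, initializer=initializer,
                           dtype=tf.float32)

# Illustrative call: a 5x5x3x64 conv kernel with L2 weight decay of 0.004.
kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
                                     stddev=5e-2, wd=0.004)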
Example 4: cifarnet_arg_scope
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def cifarnet_arg_scope(weight_decay=0.004):
  """Defines the default cifarnet argument scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.

  Returns:
    An `arg_scope` to use for the cifarnet model.
  """
  with slim.arg_scope(
      [slim.conv2d],
      weights_initializer=tf.truncated_normal_initializer(stddev=5e-2),
      activation_fn=tf.nn.relu):
    with slim.arg_scope(
        [slim.fully_connected],
        biases_initializer=tf.constant_initializer(0.1),
        weights_initializer=trunc_normal(0.04),
        weights_regularizer=slim.l2_regularizer(weight_decay),
        activation_fn=tf.nn.relu) as sc:
      return sc
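The `trunc_normal` helper used for the fully connected weights is defined elsewhere in the slim model zoo; it is just a thin wrapper over the same initializer. A sketch consistent with how the slim nets define it (zero mean assumed):

trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)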
Example 5: affine
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def affine(self,
           num_out_channels,
           input_layer=None,
           num_channels_in=None,
           bias=0.0,
           stddev=None,
           activation='relu'):
  if input_layer is None:
    input_layer = self.top_layer
  if num_channels_in is None:
    num_channels_in = self.top_size
  name = 'affine' + str(self.counts['affine'])
  self.counts['affine'] += 1
  with tf.variable_scope(name):
    init_factor = 2. if activation == 'relu' else 1.
    stddev = stddev or np.sqrt(init_factor / num_channels_in)
    kernel = self.get_variable(
        'weights', [num_channels_in, num_out_channels],
        self.variable_dtype, self.dtype,
        initializer=tf.truncated_normal_initializer(stddev=stddev))
    biases = self.get_variable('biases', [num_out_channels],
                               self.variable_dtype, self.dtype,
                               initializer=tf.constant_initializer(bias))
    mlperf.logger.log(key=mlperf.tags.MODEL_HP_DENSE,
                      value=num_out_channels)
    logits = tf.nn.xw_plus_b(input_layer, kernel, biases)
    if activation == 'relu':
      mlperf.logger.log(key=mlperf.tags.MODEL_HP_RELU)
      affine1 = tf.nn.relu(logits, name=name)
    elif activation == 'linear' or activation is None:
      affine1 = logits
    else:
      raise KeyError('Invalid activation type \'%s\'' % activation)
    self.top_layer = affine1
    self.top_size = num_out_channels
    return affine1
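The default stddev above implements He-style initialization: stddev = sqrt(2 / fan_in) for ReLU layers and sqrt(1 / fan_in) otherwise, which keeps the activation variance roughly constant from layer to layer. A quick numeric check (plain Python, values illustrative):

import numpy as np

num_channels_in = 1024
stddev_relu = np.sqrt(2.0 / num_channels_in)    # ~0.0442 when activation='relu'
stddev_linear = np.sqrt(1.0 / num_channels_in)  # ~0.0313 for linear layers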
Example 6: trainable_initial_state
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def trainable_initial_state(batch_size, state_size, initial_state_init=None):
  """Make trainable initial state for an RNN cell with `state_size`."""
  def create_one(i, size):
    if initial_state_init is not None:
      initializer = initial_state_init
    else:
      initializer = tf.truncated_normal_initializer(mean=0.0, stddev=1)
    return get_batched_variable(
        'initial_state_t{}'.format(i), batch_size, size,
        initializer=initializer)
  flat_vars = [create_one(i, size)
               for i, size in enumerate(nest.flatten(state_size))]
  return nest.pack_sequence_as(state_size, flat_vars)
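A hedged usage sketch (the cell choice is illustrative, and `get_batched_variable` is assumed to create a trainable variable broadcast to [batch_size, size]): for an LSTM cell, nest.flatten yields the sizes of the c and h states, so two variables are created and re-packed into the original tuple structure.

from tensorflow.python.util import nest  # assumption: the `nest` used above

cell = tf.nn.rnn_cell.LSTMCell(128)
# cell.state_size is LSTMStateTuple(c=128, h=128); two initial-state
# variables are created and packed back into that structure.
init_state = trainable_initial_state(batch_size=32, state_size=cell.state_size)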
Example 7: pad_conv3d_lrelu
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def pad_conv3d_lrelu(self, activations, n_filters, kernel_size, strides,
                     scope):
  """Pad, apply 3-D convolution and leaky relu."""
  padding = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]]
  # tf.nn.conv3d accepts a list of 5 values for strides
  # with first and last value equal to 1.
  if isinstance(strides, numbers.Integral):
    strides = [strides] * 3
  strides = [1] + strides + [1]
  # filter_shape = [K, K, K, num_input, num_output]
  filter_shape = (
      [kernel_size]*3 + activations.shape[-1:].as_list() + [n_filters])
  with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
    conv_filter = tf.get_variable(
        "conv_filter", shape=filter_shape,
        initializer=tf.truncated_normal_initializer(stddev=0.02))
    if self.hparams.use_spectral_norm:
      conv_filter, assign_op = common_layers.apply_spectral_norm(conv_filter)
      if self.is_training:
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign_op)
    padded = tf.pad(activations, padding)
    convolved = tf.nn.conv3d(
        padded, conv_filter, strides=strides, padding="VALID")
    rectified = tf.nn.leaky_relu(convolved, alpha=0.2)
    return rectified
Example 8: apply_spectral_norm
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def apply_spectral_norm(x):
  """Normalizes x using the spectral norm.

  The implementation follows Algorithm 1 of
  https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is
  reshaped such that the number of channels (last-dimension) is the same.

  Args:
    x: Tensor with the last dimension equal to the number of filters.

  Returns:
    x: Tensor with the same shape as x normalized by the spectral norm.
    assign_op: Op to be run after every step to update the vector "u".
  """
  weights_shape = shape_list(x)
  other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]

  # Reshape into a 2-D matrix with outer size num_filters.
  weights_2d = tf.reshape(x, (other, num_filters))

  # v = Wu / ||Wu||
  with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
    u = tf.get_variable(
        "u", [num_filters, 1],
        initializer=tf.truncated_normal_initializer(),
        trainable=False)
  v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))

  # u_new = vW / ||vW||
  u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))

  # s = v*W*u
  spectral_norm = tf.squeeze(
      tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))

  # Set u equal to u_new in the next iteration.
  assign_op = tf.assign(u, tf.transpose(u_new))
  return tf.divide(x, spectral_norm), assign_op
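A usage sketch (the weight shape is illustrative; `shape_list` is assumed to be the usual static-shape helper from the same library): the returned assign op performs one power-iteration update of `u` and should run once per training step, e.g. via the UPDATE_OPS collection, exactly as Example 7 does.

w = tf.get_variable('w', shape=[3, 3, 64, 128])  # illustrative conv kernel
w_sn, update_u = apply_spectral_norm(w)
# Run one power-iteration update of `u` alongside each training step.
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_u)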
Example 9: instance_norm
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def instance_norm(x):
  """Instance normalization layer."""
  with tf.variable_scope("instance_norm"):
    epsilon = 1e-5
    mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
    scale = tf.get_variable(
        "scale", [x.get_shape()[-1]],
        initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
    offset = tf.get_variable(
        "offset", [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0))
    out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset
    return out
Example 10: general_conv
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def general_conv(x,
                 num_filters=64,
                 filter_size=7,
                 stride=1,
                 stddev=0.02,
                 padding="VALID",
                 name="conv",
                 do_norm="instance",
                 do_relu=True,
                 relufactor=0):
  """Generalized convolution layer."""
  with tf.variable_scope(name):
    x = layers().Conv2D(
        num_filters,
        filter_size,
        stride,
        padding,
        activation=None,
        kernel_initializer=tf.truncated_normal_initializer(stddev=stddev),
        bias_initializer=tf.constant_initializer(0.0))(x)
    if do_norm == "layer":
      x = layer_norm(x)
    elif do_norm == "instance":
      x = instance_norm(x)
    if do_relu:
      if relufactor == 0:
        x = tf.nn.relu(x, "relu")
      else:
        x = lrelu(x, leak=relufactor)
    return x
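In tensor2tensor's common_layers, the `layers()` helper lazily resolves to tf.keras.layers, so the construction above is roughly equivalent to the following sketch (an assumption about that helper, not verbatim project code):

conv = tf.keras.layers.Conv2D(
    filters=64, kernel_size=7, strides=1, padding='valid',
    activation=None,
    kernel_initializer=tf.truncated_normal_initializer(stddev=0.02),
    bias_initializer=tf.constant_initializer(0.0))
y = conv(x)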
Example 11: create_initializer
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def create_initializer(initializer_range=0.02):
  """Creates a `truncated_normal_initializer` with the given range."""
  return tf.truncated_normal_initializer(stddev=initializer_range)
Example 12: create_initializer
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def create_initializer(initializer_range=0.02):
  """Creates a `truncated_normal_initializer` with the given range."""
  return tf.truncated_normal_initializer(stddev=initializer_range)
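Examples 11 and 12 are the same helper as it appears in BERT-family codebases, where it is passed wherever a weight matrix is created. A hedged usage sketch (the variable name and the BERT-base-like shape are illustrative):

initializer = create_initializer(initializer_range=0.02)
embedding_table = tf.get_variable(
    'word_embeddings', shape=[30522, 768], initializer=initializer)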
Example 13: create_v1_model
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def create_v1_model(albert_config, is_training, input_ids, input_mask,
                    segment_ids, use_one_hot_embeddings, use_einsum,
                    hub_module):
  """Creates a classification model."""
  (_, final_hidden) = fine_tuning_utils.create_albert(
      albert_config=albert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      segment_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings,
      use_einsum=use_einsum,
      hub_module=hub_module)

  final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
  batch_size = final_hidden_shape[0]
  seq_length = final_hidden_shape[1]
  hidden_size = final_hidden_shape[2]

  output_weights = tf.get_variable(
      "cls/squad/output_weights", [2, hidden_size],
      initializer=tf.truncated_normal_initializer(stddev=0.02))
  output_bias = tf.get_variable(
      "cls/squad/output_bias", [2], initializer=tf.zeros_initializer())

  final_hidden_matrix = tf.reshape(final_hidden,
                                   [batch_size * seq_length, hidden_size])
  logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
  logits = tf.nn.bias_add(logits, output_bias)
  logits = tf.reshape(logits, [batch_size, seq_length, 2])
  logits = tf.transpose(logits, [2, 0, 1])

  unstacked_logits = tf.unstack(logits, axis=0)
  (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
  return (start_logits, end_logits)
Example 14: __init__
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def __init__(self, num_dims, num_hidden, internal_bias=False, name='nade'):
  self._num_dims = num_dims
  self._num_hidden = num_hidden

  std = 1.0 / math.sqrt(self._num_dims)
  initializer = tf.truncated_normal_initializer(stddev=std)

  with tf.variable_scope(name):
    # Encoder weights (`V` in [1]).
    self.w_enc = tf.get_variable(
        'w_enc',
        shape=[self._num_dims, 1, self._num_hidden],
        initializer=initializer)
    # Transposed decoder weights (`W'` in [1]).
    self.w_dec_t = tf.get_variable(
        'w_dec_t',
        shape=[self._num_dims, self._num_hidden, 1],
        initializer=initializer)
    # Internal encoder bias term (`b` in [1]). Will be used if external biases
    # are not provided.
    if internal_bias:
      self.b_enc = tf.get_variable(
          'b_enc',
          shape=[1, self._num_hidden],
          initializer=initializer)
    else:
      self.b_enc = None
    # Internal decoder bias term (`c` in [1]). Will be used if external biases
    # are not provided.
    if internal_bias:
      self.b_dec = tf.get_variable(
          'b_dec',
          shape=[1, self._num_dims],
          initializer=initializer)
    else:
      self.b_dec = None
Example 15: __call__
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import truncated_normal_initializer [as alias]
def __call__(self, reduced_dims, new_dims):
  fan_in = mtf.list_product(d.size for d in reduced_dims)
  fan_out = mtf.list_product(d.size for d in new_dims)
  scale = self.scale
  if self.mode == "fan_in":
    if not unit_scaling_convention():
      scale /= max(1., fan_in)
  elif self.mode == "fan_out":
    if unit_scaling_convention():
      raise ValueError("Unit scaling convention only works with \"fan_in\"")
    scale /= max(1., fan_out)
  elif self.mode == "fan_avg":
    if unit_scaling_convention():
      raise ValueError("Unit scaling convention only works with \"fan_in\"")
    scale /= max(1., float(fan_in + fan_out) / 2)
  else:
    raise ValueError(
        "Invalid `mode` argument: "
        "expected one of {\"fan_in\", \"fan_out\", \"fan_avg\"} "
        "but got %s" % (self.mode,))
  stddev = scale ** 0.5
  if self.distribution == "normal":
    return tf.truncated_normal_initializer(stddev=stddev)
  elif self.distribution == "uniform":
    limit = stddev * 3. ** 0.5
    return tf.random_uniform_initializer(minval=-limit, maxval=limit)
  else:
    raise ValueError("Invalid `distribution` argument: "
                     "expected one of {\"normal\", \"uniform\"} "
                     "but got %s" % (self.distribution,))