This article collects typical usage examples of the Python function tensorflow.python.ops.init_ops.truncated_normal_initializer. If you have been wondering what truncated_normal_initializer does, how to call it, or what real code that uses it looks like, the curated examples below should help.
The following 15 code examples of truncated_normal_initializer are shown, sorted by popularity by default.
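Before the examples, here is a minimal, self-contained sketch of the basic API. It is written against the TF 1.x graph API that all of the examples below use; the variable name, shape, and seed are illustrative, not taken from any example.

import tensorflow as tf
from tensorflow.python.ops import init_ops

# Truncated normal draws from N(mean, stddev^2) and redraws any sample
# that falls more than two standard deviations from the mean.
initializer = init_ops.truncated_normal_initializer(
    mean=0.0, stddev=0.1, seed=42)

# Illustrative variable created with the initializer.
weights = tf.get_variable("weights", shape=[3, 4], initializer=initializer)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(weights))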
Example 1: testInitializerDifferent
def testInitializerDifferent(self):
  for dtype in [dtypes.float32, dtypes.float64]:
    init1 = init_ops.truncated_normal_initializer(
        0.0, 1.0, seed=1, dtype=dtype)
    init2 = init_ops.truncated_normal_initializer(
        0.0, 1.0, seed=2, dtype=dtype)
    self.assertFalse(identicaltest(self, init1, init2))
Example 2: testInitFromPartitionVar
def testInitFromPartitionVar(self):
  checkpoint_dir = self.get_temp_dir()
  with self.test_session() as session:
    v1 = _create_partition_checkpoints(session, checkpoint_dir)

  # New graph and session.
  with ops.Graph().as_default() as g:
    with self.test_session(graph=g) as session:
      with variable_scope.variable_scope("some_scope"):
        my1 = variable_scope.get_variable(
            name="my1",
            shape=[100, 100],
            initializer=init_ops.truncated_normal_initializer(0.5),
            partitioner=partitioned_variables.min_max_variable_partitioner(
                max_partitions=5, axis=0, min_slice_size=8 << 10))
        my1_var_list = my1._get_variable_list()
      with variable_scope.variable_scope("some_other_scope"):
        my2 = variable_scope.get_variable(
            name="var1",
            shape=[100, 100],
            initializer=init_ops.truncated_normal_initializer(0.5),
            partitioner=partitioned_variables.min_max_variable_partitioner(
                max_partitions=5, axis=0, min_slice_size=8 << 10))
        my2_var_list = my2._get_variable_list()

      checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
          "scope/var1": "some_scope/my1",
          "scope/": "some_other_scope/"})

      session.run(variables.global_variables_initializer())
      my1_values = session.run(my1_var_list)
      self.assertAllEqual(my1_values, v1)
      my2_values = session.run(my2_var_list)
      self.assertAllEqual(my2_values, v1)

  # New graph and session.
  with ops.Graph().as_default() as g:
    with self.test_session(graph=g) as session:
      with variable_scope.variable_scope("some_scope"):
        my1 = variable_scope.get_variable(
            name="my1",
            shape=[100, 100],
            initializer=init_ops.truncated_normal_initializer(0.5),
            partitioner=partitioned_variables.min_max_variable_partitioner(
                max_partitions=5, axis=0, min_slice_size=8 << 10))
        my1_var_list = my1._get_variable_list()

      checkpoint_utils.init_from_checkpoint(checkpoint_dir,
                                            {"scope/var1": my1_var_list,})
      session.run(variables.global_variables_initializer())
      my1_values = session.run(my1_var_list)
      self.assertAllEqual(my1_values, v1)
Example 3: _define_vars
def _define_vars(self, params, **kwargs):
  with ops.device(self.device_assigner):
    self.tree_parameters = variable_scope.get_variable(
        name='tree_parameters_%d' % self.layer_num,
        shape=[params.num_nodes, params.num_features_per_node],
        initializer=init_ops.truncated_normal_initializer(
            mean=params.weight_init_mean, stddev=params.weight_init_std))

    self.tree_thresholds = variable_scope.get_variable(
        name='tree_thresholds_%d' % self.layer_num,
        shape=[params.num_nodes],
        initializer=init_ops.truncated_normal_initializer(
            mean=params.weight_init_mean, stddev=params.weight_init_std))
Example 4: testWithScopes
def testWithScopes(self):
  init_value0 = np.asarray([1.0, 3.0, 9.0]).reshape((1, 3, 1))
  init_value1 = np.asarray([2.0, 4.0, 6.0, 8.0]).reshape((2, 1, 2))

  with self.test_session() as sess:
    initializer = init_ops.truncated_normal_initializer(stddev=.1)

    with variable_scope.variable_scope('my_model/my_layer0'):
      var0 = variables_lib2.variable(
          'my_var0', shape=[1, 3, 1], initializer=initializer)
    with variable_scope.variable_scope('my_model/my_layer1'):
      var1 = variables_lib2.variable(
          'my_var1', shape=[2, 1, 2], initializer=initializer)

    var_names_to_values = {
        'my_model/my_layer0/my_var0': init_value0,
        'my_model/my_layer1/my_var1': init_value1
    }
    init_fn = variables_lib2.assign_from_values_fn(var_names_to_values)

    # Initialize the variables.
    sess.run(variables_lib.global_variables_initializer())

    # Perform the assignment.
    init_fn(sess)

    # Request and test the variable values:
    var0, var1 = sess.run([var0, var1])
    self.assertAllEqual(init_value0, var0)
    self.assertAllEqual(init_value1, var1)
Example 5: load_embedding_initializer
def load_embedding_initializer(ckpt_path,
                               embedding_tensor_name,
                               new_vocab_size,
                               embedding_dim,
                               old_vocab_file,
                               new_vocab_file,
                               num_oov_buckets=0,
                               initializer=None):
  """Returns a variable initializer for loading pre-trained embeddings.

  Wrapper around `load_and_remap_matrix_initializer()` specialized for loading
  embedding weights and remapping according to the provided vocab files. See
  docs for `load_and_remap_matrix_initializer()` for more details.

  NOTE: Only for use with div-partitioned variables / vocabularies.

  Args:
    ckpt_path: Path to the TensorFlow checkpoint (version 2, `TensorBundle`)
      from which the old matrix `Tensor` will be loaded.
    embedding_tensor_name: Name of the 2-D `Tensor` to load from checkpoint.
    new_vocab_size: Number of entries in the new vocab.
    embedding_dim: `int` specifying the dimension of the embedding vectors from
      the checkpoint. Must match the number of columns in the old embedding
      matrix.
    old_vocab_file: A scalar `Tensor` of type `string` containing the
      path to the old vocabulary file.
    new_vocab_file: A scalar `Tensor` of type `string` containing the
      path to the new vocabulary file.
    num_oov_buckets: `int` specifying the number of out-of-vocabulary
      buckets to use. Must be >= 0.
    initializer: Initializer function that accepts a 1-D tensor as the arg to
      specify the shape of the returned tensor. If `None`, defaults to using
      `truncated_normal_initializer()`.

  Returns:
    A variable initializer function.
  """
  if initializer is None:
    # TODO(b/25671353): This should be kept in sync with the stddev used by
    # feature_column.py's _EmbeddingColumn.
    initializer = init_ops.truncated_normal_initializer(
        stddev=1.0 /
        math_ops.sqrt(math_ops.cast(embedding_dim, dtypes.float32)))

  return load_and_remap_matrix_initializer(
      ckpt_path=ckpt_path,
      old_tensor_name=embedding_tensor_name,
      new_row_vocab_size=new_vocab_size,
      new_col_vocab_size=embedding_dim,
      old_row_vocab_file=old_vocab_file,
      new_row_vocab_file=new_vocab_file,
      old_col_vocab_file=None,
      new_col_vocab_file=None,
      num_row_oov_buckets=num_oov_buckets,
      num_col_oov_buckets=0,
      initializer=initializer)
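For orientation, here is a hedged sketch of how the helper above might be called. All paths, tensor names, and sizes are hypothetical, and the resulting initializer is simply passed on to get_variable:

init = load_embedding_initializer(
    ckpt_path='/tmp/model.ckpt',           # hypothetical checkpoint path
    embedding_tensor_name='embeddings',    # hypothetical tensor name in it
    new_vocab_size=100,
    embedding_dim=16,
    old_vocab_file='/tmp/old_vocab.txt',   # hypothetical vocab files
    new_vocab_file='/tmp/new_vocab.txt',
    num_oov_buckets=1)
# Total rows = new_vocab_size + num_oov_buckets = 101.
emb = variable_scope.get_variable(
    'embeddings', shape=[101, 16], initializer=init)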
Example 6: _WeightInit
def _WeightInit(self, stddev):
  """Returns truncated normal variable initializer.

  Function is defined purely to shorten the name so that it stops wrapping.

  Args:
    stddev: Standard deviation of normal variable.

  Returns:
    An initializer that initializes with a truncated normal variable.
  """
  return init_ops.truncated_normal_initializer(stddev=stddev)
Example 7: _random_weights
def _random_weights(self, size=50, num_shards=1):
  assert size > 0
  assert num_shards > 0
  assert num_shards <= size

  embedding_weights = partitioned_variables.create_partitioned_variables(
      shape=[size],
      slicing=[num_shards],
      initializer=init_ops.truncated_normal_initializer(
          mean=0.0, stddev=1.0, dtype=dtypes.float32))
  for w in embedding_weights:
    w.initializer.run()
  return embedding_weights
Example 8: _random_weights
def _random_weights(self, size=50, num_shards=1):
  assert size > 0
  assert num_shards > 0
  assert num_shards <= size

  embedding_weights = list(variable_scope.get_variable(
      "embedding_weights",
      shape=[size],
      partitioner=partitioned_variables.fixed_size_partitioner(num_shards),
      initializer=init_ops.truncated_normal_initializer(
          mean=0.0, stddev=1.0, dtype=dtypes.float32)))
  for w in embedding_weights:
    w.initializer.run()
  return embedding_weights
Example 9: __new__
def __new__(cls,
            sparse_id_column,
            dimension,
            combiner="mean",
            initializer=None):
  if initializer is not None and not callable(initializer):
    raise ValueError("initializer must be callable if specified.")
  if initializer is None:
    stddev = 1 / math.sqrt(sparse_id_column.length)
    # TODO(b/25671353): Better initial value?
    initializer = init_ops.truncated_normal_initializer(mean=0.0,
                                                        stddev=stddev)
  return super(_EmbeddingColumn, cls).__new__(cls, sparse_id_column,
                                              dimension, combiner,
                                              initializer)
Example 10: __new__
def __new__(cls,
            vocabulary_size,
            dimension,
            initializer=None,
            combiner='mean'):
  """Embedding table configuration.

  Args:
    vocabulary_size: Number of vocabulary (/rows) in the table.
    dimension: The embedding dimension.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean `0.0` and standard deviation
      `1/sqrt(dimension)`.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently 'mean', 'sqrtn', 'sum' and None are
      supported, with 'mean' the default. 'sqrtn' often achieves good
      accuracy, in particular with bag-of-words columns. For more information,
      see `tf.nn.embedding_lookup_sparse`. None is only valid for dense rather
      than sparse tensors.

  Returns:
    `TableConfig`.

  Raises:
    ValueError: if `vocabulary_size` is not positive integer.
    ValueError: if `dimension` is not positive integer.
    ValueError: if `initializer` is specified and is not callable.
    ValueError: if `combiner` is not supported.
  """
  if not isinstance(vocabulary_size, int) or vocabulary_size < 1:
    raise ValueError('Invalid vocabulary_size {}.'.format(vocabulary_size))

  if not isinstance(dimension, int) or dimension < 1:
    raise ValueError('Invalid dimension {}.'.format(dimension))

  if (initializer is not None) and (not callable(initializer)):
    raise ValueError('initializer must be callable if specified.')
  if initializer is None:
    initializer = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=1 / math.sqrt(dimension))

  if combiner not in ('mean', 'sum', 'sqrtn', None):
    raise ValueError('Invalid combiner {}'.format(combiner))

  return super(TableConfig, cls).__new__(cls, vocabulary_size, dimension,
                                         initializer, combiner)
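A brief hedged usage note: with the defaults above, configuring a 10000-row table of 64-dimensional embeddings reduces to the first line below, which (per the default branch in __new__) is equivalent to the explicit form that follows. The enclosing module is not shown in this excerpt, so the bare class name is assumed to be in scope:

table = TableConfig(vocabulary_size=10000, dimension=64)
# Equivalent explicit form, spelling out the default initializer:
table = TableConfig(
    vocabulary_size=10000,
    dimension=64,
    initializer=init_ops.truncated_normal_initializer(
        mean=0.0, stddev=1 / math.sqrt(64)),
    combiner='mean')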
Example 11: _create_embedding_lookup
def _create_embedding_lookup(input_tensor, vocab_size, dimension,
                             weight_collections, stddev, combiner, trainable,
                             name):
  """Creates embedding variable and does a lookup.

  Args:
    input_tensor: A tensor which should contain sparse id to look up.
    vocab_size: An integer specifying the vocabulary size.
    dimension: An integer specifying the embedding vector dimension.
    weight_collections: List of graph collections to which weights are added.
    stddev: the standard deviation to be used in embedding initialization.
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported:
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    trainable: If `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    name: A string specifying the name of the embedding variable.

  Returns:
    A Tensor with shape [batch_size, dimension] and embedding Variable.
  """
  slicing = _max_size_embedding_partitioner()(vocab_size, dimension)
  logging.info("Slicing=%s for name=%s, vocab_size=%d, embed_dim=%d",
               str(slicing), name, vocab_size, dimension)
  if stddev > 0:
    initializer = init_ops.truncated_normal_initializer(stddev=stddev)
  else:
    initializer = init_ops.zeros_initializer

  embeddings = partitioned_variables.create_partitioned_variables(
      shape=[vocab_size, dimension],
      slicing=slicing,
      initializer=initializer,
      dtype=dtypes.float32,
      collections=weight_collections,
      name=name,
      reuse=False,
      trainable=trainable)

  return contrib_embedding_ops.safe_embedding_lookup_sparse(
      embeddings,
      input_tensor,
      default_id=0,
      combiner=combiner,
      name=name), embeddings
Example 12: inception_v3_arg_scope
def inception_v3_arg_scope(weight_decay=0.00004,
                           stddev=0.1,
                           batch_norm_var_collection='moving_vars',
                           use_fused_batchnorm=True):
  """Defines the default InceptionV3 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the truncated normal weight initializer.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
    use_fused_batchnorm: Enable fused batchnorm.

  Returns:
    An `arg_scope` to use for the inception v3 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': 0.9997,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
      # collection containing update_ops.
      'updates_collections': ops.GraphKeys.UPDATE_OPS,
      # Use fused batch norm if possible.
      'fused': use_fused_batchnorm,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=init_ops.truncated_normal_initializer(
            stddev=stddev),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc
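A hedged usage sketch of this arg scope; the placeholder shape and the single conv layer are illustrative, and the imports mirror those listed in Example 15 plus dtypes:

from tensorflow.python.framework import dtypes

# Hypothetical 299x299 RGB image batch.
inputs = array_ops.placeholder(dtypes.float32, [None, 299, 299, 3])
with arg_scope(inception_v3_arg_scope(stddev=0.1)):
  # conv2d created inside the scope inherits the truncated-normal
  # weights_initializer and the batch-norm defaults configured above.
  net = layers.conv2d(inputs, 32, [3, 3])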
Example 13: _create_partition_checkpoints
def _create_partition_checkpoints(sess, checkpoint_dir):
  checkpoint_prefix = os.path.join(checkpoint_dir, "model")
  checkpoint_state_name = "checkpoint"
  v1 = variable_scope.get_variable(
      name="var1",
      shape=[100, 100],
      initializer=init_ops.truncated_normal_initializer(0.5),
      partitioner=partitioned_variables.min_max_variable_partitioner(
          max_partitions=5, axis=0, min_slice_size=8 << 10))
  sess.run(variables.global_variables_initializer())
  v1_value = sess.run(v1._get_variable_list())
  saver = saver_lib.Saver()
  saver.save(
      sess,
      checkpoint_prefix,
      global_step=0,
      latest_filename=checkpoint_state_name)
  return v1_value
Example 14: _conv
def _conv(args, output_size, filter_size, stddev=0.001, bias=True,
          bias_start=0.0, scope=None):
  if args is None or (nest.is_sequence(args) and not args):
    raise ValueError("`args` must be specified")
  if not nest.is_sequence(args):
    args = [args]

  # Calculate the total size of arguments on dimension 3.
  # (batch_size x height x width x arg_size)
  total_arg_size = 0
  shapes = [a.get_shape().as_list() for a in args]
  height = shapes[0][1]
  width = shapes[0][2]
  for shape in shapes:
    if len(shape) != 4:
      raise ValueError("Conv is expecting 4D arguments: %s" % str(shapes))
    if not shape[3]:
      raise ValueError("Conv expects shape[3] of arguments: %s" % str(shapes))
    if shape[1] == height and shape[2] == width:
      total_arg_size += shape[3]
    else:
      raise ValueError(
          "Inconsistent height and width size in arguments: %s" % str(shapes))

  with vs.variable_scope(scope or "Conv"):
    kernel = vs.get_variable(
        "Kernel",
        [filter_size[0], filter_size[1], total_arg_size, output_size],
        initializer=init_ops.truncated_normal_initializer(stddev=stddev))
    if len(args) == 1:
      res = tf.nn.conv2d(args[0], kernel, [1, 1, 1, 1], padding='SAME')
    else:
      res = tf.nn.conv2d(array_ops.concat(3, args), kernel, [1, 1, 1, 1],
                         padding='SAME')
    if not bias:
      return res
    bias_term = vs.get_variable(
        "Bias", [output_size],
        initializer=init_ops.constant_initializer(bias_start))
  return res + bias_term
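For context, a minimal hedged call of the helper above. The input shape and scope name are illustrative; a single-tensor call takes the plain tf.nn.conv2d branch rather than the legacy array_ops.concat(3, ...) path:

x = tf.placeholder(tf.float32, [None, 32, 32, 8])  # hypothetical NHWC input
y = _conv(x, output_size=16, filter_size=[3, 3], stddev=0.01, scope="conv1")
# y has shape [None, 32, 32, 16]; 'SAME' padding preserves height and width.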
Example 15: inception_v3_base
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope

trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)


def inception_v3_base(inputs,
                      final_endpoint='Mixed_7c',
                      min_depth=16,
                      depth_multiplier=1.0,
                      scope=None):
  """Inception model from http://arxiv.org/abs/1512.00567.

  Constructs an Inception v3 network from inputs to the given final endpoint.
  This method can construct the network up to the final inception block
  Mixed_7c.

  Note that the names of the layers in the paper do not correspond to the names
  of the endpoints registered by this function although they build the same