本文整理匯總了Python中tensorflow.python.ops.init_ops.glorot_uniform_initializer方法的典型用法代碼示例。如果您正苦於以下問題:Python init_ops.glorot_uniform_initializer方法的具體用法?Python init_ops.glorot_uniform_initializer怎麽用?Python init_ops.glorot_uniform_initializer使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在模塊 tensorflow.python.ops.init_ops 的用法示例。
在下文中一共展示了init_ops.glorot_uniform_initializer方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: _get_default_initializer
# 需要導入模塊: from tensorflow.python.ops import init_ops [as 別名]
# 或者: from tensorflow.python.ops.init_ops import glorot_uniform_initializer [as 別名]
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
    """Provide a default initializer and a corresponding value.

    Args:
      name: see get_variable.
      shape: see get_variable.
      dtype: see get_variable.

    Returns:
      initializer and initializing_from_value. See get_variable above.

    Raises:
      ValueError: When giving unsupported dtype.
    """
    # Floating-point variables default to Glorot (Xavier) uniform; the
    # initializer object itself is returned, so initializing_from_value
    # is False.
    if dtype.is_floating:
        return init_ops.glorot_uniform_initializer(), False

    # Integer / unsigned / boolean variables default to zeros. Note that
    # zeros_initializer is invoked immediately here, so what is returned
    # is a concrete value (initializing_from_value is True), not a callable.
    if dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
        zero_value = init_ops.zeros_initializer()(
            shape=shape, dtype=dtype.base_dtype)
        return zero_value, True

    # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?
    raise ValueError("An initializer for variable %s of %s is required"
                     % (name, dtype.base_dtype))
# To stop regularization, use this regularizer
示例2: _get_default_initializer
# 需要導入模塊: from tensorflow.python.ops import init_ops [as 別名]
# 或者: from tensorflow.python.ops.init_ops import glorot_uniform_initializer [as 別名]
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
    """Provide a default initializer and a corresponding value.

    Args:
      name: see get_variable.
      shape: see get_variable.
      dtype: see get_variable.

    Returns:
      initializer and initializing_from_value. See get_variable above.

    Raises:
      ValueError: When giving unsupported dtype.
    """
    if dtype.is_floating:
        # Glorot (Xavier) uniform for float variables; get_variable will
        # invoke the initializer object later, so this is not a value.
        result = (init_ops.glorot_uniform_initializer(), False)
    elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
        # Integers/unsigned default to zero, booleans to False. The
        # zeros_initializer is called right away, so the first element of
        # the result is a concrete tensor value.
        zeros = init_ops.zeros_initializer()(
            shape=shape, dtype=dtype.base_dtype)
        result = (zeros, True)
    else:
        # NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?
        raise ValueError("An initializer for variable %s of %s is required"
                         % (name, dtype.base_dtype))
    return result
# To stop regularization, use this regularizer
示例3: __init__
# 需要導入模塊: from tensorflow.python.ops import init_ops [as 別名]
# 或者: from tensorflow.python.ops.init_ops import glorot_uniform_initializer [as 別名]
def __init__(self, vocabulary_size, embedding_size, hidden_size, float_dtype, name):
    """Build an embedding table whose transpose serves as the output projection.

    NOTE(review): embedding_size is only used for the table shape and is not
    stored on the instance — confirm no caller reads self.embedding_size.
    """
    # Record configuration.
    self.vocabulary_size = vocabulary_size
    self.hidden_size = hidden_size
    self.float_dtype = float_dtype
    self.name = name

    # Create the embedding matrix; its transpose is reused to project hidden
    # states back onto the vocabulary (weight tying).
    with tf.compat.v1.variable_scope(self.name):
        self.embedding_table = tf.compat.v1.get_variable(
            name='embedding_table',
            shape=[vocabulary_size, embedding_size],
            dtype=float_dtype,
            initializer=glorot_uniform_initializer(),
            trainable=True)
        self.projection_matrix = tf.transpose(
            a=self.embedding_table, name='vocab_projection_matrix')
示例4: __init__
# 需要導入模塊: from tensorflow.python.ops import init_ops [as 別名]
# 或者: from tensorflow.python.ops.init_ops import glorot_uniform_initializer [as 別名]
def __init__(self, cell_size):
    """Store the cell size and select initializers for the cell's weights."""
    self.cell_size = cell_size
    # Use the enclosing variable scope's initializer when one is set,
    # otherwise fall back to Glorot (Xavier) uniform.
    scope_initializer = tf.get_variable_scope().initializer
    self.default_initializer = scope_initializer or init_ops.glorot_uniform_initializer()
    # Recurrent weights get orthogonal initialization.
    self.initializer = tf.orthogonal_initializer()
示例5: _get_variable
# 需要導入模塊: from tensorflow.python.ops import init_ops [as 別名]
# 或者: from tensorflow.python.ops.init_ops import glorot_uniform_initializer [as 別名]
def _get_variable(self, name, shape, dtype=None,
                  initializer=None, seed=None):
    """Create (or fetch) a variable with sensible defaults.

    `dtype` defaults to self.dtype. A numeric `initializer` is treated as a
    constant fill value; `None` selects a (possibly seeded) Glorot uniform
    initializer.
    """
    dtype = self.dtype if dtype is None else dtype
    if initializer is None:
        initializer = init_ops.glorot_uniform_initializer(seed=seed)
    elif isinstance(initializer, (float, int)):
        # A bare number means "fill with this constant".
        initializer = tf.constant_initializer(float(initializer))
    return tf.get_variable(name, shape, dtype, initializer)
示例6: __init__
# 需要導入模塊: from tensorflow.python.ops import init_ops [as 別名]
# 或者: from tensorflow.python.ops.init_ops import glorot_uniform_initializer [as 別名]
def __init__(self,
             reference_dims,
             hypothesis_dims,
             hidden_dims,
             float_dtype,
             dropout_attn,
             training,
             name,
             attn_type='multiplicative'):
    """Set up an attention module over reference states, queried by hypothesis states.

    Supports 'additive' and 'multiplicative' attention; the additive variant
    additionally projects queries and learns a scoring weight vector.
    """
    # Record configuration.
    self.reference_dims = reference_dims
    self.hypothesis_dims = hypothesis_dims
    self.hidden_dims = hidden_dims
    self.float_dtype = float_dtype
    self.attn_type = attn_type
    self.training = training
    self.name = name
    assert attn_type in ['additive', 'multiplicative'], 'Attention type {:s} is not supported.'.format(attn_type)

    # Attention-weight dropout is optional; disabled when rate is 0.
    self.dropout_attn = tf.keras.layers.Dropout(rate=dropout_attn) if dropout_attn > 0 else None

    # Instantiate parameters.
    with tf.compat.v1.variable_scope(self.name):
        # Only the additive variant needs a query projection and a
        # learned scoring weight; multiplicative leaves both as None.
        self.queries_projection = None
        self.attn_weight = None
        if attn_type == 'additive':
            self.queries_projection = FeedForwardLayer(
                self.hypothesis_dims,
                self.hidden_dims,
                float_dtype,
                dropout_rate=0.,
                activation=None,
                use_bias=False,
                use_layer_norm=False,
                training=self.training,
                name='queries_projection')
            self.attn_weight = tf.compat.v1.get_variable(
                name='attention_weight',
                shape=self.hidden_dims,
                dtype=float_dtype,
                initializer=glorot_uniform_initializer(),
                trainable=True)
        # Keys are always projected into the shared hidden space.
        self.keys_projection = FeedForwardLayer(
            self.reference_dims,
            self.hidden_dims,
            float_dtype,
            dropout_rate=0.,
            activation=None,
            use_bias=False,
            use_layer_norm=False,
            training=self.training,
            name='keys_projection')