當前位置: 首頁>>代碼示例>>Python>>正文


Python init_ops.glorot_uniform_initializer方法代碼示例

本文整理匯總了Python中tensorflow.python.ops.init_ops.glorot_uniform_initializer方法的典型用法代碼示例。如果您正苦於以下問題:Python init_ops.glorot_uniform_initializer方法的具體用法?Python init_ops.glorot_uniform_initializer怎麽用?Python init_ops.glorot_uniform_initializer使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在tensorflow.python.ops.init_ops的用法示例。


在下文中一共展示了init_ops.glorot_uniform_initializer方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。

示例1: _get_default_initializer

# 需要導入模塊: from tensorflow.python.ops import init_ops [as 別名]
# 或者: from tensorflow.python.ops.init_ops import glorot_uniform_initializer [as 別名]
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
    """Pick a default initializer (and value flag) for a new variable.

    Args:
      name: see get_variable.
      shape: see get_variable.
      dtype: see get_variable.

    Returns:
      A tuple (initializer, initializing_from_value); see get_variable.

    Raises:
      ValueError: when no default initializer exists for `dtype`.
    """
    base = dtype.base_dtype
    # Floating-point variables default to Glorot (Xavier) uniform.
    if dtype.is_floating:
      return init_ops.glorot_uniform_initializer(), False
    # Integer, unsigned and boolean variables default to a concrete
    # zero-valued tensor, so the second tuple element is True.
    if dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
      zero_value = init_ops.zeros_initializer()(shape=shape, dtype=base)
      return zero_value, True
    # NOTE(review): DT_STRING / DT_COMPLEX have no default here — callers
    # must supply an explicit initializer for those dtypes.
    raise ValueError("An initializer for variable %s of %s is required"
                     % (name, base))


# To stop regularization, use this regularizer 
開發者ID:ryfeus,項目名稱:lambda-packs,代碼行數:35,代碼來源:variable_scope.py

示例2: _get_default_initializer

# 需要導入模塊: from tensorflow.python.ops import init_ops [as 別名]
# 或者: from tensorflow.python.ops.init_ops import glorot_uniform_initializer [as 別名]
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
    """Return the default initializer for a variable of the given dtype.

    Args:
      name: see get_variable.
      shape: see get_variable.
      dtype: see get_variable.

    Returns:
      A tuple (initializer, initializing_from_value); see get_variable.

    Raises:
      ValueError: when giving an unsupported dtype.
    """
    if dtype.is_floating:
      # Glorot (Xavier) uniform for floating-point variables.
      result = (init_ops.glorot_uniform_initializer(), False)
    elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
      # int/uint default to zero, bool defaults to False; here the
      # "initializer" is already a concrete value, hence the True flag.
      result = (init_ops.zeros_initializer()(shape=shape,
                                             dtype=dtype.base_dtype),
                True)
    else:
      # e.g. DT_STRING or DT_COMPLEX — no sensible default exists.
      raise ValueError("An initializer for variable %s of %s is required"
                       % (name, dtype.base_dtype))
    return result


# To stop regularization, use this regularizer 
開發者ID:abhisuri97,項目名稱:auto-alt-text-lambda-api,代碼行數:35,代碼來源:variable_scope.py

示例3: __init__

# 需要導入模塊: from tensorflow.python.ops import init_ops [as 別名]
# 或者: from tensorflow.python.ops.init_ops import glorot_uniform_initializer [as 別名]
def __init__(self, vocabulary_size, embedding_size, hidden_size, float_dtype, name):
        """Create the embedding table and expose its transpose as a
        vocabulary projection matrix.

        Note: `embedding_size` only determines the table shape and is
        deliberately not stored on the instance.
        """
        # Record configuration.
        self.name = name
        self.float_dtype = float_dtype
        self.hidden_size = hidden_size
        self.vocabulary_size = vocabulary_size

        # Create the embedding matrix; its transpose serves as the
        # projection back onto the vocabulary.
        with tf.compat.v1.variable_scope(self.name):
            table = tf.compat.v1.get_variable(
                name='embedding_table',
                shape=[vocabulary_size, embedding_size],
                dtype=float_dtype,
                initializer=glorot_uniform_initializer(),
                trainable=True)
            self.embedding_table = table
            self.projection_matrix = tf.transpose(
                a=self.embedding_table, name='vocab_projection_matrix')
開發者ID:EdinburghNLP,項目名稱:nematus,代碼行數:17,代碼來源:transformer_layers.py

示例4: __init__

# 需要導入模塊: from tensorflow.python.ops import init_ops [as 別名]
# 或者: from tensorflow.python.ops.init_ops import glorot_uniform_initializer [as 別名]
def __init__(self, cell_size):
        """Store the cell size and choose weight initializers."""
        self.cell_size = cell_size
        # Use the enclosing variable scope's initializer when one is set;
        # otherwise fall back to Glorot uniform.
        scope_initializer = tf.get_variable_scope().initializer
        if not scope_initializer:
            scope_initializer = init_ops.glorot_uniform_initializer()
        self.default_initializer = scope_initializer
        # Recurrent weights get an orthogonal initializer.
        self.initializer = tf.orthogonal_initializer()
開發者ID:alex-berard,項目名稱:seq2seq,代碼行數:6,代碼來源:rnn.py

示例5: _get_variable

# 需要導入模塊: from tensorflow.python.ops import init_ops [as 別名]
# 或者: from tensorflow.python.ops.init_ops import glorot_uniform_initializer [as 別名]
def _get_variable(self, name, shape, dtype=None,
                      initializer=None, seed=None):
        """Create (or reuse) a variable, filling in class-level defaults.

        Args:
            name: variable name passed to tf.get_variable.
            shape: variable shape.
            dtype: variable dtype; defaults to self.dtype when None.
            initializer: an initializer object, or a bare int/float used as
                a constant fill value; defaults to Glorot uniform when None.
            seed: seed forwarded to the default Glorot initializer; ignored
                when an explicit initializer is given.

        Returns:
            The variable returned by tf.get_variable.
        """
        if dtype is None:
            dtype = self.dtype
        if initializer is None:
            initializer = init_ops.glorot_uniform_initializer(seed=seed)
        elif isinstance(initializer, (float, int)):
            # Single tuple isinstance instead of an `or` of two checks.
            # NOTE: bool is a subclass of int, so booleans are (still)
            # accepted here, matching the original behavior.
            initializer = tf.constant_initializer(float(initializer))
        return tf.get_variable(name, shape, dtype, initializer)
開發者ID:HewlettPackard,項目名稱:dlcookbook-dlbs,代碼行數:12,代碼來源:nvcnn.py

示例6: __init__

# 需要導入模塊: from tensorflow.python.ops import init_ops [as 別名]
# 或者: from tensorflow.python.ops.init_ops import glorot_uniform_initializer [as 別名]
def __init__(self,
                 reference_dims,
                 hypothesis_dims,
                 hidden_dims,
                 float_dtype,
                 dropout_attn,
                 training,
                 name,
                 attn_type='multiplicative'):
        """Build an attention module with additive or multiplicative scoring.

        Args:
            reference_dims: size of the attended-over (key) states.
            hypothesis_dims: size of the query states.
            hidden_dims: size of the internal attention space.
            float_dtype: dtype of the created parameters.
            dropout_attn: attention dropout rate; values <= 0 disable it.
            training: training-mode flag forwarded to sub-layers.
            name: variable-scope name for this module's parameters.
            attn_type: either 'additive' or 'multiplicative'.
        """
        # Record configuration.
        self.reference_dims = reference_dims
        self.hypothesis_dims = hypothesis_dims
        self.hidden_dims = hidden_dims
        self.float_dtype = float_dtype
        self.attn_type = attn_type
        self.training = training
        self.name = name

        assert attn_type in ['additive', 'multiplicative'], 'Attention type {:s} is not supported.'.format(attn_type)

        # Optional dropout applied to the attention weights.
        self.dropout_attn = tf.keras.layers.Dropout(rate=dropout_attn) if dropout_attn > 0 else None

        # Instantiate parameters.
        with tf.compat.v1.variable_scope(self.name):
            self.queries_projection = None
            self.attn_weight = None
            if attn_type == 'additive':
                # Additive attention additionally projects the queries and
                # scores them with a learned weight vector.
                self.queries_projection = FeedForwardLayer(
                    self.hypothesis_dims, self.hidden_dims, float_dtype,
                    dropout_rate=0., activation=None, use_bias=False,
                    use_layer_norm=False, training=self.training,
                    name='queries_projection')
                self.attn_weight = tf.compat.v1.get_variable(
                    name='attention_weight', shape=self.hidden_dims,
                    dtype=float_dtype,
                    initializer=glorot_uniform_initializer(),
                    trainable=True)

            # Keys are projected into the attention space for both variants.
            self.keys_projection = FeedForwardLayer(
                self.reference_dims, self.hidden_dims, float_dtype,
                dropout_rate=0., activation=None, use_bias=False,
                use_layer_norm=False, training=self.training,
                name='keys_projection')
開發者ID:EdinburghNLP,項目名稱:nematus,代碼行數:58,代碼來源:transformer_attention_modules.py


注:本文中的tensorflow.python.ops.init_ops.glorot_uniform_initializer方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。