

Python init_ops.random_uniform_initializer code examples

This article collects typical usage examples of the Python method tensorflow.python.ops.init_ops.random_uniform_initializer. If you are wondering what this method does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also browse further usage examples for the containing module, tensorflow.python.ops.init_ops.


Below are 12 code examples of init_ops.random_uniform_initializer, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
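
Before the collected examples, here is a minimal self-contained sketch of the initializer on its own, assuming a TensorFlow 1.x graph context; the variable name "my_weights" and its shape are illustrative placeholders, not taken from any of the projects below. random_uniform_initializer(minval, maxval) returns a callable that, when passed to get_variable, fills the variable with samples drawn uniformly from [minval, maxval).

```python
import math

from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope as vs

# Uniform(-sqrt(3), sqrt(3)) has variance 1, the usual default for embeddings.
sqrt3 = math.sqrt(3)
initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

# Hypothetical variable; the name and shape are placeholders for illustration.
weights = vs.get_variable(
    "my_weights", shape=[128, 64], initializer=initializer)
```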

Example 1: call

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_uniform_initializer [as alias]
def call(self, inputs, state):
    """Run the cell on embedded inputs."""
    with ops.device("/cpu:0"):
      if self._initializer:
        initializer = self._initializer
      elif vs.get_variable_scope().initializer:
        initializer = vs.get_variable_scope().initializer
      else:
        # Default initializer for embeddings should have variance=1.
        sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
        initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

      if isinstance(state, tuple):
        data_type = state[0].dtype
      else:
        data_type = state.dtype

      embedding = vs.get_variable(
          "embedding", [self._embedding_classes, self._embedding_size],
          initializer=initializer,
          dtype=data_type)
      embedded = embedding_ops.embedding_lookup(embedding,
                                                array_ops.reshape(inputs, [-1]))

      return self._cell(embedded, state) 
Developer: ryfeus, Project: lambda-packs, Lines: 27, Source: core_rnn_cell.py
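
A quick check of the comment "Uniform(-sqrt(3), sqrt(3)) has variance=1" that appears in this and several of the following examples: a Uniform(-a, a) distribution has variance a^2/3, so choosing a = sqrt(3) gives variance 3/3 = 1, which is why the default embedding initializer in these cells is init_ops.random_uniform_initializer(-sqrt3, sqrt3).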

Example 2: __call__

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_uniform_initializer [as alias]
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or "embedding_wrapper"):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 27, Source: core_rnn_cell_impl.py

Example 3: __call__

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_uniform_initializer [as alias]
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state) 
Developer: Guanghan, Project: ROLO, Lines: 27, Source: rnn_cell.py

Example 4: __call__

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_uniform_initializer [as alias]
def __call__(self, inputs, state, scope=None):
        """Run the cell on embedded inputs."""
        with vs.variable_scope(scope or type(self).__name__):    # "EmbeddingWrapper"
            with ops.device("/cpu:0"):
                if self._initializer:
                    initializer = self._initializer
                elif vs.get_variable_scope().initializer:
                    initializer = vs.get_variable_scope().initializer
                else:
                    # Default initializer for embeddings should have variance=1.
                    sqrt3 = math.sqrt(3)    # Uniform(-sqrt(3), sqrt(3)) has variance=1.
                    initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

                if type(state) is tuple:
                    data_type = state[0].dtype
                else:
                    data_type = state.dtype

                embedding = vs.get_variable(
                        "embedding", [self._embedding_classes, self._embedding_size],
                        initializer=initializer,
                        dtype=data_type)
                embedded = embedding_ops.embedding_lookup(
                        embedding, array_ops.reshape(inputs, [-1]))
        return self._cell(embedded, state) 
Developer: thu-coai, Project: ecm, Lines: 27, Source: rnn_cell.py

Example 5: __call__

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_uniform_initializer [as alias]
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with _checked_scope(self, scope or "embedding_wrapper", reuse=self._reuse):
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state) 
Developer: ratschlab, Project: RGAN, Lines: 27, Source: mod_core_rnn_cell_impl.py

Example 6: _build

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_uniform_initializer [as alias]
def _build(self):
        """ build embedding table and
        build position embedding table if timing=="emb"

        :return:
        """
        self._embeddings = variable_scope.get_variable(
            name=(self._name or "embedding_table"),
            shape=[self._vocab_size, self._dimension],
            initializer=init_ops.random_uniform_initializer(
                -self._init_scale, self._init_scale))
        if self._timing == "emb":
            self._position_embedding = variable_scope.get_variable(
                name=(self._name or "embedding_table") + "_posi",
                shape=[self._maximum_position, self._dimension],
                initializer=init_ops.random_uniform_initializer(
                    -self._init_scale, self._init_scale)) 
Developer: zhaocq-nlp, Project: NJUNMT-tf, Lines: 19, Source: embedding.py

Example 7: __call__

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_uniform_initializer [as alias]
def __call__(self, inputs, state, scope=None):
        with vs.variable_scope(scope or "eunn_cell"):

            state = _eunn_loop(state, self._capacity, self.diag_vec, self.off_vec, self.diag, self._fft)

            input_matrix_init = init_ops.random_uniform_initializer(-0.01, 0.01)
            if self._comp:
                input_matrix_re = vs.get_variable("U_re", [inputs.get_shape()[-1], self._hidden_size],
                                                  initializer=input_matrix_init)
                input_matrix_im = vs.get_variable("U_im", [inputs.get_shape()[-1], self._hidden_size],
                                                  initializer=input_matrix_init)
                inputs_re = math_ops.matmul(inputs, input_matrix_re)
                inputs_im = math_ops.matmul(inputs, input_matrix_im)
                inputs = math_ops.complex(inputs_re, inputs_im)
            else:
                input_matrix = vs.get_variable("U", [inputs.get_shape()[-1], self._hidden_size],
                                               initializer=input_matrix_init)
                inputs = math_ops.matmul(inputs, input_matrix)

            bias = vs.get_variable("modReLUBias", [self._hidden_size], initializer=init_ops.constant_initializer())
            output = self._activation((inputs + state), bias, self._comp)

        return output, output 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines: 25, Source: EUNN.py

Example 8: random_uniform_variable

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_uniform_initializer [as alias]
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
  """Instantiates a variable with values drawn from a uniform distribution.

  Arguments:
      shape: Tuple of integers, shape of returned Keras variable.
      low: Float, lower boundary of the output interval.
      high: Float, upper boundary of the output interval.
      dtype: String, dtype of returned Keras variable.
      name: String, name of returned Keras variable.
      seed: Integer, random seed.

  Returns:
      A Keras variable, filled with drawn samples.

  Example:
  ```python
      # TensorFlow example
      >>> kvar = K.random_uniform_variable((2,3), 0, 1)
      >>> kvar
      <tensorflow.python.ops.variables.Variable object at 0x10ab40b10>
      >>> K.eval(kvar)
      array([[ 0.10940075,  0.10047495,  0.476143  ],
             [ 0.66137183,  0.00869417,  0.89220798]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  shape = tuple(map(int, shape))
  tf_dtype = _convert_string_dtype(dtype)
  if seed is None:
    # ensure that randomness is conditioned by the Numpy RNG
    seed = np.random.randint(10e8)
  value = init_ops.random_uniform_initializer(
      low, high, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name) 
Developer: ryfeus, Project: lambda-packs, Lines: 37, Source: backend.py

Example 9: __call__

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_uniform_initializer [as alias]
def __call__(self, inputs, state, scope=None):
        with vs.variable_scope(scope or "goru_cell"):

            U_init = init_ops.random_uniform_initializer(-0.01, 0.01)
            b_init = init_ops.constant_initializer(2.)
            mod_b_init = init_ops.constant_initializer(2.)

            U = vs.get_variable("U", [inputs.get_shape(
            )[-1], self._hidden_size * 3], dtype=tf.float32, initializer=U_init)
            Ux = math_ops.matmul(inputs, U)
            U_cx, U_rx, U_gx = array_ops.split(Ux, 3, axis=1)

            W_r = vs.get_variable(
                "W_r", [self._hidden_size, self._hidden_size], dtype=tf.float32, initializer=U_init)
            W_g = vs.get_variable(
                "W_g", [self._hidden_size, self._hidden_size], dtype=tf.float32, initializer=U_init)
            W_rh = math_ops.matmul(state, W_r)
            W_gh = math_ops.matmul(state, W_g)

            bias_r = vs.get_variable(
                "bias_r", [self._hidden_size], dtype=tf.float32, initializer=b_init)
            bias_g = vs.get_variable(
                "bias_g", [self._hidden_size], dtype=tf.float32)
            bias_c = vs.get_variable(
                "bias_c", [self._hidden_size], dtype=tf.float32, initializer=mod_b_init)

            r_tmp = U_rx + W_rh + bias_r
            g_tmp = U_gx + W_gh + bias_g
            r = math_ops.sigmoid(r_tmp)

            g = math_ops.sigmoid(g_tmp)

            Unitaryh = _eunn_loop(
                state, self._capacity, self.diag_vec, self.off_vec, self.diag, self._fft)
            c = modrelu(math_ops.multiply(r, Unitaryh) + U_cx, bias_c, False)
            new_state = math_ops.multiply(
                g, state) + math_ops.multiply(1 - g, c)

        return new_state, new_state 
Developer: rdangovs, Project: rotational-unit-of-memory, Lines: 41, Source: GORU.py

Example 10: __call__

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_uniform_initializer [as alias]
def __call__(self, inputs, state, scope=None):
        with vs.variable_scope(scope or "eunn_cell"):

            state = _eunn_loop(state, self._capacity, self.diag_vec,
                               self.off_vec, self.diag, self._fft)

            input_matrix_init = init_ops.random_uniform_initializer(
                -0.01, 0.01)
            if self._comp:
                input_matrix_re = vs.get_variable("U_re", [inputs.get_shape(
                )[-1], self._hidden_size], initializer=input_matrix_init)
                input_matrix_im = vs.get_variable("U_im", [inputs.get_shape(
                )[-1], self._hidden_size], initializer=input_matrix_init)
                inputs_re = math_ops.matmul(inputs, input_matrix_re)
                inputs_im = math_ops.matmul(inputs, input_matrix_im)
                inputs = math_ops.complex(inputs_re, inputs_im)
            else:
                input_matrix = vs.get_variable(
                    "U", [inputs.get_shape()[-1], self._hidden_size], initializer=input_matrix_init)
                inputs = math_ops.matmul(inputs, input_matrix)

            bias = vs.get_variable(
                "modReLUBias", [self._hidden_size], initializer=init_ops.constant_initializer())
            output = self._activation((inputs + state), bias, self._comp)

        return output, output 
Developer: rdangovs, Project: rotational-unit-of-memory, Lines: 28, Source: EUNN.py

Example 11: __call__

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_uniform_initializer [as alias]
def __call__(self, inputs, state, scope=None):
        with vs.variable_scope(scope or "goru_cell"):
            U_init = init_ops.random_uniform_initializer(-0.01, 0.01)
            b_init = init_ops.constant_initializer(2.)
            mod_b_init = init_ops.constant_initializer(0.01)

            U = vs.get_variable("U", [inputs.get_shape()[-1], self._hidden_size * 3], dtype=tf.float32,
                                initializer=U_init)
            Ux = math_ops.matmul(inputs, U)
            U_cx, U_rx, U_gx = array_ops.split(Ux, 3, axis=1)

            W_r = vs.get_variable("W_r", [self._hidden_size, self._hidden_size], dtype=tf.float32, initializer=U_init)
            W_g = vs.get_variable("W_g", [self._hidden_size, self._hidden_size], dtype=tf.float32, initializer=U_init)
            W_rh = math_ops.matmul(state, W_r)
            W_gh = math_ops.matmul(state, W_g)

            bias_r = vs.get_variable("bias_r", [self._hidden_size], dtype=tf.float32, initializer=b_init)
            bias_g = vs.get_variable("bias_g", [self._hidden_size], dtype=tf.float32)
            bias_c = vs.get_variable("bias_c", [self._hidden_size], dtype=tf.float32, initializer=mod_b_init)

            r_tmp = U_rx + W_rh + bias_r
            g_tmp = U_gx + W_gh + bias_g
            r = math_ops.sigmoid(r_tmp)

            g = math_ops.sigmoid(g_tmp)

            Unitaryh = _eunn_loop(state, self._capacity, self.diag_vec, self.off_vec, self.diag, self._fft)
            c = modrelu(math_ops.multiply(r, Unitaryh) + U_cx, bias_c, False)
            new_state = math_ops.multiply(g, state) + math_ops.multiply(1 - g, c)

        return new_state, new_state 
Developer: IsaacChanghau, Project: AmusingPythonCodes, Lines: 33, Source: GORU.py

Example 12: random_uniform_variable

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_uniform_initializer [as alias]
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
  """Instantiates a variable with values drawn from a uniform distribution.

  Arguments:
      shape: Tuple of integers, shape of returned Keras variable.
      low: Float, lower boundary of the output interval.
      high: Float, upper boundary of the output interval.
      dtype: String, dtype of returned Keras variable.
      name: String, name of returned Keras variable.
      seed: Integer, random seed.

  Returns:
      A Keras variable, filled with drawn samples.

  Example:
  ```python
      # TensorFlow example
      >>> kvar = K.random_uniform_variable((2,3), 0, 1)
      >>> kvar
      <tensorflow.python.ops.variables.Variable object at 0x10ab40b10>
      >>> K.eval(kvar)
      array([[ 0.10940075,  0.10047495,  0.476143  ],
             [ 0.66137183,  0.00869417,  0.89220798]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = _convert_string_dtype(dtype)
  if seed is None:
    # ensure that randomness is conditioned by the Numpy RNG
    seed = np.random.randint(10e8)
  value = init_ops.random_uniform_initializer(
      low, high, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 36, Source: backend.py


Note: The examples of the tensorflow.python.ops.init_ops.random_uniform_initializer method in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use should follow the corresponding project licenses. Do not reproduce without permission.