

Python init_ops.random_normal_initializer method code examples

This article collects typical usage examples of the Python method tensorflow.python.ops.init_ops.random_normal_initializer. If you are wondering what init_ops.random_normal_initializer does and how to use it, the curated examples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.python.ops.init_ops.


Below are 7 code examples of init_ops.random_normal_initializer, ordered by popularity.
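Before the examples, here is a minimal standalone sketch (assuming TF1 graph mode, which all of the snippets below use) of the two common ways to use init_ops.random_normal_initializer: as the initializer argument of tf.get_variable, or called directly with a shape. The variable names and shapes are illustrative only.

```python
import tensorflow as tf
from tensorflow.python.ops import init_ops

# Create an initializer object with the desired mean and standard deviation.
init = init_ops.random_normal_initializer(mean=0.0, stddev=0.02)

# 1) Pass it as the `initializer` of a variable.
w = tf.get_variable("w", shape=[128, 64], initializer=init)

# 2) Call it directly on a shape to get a tensor of samples.
samples = init([4, 4])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w).shape)        # (128, 64)
    print(sess.run(samples).shape)  # (4, 4)
```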

Example 1: embed_labels

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_normal_initializer [as alias]
def embed_labels(inputs, num_classes, output_dim, sn,
                 weight_decay_rate=1e-5,
                 reuse=None, scope=None):
    # TODO move regularizer definitions to model
    weights_regularizer = ly.l2_regularizer(weight_decay_rate)

    with tf.variable_scope(scope, 'embedding', [inputs], reuse=reuse) as sc:
        inputs = tf.convert_to_tensor(inputs)

        weights = tf.get_variable(name="weights", shape=(num_classes, output_dim),
                                  initializer=init_ops.random_normal_initializer)

        # Spectral Normalization
        if sn:
            weights = spectral_normed_weight(weights, num_iters=1, update_collection=Config.SPECTRAL_NORM_UPDATE_OPS)

        embed_out = tf.nn.embedding_lookup(weights, inputs)

    return embed_out 
Developer: SketchyScene, Project: SketchySceneColorization, Lines: 21, Source file: mru.py
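The snippet above relies on project-specific helpers (spectral_normed_weight, Config) that are not shown here. A stripped-down sketch of just the pattern it builds on, with hypothetical shapes, would be an embedding table initialized with random_normal_initializer and read with tf.nn.embedding_lookup:

```python
import tensorflow as tf
from tensorflow.python.ops import init_ops

labels = tf.constant([0, 3, 1])  # integer class ids
with tf.variable_scope("embedding"):
    weights = tf.get_variable(
        "weights", shape=(10, 16),  # (num_classes, output_dim)
        initializer=init_ops.random_normal_initializer())
    embed_out = tf.nn.embedding_lookup(weights, labels)  # shape (3, 16)
```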

Example 2: random_normal_variable

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_normal_initializer [as alias]
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
                           seed=None):
  """Instantiates a variable with values drawn from a normal distribution.

  Arguments:
      shape: Tuple of integers, shape of returned Keras variable.
      mean: Float, mean of the normal distribution.
      scale: Float, standard deviation of the normal distribution.
      dtype: String, dtype of returned Keras variable.
      name: String, name of returned Keras variable.
      seed: Integer, random seed.

  Returns:
      A Keras variable, filled with drawn samples.

  Example:
  ```python
      # TensorFlow example
      >>> kvar = K.random_normal_variable((2,3), 0, 1)
      >>> kvar
      <tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
      >>> K.eval(kvar)
      array([[ 1.19591331,  0.68685907, -0.63814116],
             [ 0.92629528,  0.28055015,  1.70484698]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  shape = tuple(map(int, shape))
  tf_dtype = _convert_string_dtype(dtype)
  if seed is None:
    # ensure that randomness is conditioned by the Numpy RNG
    seed = np.random.randint(10e8)
  value = init_ops.random_normal_initializer(
      mean, scale, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name) 
Developer: ryfeus, Project: lambda-packs, Lines: 38, Source file: backend.py

Example 3: random_normal_variable

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_normal_initializer [as alias]
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
                           seed=None):
  """Instantiates a variable with values drawn from a normal distribution.

  Arguments:
      shape: Tuple of integers, shape of returned Keras variable.
      mean: Float, mean of the normal distribution.
      scale: Float, standard deviation of the normal distribution.
      dtype: String, dtype of returned Keras variable.
      name: String, name of returned Keras variable.
      seed: Integer, random seed.

  Returns:
      A Keras variable, filled with drawn samples.

  Example:
  ```python
      # TensorFlow example
      >>> kvar = K.random_normal_variable((2,3), 0, 1)
      >>> kvar
      <tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
      >>> K.eval(kvar)
      array([[ 1.19591331,  0.68685907, -0.63814116],
             [ 0.92629528,  0.28055015,  1.70484698]], dtype=float32)
  ```
  """
  if dtype is None:
    dtype = floatx()
  tf_dtype = _convert_string_dtype(dtype)
  if seed is None:
    # ensure that randomness is conditioned by the Numpy RNG
    seed = np.random.randint(10e8)
  value = init_ops.random_normal_initializer(
      mean, scale, dtype=tf_dtype, seed=seed)(shape)
  return variable(value, dtype=dtype, name=name) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 37, Source file: backend.py

Example 4: linear_regression

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_normal_initializer [as alias]
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
  """Creates linear regression TensorFlow subgraph.

  Args:
    x: tensor or placeholder for input features.
    y: tensor or placeholder for labels.
    init_mean: the mean value to use for initialization.
    init_stddev: the standard deviation to use for initialization.

  Returns:
    Predictions and loss tensors.

  Side effects:
    The variables linear_regression.weights and linear_regression.bias are
    initialized as follows.  If init_mean is not None, then initialization
    will be done using a random normal initializer with the given init_mean
    and init_stddev.  (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.)  If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
  """
  with vs.variable_scope('linear_regression'):
    scope_name = vs.get_variable_scope().name
    summary.histogram('%s.x' % scope_name, x)
    summary.histogram('%s.y' % scope_name, y)
    dtype = x.dtype.base_dtype
    y_shape = y.get_shape()
    if len(y_shape) == 1:
      output_shape = 1
    else:
      output_shape = y_shape[1]
    # Set up the requested initialization.
    if init_mean is None:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape], dtype=dtype)
      bias = vs.get_variable('bias', [output_shape], dtype=dtype)
    else:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
      bias = vs.get_variable(
          'bias', [output_shape],
          initializer=init_ops.random_normal_initializer(
              init_mean, init_stddev, dtype=dtype),
          dtype=dtype)
    summary.histogram('%s.weights' % scope_name, weights)
    summary.histogram('%s.bias' % scope_name, bias)
    return losses_ops.mean_squared_error_regressor(x, y, weights, bias) 
Developer: ryfeus, Project: lambda-packs, Lines: 51, Source file: models.py
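For context, a hypothetical call to the function above (assuming linear_regression is importable from the surrounding module, and using made-up placeholder shapes): passing both init_mean and init_stddev routes weight and bias initialization through random_normal_initializer, and setting both to 0.0 gives the zero initialization mentioned in the docstring.

```python
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 5])  # 5 input features
y = tf.placeholder(tf.float32, shape=[None, 1])  # regression targets

# Weights and bias drawn from random_normal_initializer(0.0, 0.01).
predictions, loss = linear_regression(x, y, init_mean=0.0, init_stddev=0.01)
```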

Example 5: build

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_normal_initializer [as alias]
def build(self, inputs_shape):
    if inputs_shape[1].value is None:
      raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s"
                       % inputs_shape)

    input_depth = inputs_shape[1].value
    if self._input_initializer is None:
      self._input_initializer = init_ops.random_normal_initializer(mean=0.0,
                                                                   stddev=0.001)
    self._input_kernel = self.add_variable(
        "input_kernel",
        shape=[input_depth, self._num_units],
        initializer=self._input_initializer)

    if self._recurrent_initializer is None:
      self._recurrent_initializer = init_ops.constant_initializer(1.)
    self._recurrent_kernel = self.add_variable(
        "recurrent_kernel",
        shape=[self._num_units],
        initializer=self._recurrent_initializer)

    # Clip the absolute values of the recurrent weights to the specified minimum
    if self._recurrent_min_abs:
      abs_kernel = math_ops.abs(self._recurrent_kernel)
      min_abs_kernel = math_ops.maximum(abs_kernel, self._recurrent_min_abs)
      self._recurrent_kernel = math_ops.multiply(
          math_ops.sign(self._recurrent_kernel),
          min_abs_kernel
      )

    # Clip the absolute values of the recurrent weights to the specified maximum
    if self._recurrent_max_abs:
      self._recurrent_kernel = clip_ops.clip_by_value(self._recurrent_kernel,
                                                      -self._recurrent_max_abs,
                                                      self._recurrent_max_abs)

    self._bias = self.add_variable(
        "bias",
        shape=[self._num_units],
        initializer=init_ops.zeros_initializer(dtype=self.dtype))

    self.built = True 
Developer: batzner, Project: indrnn, Lines: 44, Source file: ind_rnn_cell.py
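The min/max clipping applied to the recurrent kernel above can be illustrated in isolation. The following sketch, with made-up values, shows how sign(u) * maximum(|u|, min_abs) followed by clip_by_value keeps every weight magnitude inside [min_abs, max_abs]:

```python
import tensorflow as tf
from tensorflow.python.ops import clip_ops, math_ops

u = tf.constant([-2.0, -0.01, 0.05, 3.0])
min_abs, max_abs = 0.1, 1.0

# Push magnitudes up to min_abs while keeping the sign.
u = math_ops.multiply(math_ops.sign(u),
                      math_ops.maximum(math_ops.abs(u), min_abs))
# Cap magnitudes at max_abs.
u = clip_ops.clip_by_value(u, -max_abs, max_abs)

with tf.Session() as sess:
    print(sess.run(u))  # [-1.  -0.1  0.1  1. ]
```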

Example 6: linear_regression

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_normal_initializer [as alias]
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
  """Creates linear regression TensorFlow subgraph.

  Args:
    x: tensor or placeholder for input features.
    y: tensor or placeholder for labels.
    init_mean: the mean value to use for initialization.
    init_stddev: the standard deviation to use for initialization.

  Returns:
    Predictions and loss tensors.

  Side effects:
    The variables linear_regression.weights and linear_regression.bias are
    initialized as follows.  If init_mean is not None, then initialization
    will be done using a random normal initializer with the given init_mean
    and init_stddev.  (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.)  If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
  """
  with vs.variable_scope('linear_regression'):
    scope_name = vs.get_variable_scope().name
    summary.histogram('%s.x' % scope_name, x)
    summary.histogram('%s.y' % scope_name, y)
    dtype = x.dtype.base_dtype
    y_shape = y.get_shape()
    if len(y_shape) == 1:
      output_shape = 1
    else:
      output_shape = y_shape[1]
    # Set up the requested initialization.
    if init_mean is None:
      weights = vs.get_variable(
          'weights', [x.get_shape()[1], output_shape], dtype=dtype)
      bias = vs.get_variable('bias', [output_shape], dtype=dtype)
    else:
      weights = vs.get_variable('weights', [x.get_shape()[1], output_shape],
                                initializer=init_ops.random_normal_initializer(
                                    init_mean, init_stddev, dtype=dtype),
                                dtype=dtype)
      bias = vs.get_variable('bias', [output_shape],
                             initializer=init_ops.random_normal_initializer(
                                 init_mean, init_stddev, dtype=dtype),
                             dtype=dtype)
    summary.histogram('%s.weights' % scope_name, weights)
    summary.histogram('%s.bias' % scope_name, bias)
    return losses_ops.mean_squared_error_regressor(x, y, weights, bias) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 49, Source file: models.py

Example 7: build

# Required import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import random_normal_initializer [as alias]
def build(self, inputs_shape):
        '''Construct the IndRNN cell.'''
        if inputs_shape[1].value is None:
            raise ValueError("Expected inputs.shape[1] to be known, saw shape: %s"
                             % inputs_shape)

        input_depth = inputs_shape[1].value
        if self._input_kernel_initializer is None:
            self._input_kernel_initializer = init_ops.random_normal_initializer(mean=0,
                                                                                stddev=1e-3)
        # matrix W
        self._input_kernel = self.add_variable(
            "input_kernel",
            shape=[input_depth, self._num_units],
            initializer=self._input_kernel_initializer
        )

        if self._recurrent_recurrent_kernel_initializer is None:
            self._recurrent_recurrent_kernel_initializer = init_ops.constant_initializer(1.)

        # matrix U
        self._recurrent_kernel = self.add_variable(
            "recurrent_kernel",
            shape=[self._num_units],
            initializer=self._recurrent_recurrent_kernel_initializer
        )

        # Clip |U| to the range [recurrent_min_abs, recurrent_max_abs]
        if self._recurrent_min_abs:
            abs_kernel = math_ops.abs(self._recurrent_kernel)
            min_abs_kernel = math_ops.maximum(abs_kernel, self._recurrent_min_abs)
            self._recurrent_kernel = math_ops.multiply(
                math_ops.sign(self._recurrent_kernel),
                min_abs_kernel
            )
        if self._recurrent_max_abs:
            self._recurrent_kernel = clip_ops.clip_by_value(
                self._recurrent_kernel,
                -self._recurrent_max_abs,
                self._recurrent_max_abs
            )

        self._bias = self.add_variable(
            "bias",
            shape=[self._num_units],
            initializer=init_ops.zeros_initializer(dtype=self.dtype)
        )
        # mark the layer as built
        self.built = True 
Developer: TobiasLee, Project: Text-Classification, Lines: 50, Source file: indRNN.py


Note: The tensorflow.python.ops.init_ops.random_normal_initializer examples in this article were compiled by 纯净天空 from open-source projects and documentation on GitHub, MSDocs, and similar platforms. The code snippets were selected from projects contributed by open-source developers, and copyright remains with the original authors; please consult each project's license before using or redistributing the code, and do not republish without permission.