

Python v1.random_uniform_initializer Method Code Examples

This article collects typical usage examples of the Python method tensorflow.compat.v1.random_uniform_initializer. If you are wondering what v1.random_uniform_initializer does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also browse further usage examples from the containing module, tensorflow.compat.v1.


The following shows 10 code examples of v1.random_uniform_initializer, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
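Before the examples, here is a minimal sketch of the method's basic shape (the variable name, bounds, and seed below are illustrative, not taken from any example on this page): the call returns an initializer that draws values uniformly from [minval, maxval), and the result is typically handed to tf.get_variable or attached to a tf.variable_scope.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Returns an initializer drawing uniformly from [-0.1, 0.1); `seed` makes it deterministic.
init = tf.random_uniform_initializer(minval=-0.1, maxval=0.1, seed=42)

# Typical use: pass it to tf.get_variable.
w = tf.get_variable("w", shape=[3, 4], initializer=init)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(w))  # a 3x4 matrix with entries in [-0.1, 0.1)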

Example 1: build_graph

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_uniform_initializer [as alias]
def build_graph(is_training,
                hparams,
                placeholders=None,
                direct_inputs=None,
                use_placeholders=True):
  """Builds the model graph."""
  if placeholders is None and use_placeholders:
    placeholders = get_placeholders(hparams)
  initializer = tf.random_uniform_initializer(-hparams.init_scale,
                                              hparams.init_scale)
  with tf.variable_scope('model', reuse=None, initializer=initializer):
    graph = CoconetGraph(
        is_training=is_training,
        hparams=hparams,
        placeholders=placeholders,
        direct_inputs=direct_inputs,
        use_placeholders=use_placeholders)
  return graph 
Developer: magenta, Project: magenta, Lines of code: 20, Source: lib_graph.py
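Example 1 relies on the fact that an initializer attached to a tf.variable_scope becomes the default for every variable created inside the scope. A minimal sketch of that propagation, with a hypothetical init_scale of 0.04 standing in for hparams.init_scale:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

init_scale = 0.04  # hypothetical stand-in for hparams.init_scale
initializer = tf.random_uniform_initializer(-init_scale, init_scale)

with tf.variable_scope('model', reuse=None, initializer=initializer):
    # No per-variable initializer is given, so the scope's default applies.
    v = tf.get_variable('v', shape=[2, 2])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(v))  # entries drawn uniformly from [-0.04, 0.04)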

Example 2: layer

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_uniform_initializer [as alias]
def layer(input_layer, num_next_neurons, is_output=False):
    num_prev_neurons = int(input_layer.shape[1])
    shape = [num_prev_neurons, num_next_neurons]
    
    if is_output:
        weight_init = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
        bias_init = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
    else:
        # Fan-in scaling: bound the uniform init by 1/sqrt(fan_in),
        # where fan_in (f) = num_prev_neurons.
        fan_in_init = 1 / num_prev_neurons ** 0.5
        weight_init = tf.random_uniform_initializer(minval=-fan_in_init, maxval=fan_in_init)
        bias_init = tf.random_uniform_initializer(minval=-fan_in_init, maxval=fan_in_init) 

    weights = tf.get_variable("weights", shape, initializer=weight_init)
    biases = tf.get_variable("biases", [num_next_neurons], initializer=bias_init)

    dot = tf.matmul(input_layer, weights) + biases

    if is_output:
        return dot

    relu = tf.nn.relu(dot)
    return relu 
Developer: andrew-j-levy, Project: Hierarchical-Actor-Critc-HAC-, Lines of code: 25, Source: utils.py
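The split in Example 2 (a tiny fixed bound of 3e-3 for the output layer, a 1/sqrt(fan_in) bound for hidden layers) matches the initialization convention used in the DDPG paper, which this actor-critic code builds on. A minimal usage sketch, assuming the `layer` function above is in scope and using made-up layer sizes; each call needs its own variable scope, since `layer` hard-codes the names "weights" and "biases":

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

state = tf.placeholder(tf.float32, shape=[None, 8])  # made-up input width

with tf.variable_scope('hidden1'):
    h1 = layer(state, 64)               # ReLU, fan-in-scaled init
with tf.variable_scope('output'):
    q = layer(h1, 1, is_output=True)    # linear, init in [-3e-3, 3e-3)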

Example 3: layer_goal_nn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_uniform_initializer [as alias]
def layer_goal_nn(input_layer, num_next_neurons, is_output=False):
    num_prev_neurons = int(input_layer.shape[1])
    shape = [num_prev_neurons, num_next_neurons]

    # Fan-in scaling, as in the hidden layers of Example 2: bound = 1/sqrt(fan_in).
    fan_in_init = 1 / num_prev_neurons ** 0.5
    weight_init = tf.random_uniform_initializer(minval=-fan_in_init, maxval=fan_in_init)
    bias_init = tf.random_uniform_initializer(minval=-fan_in_init, maxval=fan_in_init) 

    weights = tf.get_variable("weights", shape, initializer=weight_init)
    biases = tf.get_variable("biases", [num_next_neurons], initializer=bias_init)

    dot = tf.matmul(input_layer, weights) + biases

    if is_output:
        return dot

    relu = tf.nn.relu(dot)
    return relu


# The function below prints out the options and environment specified by the user
Developer: andrew-j-levy, Project: Hierarchical-Actor-Critc-HAC-, Lines of code: 24, Source: utils.py

Example 4: get_variable_initializer

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_uniform_initializer [as alias]
def get_variable_initializer(hparams):
  """Get variable initializer from hparams."""
  if not hparams.initializer:
    return None

  mlperf_log.transformer_print(key=mlperf_log.MODEL_HP_INITIALIZER_GAIN,
                               value=hparams.initializer_gain,
                               hparams=hparams)

  if not tf.executing_eagerly():
    tf.logging.info("Using variable initializer: %s", hparams.initializer)
  if hparams.initializer == "orthogonal":
    return tf.orthogonal_initializer(gain=hparams.initializer_gain)
  elif hparams.initializer == "uniform":
    max_val = 0.1 * hparams.initializer_gain
    return tf.random_uniform_initializer(-max_val, max_val)
  elif hparams.initializer == "normal_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="normal")
  elif hparams.initializer == "uniform_unit_scaling":
    return tf.variance_scaling_initializer(
        hparams.initializer_gain, mode="fan_avg", distribution="uniform")
  elif hparams.initializer == "xavier":
    return tf.initializers.glorot_uniform()
  else:
    raise ValueError("Unrecognized initializer: %s" % hparams.initializer) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 28, Source: optimize.py
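For instance, with initializer="uniform" and initializer_gain=1.0 the function returns tf.random_uniform_initializer(-0.1, 0.1). A hypothetical hparams stand-in makes the "uniform" branch concrete without pulling in the rest of tensor2tensor:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

class FakeHParams(object):
    # Hypothetical stand-in for a tensor2tensor HParams object.
    initializer = "uniform"
    initializer_gain = 1.0

hparams = FakeHParams()
max_val = 0.1 * hparams.initializer_gain
init = tf.random_uniform_initializer(-max_val, max_val)  # what the "uniform" branch builds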

Example 5: __call__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_uniform_initializer [as alias]
def __call__(self, reduced_dims, new_dims):
    fan_in = mtf.list_product(d.size for d in reduced_dims)
    fan_out = mtf.list_product(d.size for d in new_dims)
    scale = self.scale
    if self.mode == "fan_in":
      if not unit_scaling_convention():
        scale /= max(1., fan_in)
    elif self.mode == "fan_out":
      if unit_scaling_convention():
        raise ValueError("Unit scaling convention only works with \"fan_in\"")
      scale /= max(1., fan_out)
    elif self.mode == "fan_avg":
      if unit_scaling_convention():
        raise ValueError("Unit scaling convention only works with \"fan_in\"")
      scale /= max(1., float(fan_in + fan_out) / 2)
    else:
      raise ValueError(
          "Invalid `mode` argument: "
          "expected on of {\"fan_in\", \"fan_out\", \"fan_avg\"} "
          "but got %s" % (self.mode,))
    stddev = scale ** 0.5
    if self.distribution == "normal":
      return tf.truncated_normal_initializer(stddev=stddev)
    elif self.distribution == "uniform":
      limit = stddev * 3. ** 0.5
      return tf.random_uniform_initializer(minval=-limit, maxval=limit)
    else:
      raise ValueError("Invalid `distribution` argument: "
                       "expected one of {\"normal\", \"uniform\"} "
                       "but got %s" % (self.distribution,)) 
Developer: tensorflow, Project: mesh, Lines of code: 32, Source: layers.py
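The limit = stddev * 3. ** 0.5 line in Example 5 follows from the variance of a uniform distribution: Var(U(-a, a)) = a^2 / 3, so choosing a = stddev * sqrt(3) gives the uniform initializer the same nominal variance, stddev^2, that the normal branch targets. A quick numerical check of that identity:

import numpy as np

stddev = 0.05
limit = stddev * 3.0 ** 0.5
samples = np.random.uniform(-limit, limit, size=1000000)
print(samples.std())  # ~0.05, matching the target stddev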

Example 6: _createStackBidirectionalDynamicRNN

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_uniform_initializer [as alias]
def _createStackBidirectionalDynamicRNN(self,
                                          use_gpu,
                                          use_shape,
                                          use_state_tuple,
                                          initial_states_fw=None,
                                          initial_states_bw=None,
                                          scope=None):
    self.layers = [2, 3]
    input_size = 5
    batch_size = 2
    max_length = 8

    initializer = tf.random_uniform_initializer(
        -0.01, 0.01, seed=self._seed)
    sequence_length = tf.placeholder(tf.int64)

    self.cells_fw = [
        rnn_cell.LSTMCell(  # pylint:disable=g-complex-comprehension
            num_units,
            input_size,
            initializer=initializer,
            state_is_tuple=False) for num_units in self.layers
    ]
    self.cells_bw = [
        rnn_cell.LSTMCell(  # pylint:disable=g-complex-comprehension
            num_units,
            input_size,
            initializer=initializer,
            state_is_tuple=False) for num_units in self.layers
    ]

    inputs = max_length * [
        tf.placeholder(
            tf.float32,
            shape=(batch_size, input_size) if use_shape else (None, input_size))
    ]
    inputs_c = tf.stack(inputs)
    inputs_c = tf.transpose(inputs_c, [1, 0, 2])
    outputs, st_fw, st_bw = contrib_rnn.stack_bidirectional_dynamic_rnn(
        self.cells_fw,
        self.cells_bw,
        inputs_c,
        initial_states_fw=initial_states_fw,
        initial_states_bw=initial_states_bw,
        dtype=tf.float32,
        sequence_length=sequence_length,
        scope=scope)

    # Outputs have shape (batch_size, max_length, 2 * self.layers[-1]).
    output_shape = [None, max_length, 2 * self.layers[-1]]
    if use_shape:
      output_shape[0] = batch_size

    self.assertAllEqual(outputs.get_shape().as_list(), output_shape)

    input_value = np.random.randn(batch_size, input_size)

    return input_value, inputs, outputs, st_fw, st_bw, sequence_length 
Developer: magenta, Project: magenta, Lines of code: 60, Source: rnn_test.py

Example 7: testLSTMBasicToBlockCell

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_uniform_initializer [as alias]
def testLSTMBasicToBlockCell(self):
    with self.session(use_gpu=True) as sess:
      x = tf.zeros([1, 2])
      x_values = np.random.randn(1, 2)

      m0_val = 0.1 * np.ones([1, 2])
      m1_val = -0.1 * np.ones([1, 2])
      m2_val = -0.2 * np.ones([1, 2])
      m3_val = 0.2 * np.ones([1, 2])

      initializer = tf.random_uniform_initializer(
          -0.01, 0.01, seed=19890212)
      with tf.variable_scope("basic", initializer=initializer):
        m0 = tf.zeros([1, 2])
        m1 = tf.zeros([1, 2])
        m2 = tf.zeros([1, 2])
        m3 = tf.zeros([1, 2])
        g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
            [rnn_cell.BasicLSTMCell(2, state_is_tuple=True) for _ in range(2)],
            state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
        sess.run([tf.global_variables_initializer()])
        basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
            x.name: x_values,
            m0.name: m0_val,
            m1.name: m1_val,
            m2.name: m2_val,
            m3.name: m3_val
        })

      with tf.variable_scope("block", initializer=initializer):
        m0 = tf.zeros([1, 2])
        m1 = tf.zeros([1, 2])
        m2 = tf.zeros([1, 2])
        m3 = tf.zeros([1, 2])
        g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
            [contrib_rnn.LSTMBlockCell(2)
             for _ in range(2)], state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
        sess.run([tf.global_variables_initializer()])
        block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
            x.name: x_values,
            m0.name: m0_val,
            m1.name: m1_val,
            m2.name: m2_val,
            m3.name: m3_val
        })

      self.assertEqual(len(basic_res), len(block_res))
      for basic, block in zip(basic_res, block_res):
        self.assertAllClose(basic, block) 
Developer: magenta, Project: magenta, Lines of code: 51, Source: rnn_test.py
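A note on the fixed seed=19890212 here: the "basic" and "block" scopes create weight variables of the same shapes in the same order, and because the shared initializer carries a fixed op-level seed, both scopes presumably draw identical initial values. That shared starting point is what makes the element-wise assertAllClose comparison between the two cell implementations meaningful. Example 8 below repeats the same pattern with peephole connections.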

Example 8: testLSTMBasicToBlockCellPeeping

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_uniform_initializer [as alias]
def testLSTMBasicToBlockCellPeeping(self):
    with self.session(use_gpu=True) as sess:
      x = tf.zeros([1, 2])
      x_values = np.random.randn(1, 2)

      m0_val = 0.1 * np.ones([1, 2])
      m1_val = -0.1 * np.ones([1, 2])
      m2_val = -0.2 * np.ones([1, 2])
      m3_val = 0.2 * np.ones([1, 2])

      initializer = tf.random_uniform_initializer(
          -0.01, 0.01, seed=19890212)
      with tf.variable_scope("basic", initializer=initializer):
        m0 = tf.zeros([1, 2])
        m1 = tf.zeros([1, 2])
        m2 = tf.zeros([1, 2])
        m3 = tf.zeros([1, 2])
        g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
            [
                rnn_cell.LSTMCell(2, use_peepholes=True, state_is_tuple=True)
                for _ in range(2)
            ],
            state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
        sess.run([tf.global_variables_initializer()])
        basic_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
            x.name: x_values,
            m0.name: m0_val,
            m1.name: m1_val,
            m2.name: m2_val,
            m3.name: m3_val
        })

      with tf.variable_scope("block", initializer=initializer):
        m0 = tf.zeros([1, 2])
        m1 = tf.zeros([1, 2])
        m2 = tf.zeros([1, 2])
        m3 = tf.zeros([1, 2])
        g, ((out_m0, out_m1), (out_m2, out_m3)) = rnn_cell.MultiRNNCell(
            [contrib_rnn.LSTMBlockCell(2, use_peephole=True) for _ in range(2)],
            state_is_tuple=True)(x, ((m0, m1), (m2, m3)))
        sess.run([tf.global_variables_initializer()])
        block_res = sess.run([g, out_m0, out_m1, out_m2, out_m3], {
            x.name: x_values,
            m0.name: m0_val,
            m1.name: m1_val,
            m2.name: m2_val,
            m3.name: m3_val
        })

      self.assertEqual(len(basic_res), len(block_res))
      for basic, block in zip(basic_res, block_res):
        self.assertAllClose(basic, block) 
Developer: magenta, Project: magenta, Lines of code: 54, Source: rnn_test.py

Example 9: test_batchnorm_bounds

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_uniform_initializer [as alias]
def test_batchnorm_bounds(self, batchnorm_class, dtype, tol, is_training):
    batch_size = 11
    input_size = 7
    output_size = 5

    lb_in = tf.random_normal(dtype=dtype, shape=(batch_size, input_size))
    ub_in = tf.random_normal(dtype=dtype, shape=(batch_size, input_size))
    lb_in, ub_in = tf.minimum(lb_in, ub_in), tf.maximum(lb_in, ub_in)
    nominal = tf.random_normal(dtype=dtype, shape=(batch_size, input_size))

    # Linear layer.
    w = tf.random_normal(dtype=dtype, shape=(input_size, output_size))
    b = tf.random_normal(dtype=dtype, shape=(output_size,))

    # Batch norm layer.
    epsilon = 1.e-2
    bn_initializers = {
        'beta': tf.random_normal_initializer(),
        'gamma': tf.random_uniform_initializer(.1, 3.),
        'moving_mean': tf.random_normal_initializer(),
        'moving_variance': tf.random_uniform_initializer(.1, 3.)
    }
    batchnorm_module = batchnorm_class(offset=True, scale=True, eps=epsilon,
                                       initializers=bn_initializers)
    # Connect the batchnorm module to the graph.
    batchnorm_module(tf.random_normal(dtype=dtype,
                                      shape=(batch_size, output_size)),
                     is_training=is_training)

    bounds_in = ibp.RelativeIntervalBounds(lb_in - nominal,
                                           ub_in - nominal, nominal)
    bounds_out = bounds_in.apply_linear(None, w, b)
    bounds_out = bounds_out.apply_batch_norm(
        batchnorm_module,
        batchnorm_module.mean if is_training else batchnorm_module.moving_mean,
        batchnorm_module.variance if is_training
        else batchnorm_module.moving_variance,
        batchnorm_module.gamma,
        batchnorm_module.beta,
        epsilon)
    lb_out, ub_out = bounds_out.lower, bounds_out.upper

    # Separately, calculate dual objective by adjusting the linear layer.
    wn, bn = layer_utils.combine_with_batchnorm(w, b, batchnorm_module)
    bounds_out_lin = bounds_in.apply_linear(None, wn, bn)
    lb_out_lin, ub_out_lin = bounds_out_lin.lower, bounds_out_lin.upper

    init_op = tf.global_variables_initializer()

    with self.test_session() as session:
      session.run(init_op)
      (lb_out_val, ub_out_val,
       lb_out_lin_val, ub_out_lin_val) = session.run((lb_out, ub_out,
                                                      lb_out_lin, ub_out_lin))
      self.assertAllClose(lb_out_val, lb_out_lin_val, atol=tol, rtol=tol)
      self.assertAllClose(ub_out_val, ub_out_lin_val, atol=tol, rtol=tol) 
Developer: deepmind, Project: interval-bound-propagation, Lines of code: 58, Source: relative_bounds_test.py

Example 10: _compute_logits

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import random_uniform_initializer [as alias]
def _compute_logits(self, rnn_out):
    if self._num_layers == 1 and self._weights is not None:
      assert tensor_utils.shape(rnn_out, -1) == self._hidden_dim

    if self._num_layers == 1:
      with tf.variable_scope("mlp1", reuse=self._reuse):
        if self._weights is None:
          scale = (3.0 / self._hidden_dim) ** 0.5
          weight_initializer = tf.random_uniform_initializer(
              minval=-scale, maxval=scale)
          self._linear1 = Linear(
              rnn_out,
              self._output_size,
              True, weights=None,
              weight_initializer=weight_initializer)
        else:
          self._linear1 = Linear(
              rnn_out, self._output_size, True, weights=self._weights)
        logits = self._linear1(rnn_out)
    else:
      # Note: this assertion unconditionally disables the two-layer path below.
      assert False
      assert self._num_layers == 2
      with tf.variable_scope("mlp1", reuse=self._reuse):
        if self._linear1 is None:
          self._linear1 = Linear(
              rnn_out, self._hidden_dim, True,
              weights=None,
              weight_initializer=tf.contrib.layers.xavier_initializer())
        hidden = self._linear1(rnn_out)
        if self._activation:
          hidden = self._activation(hidden)

        if self._mode == tf.estimator.ModeKeys.TRAIN and self._dropout > 0.:
          hidden = tf.nn.dropout(hidden, keep_prob=1.-self._dropout)

      with tf.variable_scope("mlp2", reuse=self._reuse):
        if self._linear2 is None:
          if self._weights is None:
            scale = (3.0 / self._hidden_dim) ** 0.5
            weight_initializer = tf.random_uniform_initializer(
                minval=-scale, maxval=scale)
            self._linear2 = Linear(
                hidden,
                self._output_size,
                True, weights=None,
                weight_initializer=weight_initializer)
          else:
            self._linear2 = Linear(
                hidden, self._output_size, True, weights=self._weights)

        logits = self._linear2(hidden)
    return logits 
Developer: google-research, Project: language, Lines of code: 54, Source: output_wrapper.py
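The bound scale = (3.0 / self._hidden_dim) ** 0.5 in Example 10 is the same uniform-variance identity as in Example 5: with a = sqrt(3 / fan_in), each weight has variance a^2 / 3 = 1 / fan_in, i.e., LeCun-style fan-in scaling. A quick check, with a made-up hidden_dim:

import numpy as np

hidden_dim = 512
scale = (3.0 / hidden_dim) ** 0.5
w = np.random.uniform(-scale, scale, size=(hidden_dim, 1000))
print(w.var(), 1.0 / hidden_dim)  # both ~0.00195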


Note: The tensorflow.compat.v1.random_uniform_initializer method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright in the source code belongs to the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.