

Python init_ops.constant_initializer Function Code Examples

This article collects typical usage examples of the tensorflow.python.ops.init_ops.constant_initializer function in Python. If you have been wondering what constant_initializer does, how to call it, or what it looks like in real code, the curated examples below may help.


The following presents 15 code examples of the constant_initializer function, ordered by popularity.
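Before the examples, here is a minimal, self-contained sketch of what constant_initializer does: it returns an initializer that fills a variable with a fixed value, either a scalar broadcast over the variable's shape or a list/array whose element count matches it. The sketch is illustrative only (the variable names v and w are invented here); it uses the same internal TF 1.x modules as the examples below, whereas user code would normally reach the same initializer as tf.constant_initializer.

from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables

# A scalar fill value is broadcast over the whole variable.
v = variable_scope.get_variable(
    name="v",
    shape=[2, 3],
    dtype=dtypes.float32,
    initializer=init_ops.constant_initializer(7.0))

# A list (or numpy array) value must supply exactly as many elements
# as the variable holds.
w = variable_scope.get_variable(
    name="w",
    shape=[4],
    dtype=dtypes.float32,
    initializer=init_ops.constant_initializer([0., 1., 2., 3.]))

with session.Session() as sess:
  sess.run(variables.global_variables_initializer())
  print(sess.run(v))  # [[7. 7. 7.] [7. 7. 7.]]
  print(sess.run(w))  # [0. 1. 2. 3.]

As the examples below show, the most common pattern is giving variables deterministic starting values in tests, for instance by setting initializer=init_ops.constant_initializer(0.5) on an entire variable scope.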

Example 1: test_works_correctly_vector_of_vars

  def test_works_correctly_vector_of_vars(self):
    with self.test_session() as sess:
      x = variable_scope.get_variable(
          name="x",
          shape=[],
          dtype=dtypes.float32,
          initializer=init_ops.constant_initializer(2))
      y = variable_scope.get_variable(
          name="y",
          shape=[],
          dtype=dtypes.float32,
          initializer=init_ops.constant_initializer(3))
      sess.run([variables.global_variables_initializer()])

      f = lambda z: z[0] * z[1]
      g = lambda z: z[0]**2 * z[1]**2 / 2

      z = array_ops.stack([x, y])
      fz = cg.custom_gradient(f(z), g(z), z, axis=0)
      gz = gradients_impl.gradients(fz, variables.trainable_variables())
      [z_, fz_, gx_, gy_] = sess.run([z, fz, gz[0], gz[1]])

      self.assertEqual(f(z_), fz_)
      self.assertEqual(g(z_), gx_)
      self.assertEqual(g(z_), gy_)
Developer: 1000sprites, Project: tensorflow, Lines: 25, Source: custom_grad_test.py

Example 2: test_works_correctly_fx_gx_manually_stopped

  def test_works_correctly_fx_gx_manually_stopped(self):
    with self.test_session() as sess:
      x_ = np.float32(2.1)  # Adding extra tenth to force imprecision.
      y_ = np.float32(3.1)
      x = variable_scope.get_variable(
          name="x",
          shape=[],
          dtype=dtypes.float32,
          initializer=init_ops.constant_initializer(x_))
      y = variable_scope.get_variable(
          name="y",
          shape=[],
          dtype=dtypes.float32,
          initializer=init_ops.constant_initializer(y_))
      sess.run([variables.global_variables_initializer()])

      stop = array_ops.stop_gradient  # For readability.

      # Basically we need to stop the `x` portion of `f`. And when we supply the
      # arg to `custom_gradient` we need to stop the complement, i.e., the `y`
      # part.
      f = lambda x: stop(x) * y
      g = lambda x: stop(math_ops.square(x)) * y
      fx = cg.custom_gradient(f(x), g(x), x + stop(y),
                              fx_gx_manually_stopped=True)

      gx = gradients_impl.gradients(fx, variables.trainable_variables())
      [x_, fx_, gx_, gy_] = sess.run([x, fx, gx[0], gx[1]])

      self.assertEqual(x_ * y_, fx_)
      self.assertEqual(np.square(x_) * y_, gx_)
      self.assertEqual(x_, gy_)
Developer: 1000sprites, Project: tensorflow, Lines: 32, Source: custom_grad_test.py

Example 3: test_works_correctly_side_vars

  def test_works_correctly_side_vars(self):
    with self.test_session() as sess:
      x_ = np.float32(2.1)  # Adding extra tenth to force imprecision.
      y_ = np.float32(3.1)
      x = variable_scope.get_variable(
          name="x",
          shape=[],
          dtype=dtypes.float32,
          initializer=init_ops.constant_initializer(x_))
      y = variable_scope.get_variable(
          name="y",
          shape=[],
          dtype=dtypes.float32,
          initializer=init_ops.constant_initializer(y_))
      sess.run([variables.global_variables_initializer()])

      f = lambda x: x * y
      g = lambda x: math_ops.square(x) * y

      fx = cg.custom_gradient(f(x), g(x), x)
      gx = gradients_impl.gradients(fx, variables.trainable_variables())
      [x_, fx_, gx_] = sess.run([x, fx, gx[0]])
      gy_ = gx[1]

      self.assertEqual(x_ * y_, fx_)
      self.assertEqual(np.square(x_) * y_, gx_)
      self.assertEqual(None, gy_)
Developer: 1000sprites, Project: tensorflow, Lines: 27, Source: custom_grad_test.py

Example 4: __init__

 def __init__(self, max_id, shortlist_size=100, name_prefix=''):
   """Creates a new TopN."""
   self.shortlist_size = shortlist_size
   # id_to_score contains all the scores we are tracking.
   self.id_to_score = variable_scope.get_variable(
       name=name_prefix + 'id_to_score',
       dtype=dtypes.float32,
       shape=[max_id],
       initializer=init_ops.constant_initializer(dtypes.float32.min))
   # sl_ids and sl_scores together satisfy four invariants:
   # 1) If sl_ids[i] != -1, then
   #    id_to_score[sl_ids[i]] = sl_scores[i] >= sl_scores[0]
   # 2) sl_ids[0] is the number of i > 0 for which sl_ids[i] != -1.
   # 3) If id_to_score[i] > sl_scores[0], then
   #    sl_ids[j] = i for some j.
   # 4) If sl_ids[i] == -1, then sl_scores[i] = tf.float32.min.
   self.sl_ids = variable_scope.get_variable(
       name=name_prefix + 'shortlist_ids',
       dtype=dtypes.int64,
       shape=[shortlist_size + 1],
       initializer=init_ops.constant_initializer(-1))
   # Ideally, we would set self.sl_ids[0] = 0 here.  But then it is hard
   # to pass that control dependency to the other Ops.  Instead, we
   # have insert, remove and get_best all deal with the fact that
   # self.sl_ids[0] == -1 actually means the shortlist size is 0.
   self.sl_scores = variable_scope.get_variable(
       name=name_prefix + 'shortlist_scores',
       dtype=dtypes.float32,
       shape=[shortlist_size + 1],
       initializer=init_ops.constant_initializer(dtypes.float32.min))
   # TopN keeps track of its internal data dependencies, so the user
   # doesn't have to.
   self.last_ops = []
Developer: AliMiraftab, Project: tensorflow, Lines: 33, Source: topn.py
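The four shortlist invariants documented in the TopN constructor above can be hard to parse from the comments alone. The following is a hypothetical, framework-free sketch (plain NumPy; the function name check_shortlist_invariants is invented here) that restates them as executable checks. The real class maintains these invariants inside TensorFlow ops, not Python asserts.

import numpy as np

FLOAT32_MIN = np.finfo(np.float32).min

def check_shortlist_invariants(id_to_score, sl_ids, sl_scores):
  # Invariant 2: sl_ids[0] counts the occupied slots among sl_ids[1:].
  assert sl_ids[0] == sum(1 for i in sl_ids[1:] if i != -1)
  for idx, score in zip(sl_ids[1:], sl_scores[1:]):
    if idx != -1:
      # Invariant 1: a shortlisted score mirrors id_to_score and is at
      # least the threshold stored in sl_scores[0].
      assert id_to_score[idx] == score >= sl_scores[0]
    else:
      # Invariant 4: an empty slot holds float32.min.
      assert score == FLOAT32_MIN
  # Invariant 3: every id scoring above the threshold is shortlisted.
  shortlisted = {int(i) for i in sl_ids[1:] if i != -1}
  for i, s in enumerate(id_to_score):
    if s > sl_scores[0]:
      assert i in shortlisted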

Example 5: testInvalidGlobalStep

 def testInvalidGlobalStep(self):
   with ops.Graph().as_default() as g, self.test_session(graph=g):
     x = array_ops.placeholder(dtypes.float32, [])
     var = variable_scope.get_variable(
         "test", [], initializer=init_ops.constant_initializer(10))
     loss = math_ops.abs(var * x)
     # A constant tensor cannot serve as global_step.
     with self.assertRaises(AttributeError):
       optimizers_lib.optimize_loss(
           loss,
           global_step=constant_op.constant(
               43, dtype=dtypes.int64),
           learning_rate=0.1,
           optimizer="SGD")
     # global_step must be an integer variable; float64 is rejected.
     with self.assertRaises(TypeError):
       optimizers_lib.optimize_loss(
           loss,
           global_step=variable_scope.get_variable(
               "global_step", [],
               trainable=False,
               dtype=dtypes.float64,
               initializer=init_ops.constant_initializer(
                   0.0, dtype=dtypes.float64)),
           learning_rate=0.1,
           optimizer="SGD")
     # global_step must be a scalar; shape [1] is rejected.
     with self.assertRaises(ValueError):
       optimizers_lib.optimize_loss(
           loss,
           global_step=variable_scope.get_variable(
               "global_step", [1],
               trainable=False,
               dtype=dtypes.int64,
               initializer=init_ops.constant_initializer(
                   [0], dtype=dtypes.int64)),
           learning_rate=0.1,
           optimizer="SGD")
Developer: Jackhuang945, Project: tensorflow, Lines: 35, Source: optimizers_test.py

Example 6: _init_input2hidden

def _init_input2hidden(ops, rnn_mode, input_mode, W_init, input_dims, hidden_dims):
  # N represents the number of gates
  if 'rnn' in rnn_mode:
    N = 1
    msg = '(W_hid)'
  elif rnn_mode == 'gru':
    N = 3
    msg = '(W_input_to_updategate, W_input_to_resetgate, W_input_to_hiddenupdate)'
  elif rnn_mode == 'lstm':
    N = 4
    msg = '(W_input_to_inputgate, W_input_to_forgetgate, W_input_to_hidden, W_input_to_outputgate)'
  # ====== check input ====== #
  if input_mode != 'skip':
    ops.get_variable_nnop(initializer=W_init, shape=(input_dims, hidden_dims * N),
                          name='W_in', roles=Weight)
    if input_mode == 'norm':
      ops.get_variable_nnop(initializer=init_ops.constant_initializer(0.), shape=(hidden_dims * N,),
                            name='beta', roles=BatchNormShiftParameter)
      ops.get_variable_nnop(initializer=init_ops.constant_initializer(1.), shape=(hidden_dims * N,),
                            name='gamma', roles=BatchNormScaleParameter)
      ops.get_variable_nnop(initializer=init_ops.constant_initializer(0.), shape=(hidden_dims * N,),
                            name='mean', roles=BatchNormPopulationMean)
      ops.get_variable_nnop(initializer=init_ops.constant_initializer(1.), shape=(hidden_dims * N,),
                            name='inv_std', roles=BatchNormPopulationInvStd)
  # skip input mode
  elif input_dims != hidden_dims and \
  input_dims != hidden_dims * N: # 3 gates + 1 hid_update
    raise Exception(('Skip input mode: input trailing_dimension=%d '
                     '(the final dim) must equal the number of hidden '
                     'units (tied input connection), or %d times the number '
                     'of hidden units = %d, which includes: ' + msg) %
                    (input_dims, N, hidden_dims * N))
Developer: imito, Project: odin, Lines: 32, Source: rnn.py

Example 7: testBasicLSTMCell

 def testBasicLSTMCell(self):
   for dtype in [dtypes.float16, dtypes.float32]:
     np_dtype = dtype.as_numpy_dtype
     with self.test_session(graph=ops.Graph()) as sess:
       with variable_scope.variable_scope(
           "root", initializer=init_ops.constant_initializer(0.5)):
         x = array_ops.zeros([1, 2], dtype=dtype)
         m = array_ops.zeros([1, 8], dtype=dtype)
         cell = rnn_cell_impl.MultiRNNCell(
             [
                 rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)
                 for _ in range(2)
             ],
             state_is_tuple=False)
         self.assertEqual(cell.dtype, None)
         g, out_m = cell(x, m)
         # Layer infers the input type.
         self.assertEqual(cell.dtype, dtype.name)
         expected_variable_names = [
             "root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
             rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
             "root/multi_rnn_cell/cell_0/basic_lstm_cell/%s:0" %
             rnn_cell_impl._BIAS_VARIABLE_NAME,
             "root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
             rnn_cell_impl._WEIGHTS_VARIABLE_NAME,
             "root/multi_rnn_cell/cell_1/basic_lstm_cell/%s:0" %
             rnn_cell_impl._BIAS_VARIABLE_NAME
         ]
         self.assertEqual(expected_variable_names,
                          [v.name for v in cell.trainable_variables])
         self.assertFalse(cell.non_trainable_variables)
         sess.run([variables_lib.global_variables_initializer()])
         res = sess.run([g, out_m], {
             x.name: np.array([[1., 1.]]),
             m.name: 0.1 * np.ones([1, 8])
         })
         self.assertEqual(len(res), 2)
         variables = variables_lib.global_variables()
         self.assertEqual(expected_variable_names, [v.name for v in variables])
          # The numbers in the results were not hand-calculated; this is
          # just a smoke test.
         self.assertAllClose(res[0], np.array(
             [[0.240, 0.240]], dtype=np_dtype), 1e-2)
         expected_mem = np.array(
             [[0.689, 0.689, 0.448, 0.448, 0.398, 0.398, 0.240, 0.240]],
             dtype=np_dtype)
         self.assertAllClose(res[1], expected_mem, 1e-2)
       with variable_scope.variable_scope(
           "other", initializer=init_ops.constant_initializer(0.5)):
         # Test BasicLSTMCell with input_size != num_units.
         x = array_ops.zeros([1, 3], dtype=dtype)
         m = array_ops.zeros([1, 4], dtype=dtype)
         g, out_m = rnn_cell_impl.BasicLSTMCell(2, state_is_tuple=False)(x, m)
         sess.run([variables_lib.global_variables_initializer()])
         res = sess.run(
             [g, out_m], {
                 x.name: np.array([[1., 1., 1.]], dtype=np_dtype),
                 m.name: 0.1 * np.ones([1, 4], dtype=np_dtype)
             })
         self.assertEqual(len(res), 2)
Developer: ChengYuXiang, Project: tensorflow, Lines: 60, Source: core_rnn_cell_test.py

Example 8: __call__

  def __call__(self, x, h_prev, scope=None):
    """GRU cell."""
    with vs.variable_scope(scope or type(self).__name__):
      input_size = x.get_shape().with_rank(2)[1]

      # Check that the input size is set.
      if input_size is None:
        raise ValueError("Expecting input_size to be set.")

      # Check cell_size == state_size from h_prev.
      cell_size = h_prev.get_shape().with_rank(2)[1]
      if cell_size != self._cell_size:
        raise ValueError("Shape of h_prev[1] incorrect: cell_size %i vs %s" %
                         (self._cell_size, cell_size))

      if cell_size is None:
        raise ValueError("cell_size from `h_prev` should not be None.")

      w_ru = vs.get_variable("w_ru", [input_size + self._cell_size,
                                      self._cell_size * 2])
      b_ru = vs.get_variable(
          "b_ru", [self._cell_size * 2],
          initializer=init_ops.constant_initializer(1.0))
      w_c = vs.get_variable("w_c",
                            [input_size + self._cell_size, self._cell_size])
      b_c = vs.get_variable(
          "b_c", [self._cell_size],
          initializer=init_ops.constant_initializer(0.0))

      _gru_block_cell = gen_gru_ops.gru_block_cell  # pylint: disable=invalid-name
      _, _, _, new_h = _gru_block_cell(
          x=x, h_prev=h_prev, w_ru=w_ru, w_c=w_c, b_ru=b_ru, b_c=b_c)

      return new_h, new_h
Developer: Crazyonxh, Project: tensorflow, Lines: 34, Source: gru_ops.py

Example 9: testTraining

  def testTraining(self):
    """Tests a gradient descent step for a simple model."""
    with self.test_session() as session:
      with self.test_scope():
        with variable_scope.variable_scope("ascope", use_resource=True):
          w = variable_scope.get_variable(
              "w",
              shape=[4, 2],
              dtype=dtypes.float32,
              initializer=init_ops.constant_initializer(
                  np.array([[1, 2], [3, 4], [5, 6], [7, 8]], dtype=np.float32)))
          b = variable_scope.get_variable(
              "b",
              shape=[2],
              dtype=dtypes.float32,
              initializer=init_ops.constant_initializer(
                  np.array([2, 3], dtype=np.float32)))

          x = array_ops.placeholder(dtypes.float32, shape=[1, 4])
          y = math_ops.matmul(x, w) + b
          loss = math_ops.reduce_sum(y)
          optimizer = GradientDescentOptimizer(0.1)
          train = optimizer.minimize(loss)

      session.run(variables.global_variables_initializer())
      session.run(train, {x: np.array([[7, 3, 5, 9]], dtype=np.float32)})
      vw, vb = session.run([w, b])
      self.assertAllClose(
          np.array(
              [[0.3, 1.3], [2.7, 3.7], [4.5, 5.5], [6.1, 7.1]],
              dtype=np.float32),
          vw,
          rtol=1e-4)
      self.assertAllClose(np.array([1.9, 2.9], dtype=np.float32), vb, rtol=1e-4)
Developer: AbhinavJain13, Project: tensorflow, Lines: 34, Source: variable_ops_test.py

Example 10: _TestOptimizerSupportHelper

  def _TestOptimizerSupportHelper(self, opt):
    num_layers = 4
    num_units = 2
    batch_size = 8
    direction = CUDNN_RNN_UNIDIRECTION
    dir_count = 1

    with ops.Graph().as_default() as g:
      kernel_initializer = init_ops.constant_initializer(0.)
      bias_initializer = init_ops.constant_initializer(0.)
      inputs = random_ops.random_uniform([
          num_layers * dir_count, batch_size, num_units], dtype=dtypes.float32)

      lstm = cudnn_rnn.CudnnLSTM(num_layers, num_units,
                                 direction=direction,
                                 kernel_initializer=kernel_initializer,
                                 bias_initializer=bias_initializer,
                                 name="awesome_lstm")
      outputs, _ = lstm(inputs)
      loss = math_ops.reduce_sum(outputs)
      optimizer = self._GetOptimizer(opt)
      train_op = optimizer.minimize(loss)

    with self.test_session(use_gpu=True, graph=g) as sess:
      sess.run(variables.global_variables_initializer())
      sess.run(train_op)
Developer: AndrewTwinz, Project: tensorflow, Lines: 26, Source: cudnn_rnn_test.py

Example 11: testIndyGRUCell

 def testIndyGRUCell(self):
   with self.test_session() as sess:
     with variable_scope.variable_scope(
         "root", initializer=init_ops.constant_initializer(0.5)):
       x = array_ops.zeros([1, 2])
       m = array_ops.zeros([1, 2])
       g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
       sess.run([variables_lib.global_variables_initializer()])
       res = sess.run([g], {
           x.name: np.array([[1., 1.]]),
           m.name: np.array([[0.1, 0.1]])
       })
       # Smoke test
       self.assertAllClose(res[0], [[0.185265, 0.17704]])
     with variable_scope.variable_scope(
         "other", initializer=init_ops.constant_initializer(0.5)):
       # Test IndyGRUCell with input_size != num_units.
       x = array_ops.zeros([1, 3])
       m = array_ops.zeros([1, 2])
       g, _ = contrib_rnn_cell.IndyGRUCell(2)(x, m)
       sess.run([variables_lib.global_variables_initializer()])
       res = sess.run([g], {
           x.name: np.array([[1., 1., 1.]]),
           m.name: np.array([[0.1, 0.1]])
       })
       # Smoke test
       self.assertAllClose(res[0], [[0.155127, 0.157328]])
Developer: Eagle732, Project: tensorflow, Lines: 27, Source: core_rnn_cell_test.py

Example 12: testGRUCell

 def testGRUCell(self):
   with self.test_session() as sess:
     with variable_scope.variable_scope(
         "root", initializer=init_ops.constant_initializer(0.5)):
       x = array_ops.zeros([1, 2])
       m = array_ops.zeros([1, 2])
       g, _ = rnn_cell_impl.GRUCell(2)(x, m)
       sess.run([variables_lib.global_variables_initializer()])
       res = sess.run(
           [g], {x.name: np.array([[1., 1.]]),
                 m.name: np.array([[0.1, 0.1]])})
       # Smoke test
       self.assertAllClose(res[0], [[0.175991, 0.175991]])
     with variable_scope.variable_scope(
         "other", initializer=init_ops.constant_initializer(0.5)):
       x = array_ops.zeros(
           [1, 3])  # Test GRUCell with input_size != num_units.
       m = array_ops.zeros([1, 2])
       g, _ = rnn_cell_impl.GRUCell(2)(x, m)
       sess.run([variables_lib.global_variables_initializer()])
       res = sess.run(
           [g],
           {x.name: np.array([[1., 1., 1.]]),
            m.name: np.array([[0.1, 0.1]])})
       # Smoke test
       self.assertAllClose(res[0], [[0.156736, 0.156736]])
Developer: ggaziv, Project: tensorflow, Lines: 26, Source: core_rnn_cell_test.py

Example 13: gru

def gru(cell_size, sequence_len, xs, name=None, scope=None):
  r"""gru

  args:
    sequence_len: a `tensor` of type `int64`.
    cell_size: an `int`.
    xs: a list of at least 1 `tensor` objects of type `float32`.
    name: a name for the operation (optional).

  returns:
    a tuple of `tensor` objects (rs, zs, rhs, gs, hs).
    rs: a list with the same number of `tensor` objects as `xs` of `tensor` objects of type `float32`.
    zs: a list with the same number of `tensor` objects as `xs` of `tensor` objects of type `float32`.
    rhs: a list with the same number of `tensor` objects as `xs` of `tensor` objects of type `float32`.
    gs: a list with the same number of `tensor` objects as `xs` of `tensor` objects of type `float32`.
    hs: a list with the same number of `tensor` objects as `xs` of `tensor` objects of type `float32`.
  """
  with vs.variable_scope(scope or "Gru"):
    input_size = xs[0].get_shape()[1].value

    wxr = vs.get_variable("wxr", [input_size, cell_size])
    whr = vs.get_variable("whr", [cell_size, cell_size])
    wxz = vs.get_variable("wxz", [input_size, cell_size])
    whz = vs.get_variable("whz", [cell_size, cell_size])
    wxh = vs.get_variable("wxh", [input_size, cell_size])
    whh = vs.get_variable("whh", [cell_size, cell_size])

    br = vs.get_variable("br", [cell_size], initializer=init_ops.constant_initializer(1.0))
    bz = vs.get_variable("bz", [cell_size], initializer=init_ops.constant_initializer(1.0))
    bh = vs.get_variable("bh", [cell_size], initializer=init_ops.constant_initializer(0.0))

    return gen_gru_ops._gru(cell_size=cell_size, sequence_len=sequence_len,
        wxr=wxr, whr=whr, wxz=wxz, whz=whz, wxh=wxh, whh=whh, br=br, bz=bz,
        bh=bh, xs=xs, name=name)
Developer: wchan, Project: tensorflow, Lines: 34, Source: gru_ops.py

Example 14: gru_cell

def gru_cell(cell_size, sequence_len, h_prev, x, name=None, scope=None, time_idx=None):
  r"""GRU Cell

  Args:
    sequence_len: A `Tensor` of type `int64`.
    h_prev: A `Tensor` of type `float32`.
    x: A `Tensor` of type `float32`.
    cell_size: An `int`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (r, z, rh, g, h).
    r: A `Tensor` of type `float32`.
    z: A `Tensor` of type `float32`.
    rh: A `Tensor` of type `float32`.
    g: A `Tensor` of type `float32`.
    h: A `Tensor` of type `float32`.
  """
  with vs.variable_scope(scope or "GruCell"):
    input_size = x.get_shape()[1].value

    wxr = vs.get_variable("wxr", [input_size, cell_size])
    whr = vs.get_variable("whr", [cell_size, cell_size])
    wxz = vs.get_variable("wxz", [input_size, cell_size])
    whz = vs.get_variable("whz", [cell_size, cell_size])
    wxh = vs.get_variable("wxh", [input_size, cell_size])
    whh = vs.get_variable("whh", [cell_size, cell_size])

    br = vs.get_variable("br", [cell_size], initializer=init_ops.constant_initializer(1.0))
    bz = vs.get_variable("bz", [cell_size], initializer=init_ops.constant_initializer(1.0))
    bh = vs.get_variable("bh", [cell_size], initializer=init_ops.constant_initializer(0.0))

    return gen_gru_ops._gru_cell(cell_size=cell_size, sequence_len=sequence_len,
        wxr=wxr, whr=whr, wxz=wxz, whz=whz, wxh=wxh, whh=whh, br=br, bz=bz,
        bh=bh, h_prev=h_prev, x=x, name=name, time_idx=time_idx)
Developer: wchan, Project: tensorflow, Lines: 35, Source: gru_ops.py

Example 15: _norm

 def _norm(self, inp, scope):
   with vs.variable_scope(scope) as scope:
     shape = inp.get_shape()[-1:]
     gamma_init = init_ops.constant_initializer(self._g)
     beta_init = init_ops.constant_initializer(self._b)
     gamma = vs.get_variable("gamma", shape=shape, initializer=gamma_init)  # pylint: disable=unused-variable
     beta = vs.get_variable("beta", shape=shape, initializer=beta_init)  # pylint: disable=unused-variable
     normalized = layers.layer_norm(inp, reuse=True, scope=scope)
     return normalized
Developer: KalraA, Project: tensorflow, Lines: 9, Source: rnn_cell.py


Note: The tensorflow.python.ops.init_ops.constant_initializer function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.