

Python init_ops.constant_initializer Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.init_ops.constant_initializer method in Python. If you are wondering what init_ops.constant_initializer does, how to call it, or what real uses of it look like, the curated examples below should help. You can also explore further usage examples from its containing module, tensorflow.python.ops.init_ops.


A total of 15 code examples of the init_ops.constant_initializer method are shown below, sorted by popularity by default.
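
The examples that follow all rely on the same basic pattern: constructing a constant_initializer and passing it as the initializer argument of a variable-creating function such as get_variable. The minimal sketch below illustrates that pattern; it is not taken from any of the examples and assumes a TF 1.x-style graph session, with the variable name "const_var" and the value 7.0 chosen purely for illustration.

# Minimal usage sketch (illustrative only; assumes TF 1.x graph mode).
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes, ops
from tensorflow.python.ops import init_ops, variable_scope, variables

with ops.Graph().as_default():
  # Every element of the 2x3 variable is initialized to the constant 7.0.
  var = variable_scope.get_variable(
      "const_var", shape=[2, 3], dtype=dtypes.float32,
      initializer=init_ops.constant_initializer(7.0))
  with session.Session() as sess:
    sess.run(variables.global_variables_initializer())
    print(sess.run(var))  # -> [[7. 7. 7.], [7. 7. 7.]]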

Example 1: call

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def call(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope("gates"):  # Reset gate and update gate.
      # We start with bias of 1.0 to not reset and not update.
      bias_ones = self._bias_initializer
      if self._bias_initializer is None:
        dtype = [a.dtype for a in [inputs, state]][0]
        bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
      value = math_ops.sigmoid(
          _linear([inputs, state], 2 * self._num_units, True, bias_ones,
                  self._kernel_initializer))
      r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
    with vs.variable_scope("candidate"):
      c = self._activation(
          _linear([inputs, r * state], self._num_units, True,
                  self._bias_initializer, self._kernel_initializer))
    new_h = u * state + (1 - u) * c
    return new_h, new_h 
Developer: ryfeus, Project: lambda-packs, Lines: 20, Source: rnn_cell_impl.py

Example 2: _highway

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def _highway(self, inp, out):
    input_size = inp.get_shape().with_rank(2)[1].value
    carry_weight = vs.get_variable("carry_w", [input_size, input_size])
    carry_bias = vs.get_variable(
        "carry_b", [input_size],
        initializer=init_ops.constant_initializer(
            self._carry_bias_init))
    carry = math_ops.sigmoid(nn_ops.xw_plus_b(inp, carry_weight, carry_bias))
    if self._couple_carry_transform_gates:
      transform = 1 - carry
    else:
      transform_weight = vs.get_variable("transform_w",
                                         [input_size, input_size])
      transform_bias = vs.get_variable(
          "transform_b", [input_size],
          initializer=init_ops.constant_initializer(
              -self._carry_bias_init))
      transform = math_ops.sigmoid(nn_ops.xw_plus_b(inp,
                                                    transform_weight,
                                                    transform_bias))
    return inp * carry + out * transform 
Developer: ryfeus, Project: lambda-packs, Lines: 23, Source: rnn_cell.py

Example 3: DISABLED_testVar

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def DISABLED_testVar(self):
    with self.test_session() as sess:
      with specs.ops:
        # pylint: disable=undefined-variable
        v = Var("test_var",
                shape=[2, 2],
                initializer=init_ops.constant_initializer(42.0))
      inputs = constant_op.constant(_rand(10, 100))
      outputs = v.funcall(inputs)
      self.assertEqual(len(variables.global_variables()), 1)
      sess.run([outputs.initializer])
      outputs_value = outputs.eval()
      self.assertEqual(outputs_value.shape, (2, 2))
      self.assertEqual(outputs_value[1, 1], 42.0)

  # XXX: the cleverness of this code is over 9000
  # TODO: original author please fix 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 19, Source: specs_test.py

Example 4: testIndRNNCell

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testIndRNNCell(self):
    """Tests basic cell functionality"""
    with self.test_session() as sess:
      x = array_ops.zeros([1, 4])
      m = array_ops.zeros([1, 4])

      # Create the cell with input weights = 1 and constant recurrent weights
      recurrent_init = init_ops.constant_initializer([-3., -2., 1., 3.])
      input_init = init_ops.constant_initializer(1.)
      cell = IndRNNCell(num_units=4,
                        recurrent_kernel_initializer=recurrent_init,
                        input_kernel_initializer=input_init,
                        activation=array_ops.identity)
      output, _ = cell(x, m)

      sess.run([variables.global_variables_initializer()])
      res = sess.run([output],
                     {x.name: np.array([[1., 0., 0., 0.]]),
                       m.name: np.array([[2., 2., 2., 2.]])})
      # (Pre)activations (1*1 + 2*rec_weight) should be -5, -3, 3, 7
      self.assertAllEqual(res[0], [[-5., -3., 3., 7.]]) 
Developer: batzner, Project: indrnn, Lines: 23, Source: ind_rnn_cell_test.py

Example 5: testIndRNNCellBounds

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testIndRNNCellBounds(self):
    """Tests cell with recurrent weights exceeding the bounds."""
    with self.test_session() as sess:
      x = array_ops.zeros([1, 4])
      m = array_ops.zeros([1, 4])

      # Create the cell with input weights = 1 and constant recurrent weights
      recurrent_init = init_ops.constant_initializer([-5., -2., 0.1, 5.])
      input_init = init_ops.constant_initializer(1.)
      cell = IndRNNCell(num_units=4,
                        recurrent_min_abs=1.,
                        recurrent_max_abs=3.,
                        recurrent_kernel_initializer=recurrent_init,
                        input_kernel_initializer=input_init,
                        activation=array_ops.identity)
      output, _ = cell(x, m)

      sess.run([variables.global_variables_initializer()])
      res = sess.run([output],
                     {x.name: np.array([[1., 0., 0., 0.]]),
                       m.name: np.array([[2., 2., 2., 2.]])})
      # Recurrent weights should be clipped to -3, -2, 1, 3
      # (Pre)activations (1*1 + 2*rec_weight) should be -5, -3, 3, 7
      self.assertAllEqual(res[0], [[-5., -3., 3., 7.]]) 
Developer: batzner, Project: indrnn, Lines: 26, Source: ind_rnn_cell_test.py

Example 6: testNoGlobalStep

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testNoGlobalStep(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.session(graph=g) as session:
        x = array_ops.placeholder(dtypes.float32, [])
        var = variable_scope.get_variable(
            "test", [], initializer=init_ops.constant_initializer(10))
        loss = math_ops.abs(var * x)
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        train = optimizers_lib.optimize_loss(
            loss,
            global_step=None,
            learning_rate=0.1,
            optimizer=optimizer,
            update_ops=[update_op])
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        self.assertEqual(9.5, var.eval())
        self.assertEqual(20, update_var.eval()) 
Developer: google-research, Project: tf-slim, Lines: 26, Source: optimizers_test.py

Example 7: testNoGlobalStepWithDecay

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testNoGlobalStepWithDecay(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.session(graph=g):
        x = array_ops.placeholder(dtypes.float32, [])
        var = variable_scope.get_variable(
            "test", [], initializer=init_ops.constant_initializer(10))
        loss = math_ops.abs(var * x)
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        with self.assertRaisesRegexp(
            ValueError, "global_step is required for learning_rate_decay_fn"):
          optimizers_lib.optimize_loss(
              loss,
              global_step=None,
              learning_rate=0.1,
              learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
              optimizer=optimizer,
              update_ops=[update_op]) 
Developer: google-research, Project: tf-slim, Lines: 25, Source: optimizers_test.py

Example 8: testUpdateOp

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testUpdateOp(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.session(graph=g) as session:
        x, var, loss, global_step = _setup_model()
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        train = optimizers_lib.optimize_loss(
            loss,
            global_step,
            learning_rate=0.1,
            optimizer=optimizer,
            update_ops=[update_op])
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        self.assertEqual(9.5, var.eval())
        self.assertEqual(20, update_var.eval())
        self.assertEqual(1, global_step.eval()) 
Developer: google-research, Project: tf-slim, Lines: 24, Source: optimizers_test.py

Example 9: testUpdateOpNoIncrementGlobalStep

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testUpdateOpNoIncrementGlobalStep(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.session(graph=g) as session:
        x, var, loss, global_step = _setup_model()
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        train = optimizers_lib.optimize_loss(
            loss,
            global_step,
            learning_rate=0.1,
            optimizer=optimizer,
            update_ops=[update_op],
            increment_global_step=False)
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        self.assertEqual(9.5, var.eval())
        self.assertEqual(20, update_var.eval())
        self.assertEqual(0, global_step.eval()) 
Developer: google-research, Project: tf-slim, Lines: 25, Source: optimizers_test.py

Example 10: testUpdateOpWithNoOpDecay

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testUpdateOpWithNoOpDecay(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.session(graph=g) as session:
        x, var, loss, global_step = _setup_model()
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        train = optimizers_lib.optimize_loss(
            loss,
            global_step,
            learning_rate=0.1,
            learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
            optimizer=optimizer,
            update_ops=[update_op])
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        self.assertEqual(9.5, var.eval())
        self.assertEqual(20, update_var.eval())
        self.assertEqual(1, global_step.eval()) 
Developer: google-research, Project: tf-slim, Lines: 25, Source: optimizers_test.py

Example 11: testUpdateOpFromCollection

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testUpdateOpFromCollection(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.session(graph=g) as session:
        x, var, loss, global_step = _setup_model()
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, update_op)
        train = optimizers_lib.optimize_loss(
            loss, global_step, learning_rate=0.1, optimizer=optimizer)
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        var_value, update_var_value, global_step_value = session.run(
            [var, update_var, global_step])
        self.assertEqual(var_value, 9.5)
        self.assertEqual(update_var_value, 20)
        self.assertEqual(global_step_value, 1) 
Developer: google-research, Project: tf-slim, Lines: 23, Source: optimizers_test.py

Example 12: testHorzConvWithBlankImage

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testHorzConvWithBlankImage(self):
    image = array_ops.ones((1, 10, 10, 1))
    horz_gradients = layers_lib.conv2d_in_plane(
        image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()

    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(horz_gradients)
      expected = np.zeros((1, 10, 9, 1))

      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5) 
Developer: google-research, Project: tf-slim, Lines: 18, Source: layers_test.py

Example 13: testHorzConvWithRandomImageMultiBatch

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testHorzConvWithRandomImageMultiBatch(self):
    np.random.seed(1)
    image = np.random.rand(5, 10, 10, 1)
    expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]

    tf_image = constant_op.constant(image, dtype=dtypes.float32)
    horz_gradients = layers_lib.conv2d_in_plane(
        tf_image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()

    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(horz_gradients)

      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5) 
Developer: google-research, Project: tf-slim, Lines: 21, Source: layers_test.py

Example 14: testHorzConvWithRandomImageMultiBatchMultiChannel

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testHorzConvWithRandomImageMultiBatchMultiChannel(self):
    np.random.seed(1)
    image = np.random.rand(5, 10, 10, 7)
    expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]

    tf_image = constant_op.constant(image, dtype=dtypes.float32)
    horz_gradients = layers_lib.conv2d_in_plane(
        tf_image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()

    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(horz_gradients)

      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5) 
Developer: google-research, Project: tf-slim, Lines: 21, Source: layers_test.py

Example 15: testHorzConvWithVaryingImage

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testHorzConvWithVaryingImage(self):
    image = np.asmatrix(('1.0 2.0 3.0;' '1.1 2.0 4.0;' '-4.3 0.0 8.9'))

    expected = np.asmatrix(('-1.0 -1.0;' '-0.9 -2.0;' '-4.3 -8.9'))
    expected = np.reshape(np.asarray(expected), (1, 3, 2, 1))

    tf_image = constant_op.constant(
        image, shape=(1, 3, 3, 1), dtype=dtypes.float32)
    horz_gradients = layers_lib.conv2d_in_plane(
        tf_image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()

    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(horz_gradients)

      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5) 
Developer: google-research, Project: tf-slim, Lines: 23, Source: layers_test.py


Note: The tensorflow.python.ops.init_ops.constant_initializer examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code; do not reproduce without permission.