

Python init_ops.constant_initializer Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.init_ops.constant_initializer method in Python. If you are wondering what init_ops.constant_initializer does, how to call it, or what real-world uses of it look like, the curated code examples below should help. You can also explore further usage examples from its containing module, tensorflow.python.ops.init_ops.


The following presents 15 code examples of the init_ops.constant_initializer method, sorted by popularity by default.
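Before the examples, a minimal usage sketch may help orient readers: constant_initializer fills a variable with a fixed scalar, or with the values of a list/array laid out in row-major order, and is usually passed as the initializer argument of get_variable. The sketch below is illustrative only and is not drawn from the projects listed later; the variable names and shapes are assumptions, and it targets the TF 1.x graph API used by the examples.

import tensorflow as tf
from tensorflow.python.ops import init_ops

tf.compat.v1.disable_eager_execution()  # only needed when running under TF 2.x

# A scalar fills every element of the variable with the same value.
bias = tf.compat.v1.get_variable(
    "bias", shape=[4],
    initializer=init_ops.constant_initializer(1.0))

# A list whose element count matches the shape fills the variable row-major.
kernel = tf.compat.v1.get_variable(
    "kernel", shape=[2, 2],
    initializer=init_ops.constant_initializer([1., -1., 0.5, 0.]))

with tf.compat.v1.Session() as sess:
  sess.run(tf.compat.v1.global_variables_initializer())
  print(sess.run(bias))    # [1. 1. 1. 1.]
  print(sess.run(kernel))  # [[ 1.  -1. ]
                           #  [ 0.5  0. ]]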

Example 1: call

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def call(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope("gates"):  # Reset gate and update gate.
      # We start with bias of 1.0 to not reset and not update.
      bias_ones = self._bias_initializer
      if self._bias_initializer is None:
        dtype = [a.dtype for a in [inputs, state]][0]
        bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
      value = math_ops.sigmoid(
          _linear([inputs, state], 2 * self._num_units, True, bias_ones,
                  self._kernel_initializer))
      r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
    with vs.variable_scope("candidate"):
      c = self._activation(
          _linear([inputs, r * state], self._num_units, True,
                  self._bias_initializer, self._kernel_initializer))
    new_h = u * state + (1 - u) * c
    return new_h, new_h 
Developer: ryfeus, Project: lambda-packs, Lines of code: 20, Source file: rnn_cell_impl.py

Example 2: _highway

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def _highway(self, inp, out):
    input_size = inp.get_shape().with_rank(2)[1].value
    carry_weight = vs.get_variable("carry_w", [input_size, input_size])
    carry_bias = vs.get_variable(
        "carry_b", [input_size],
        initializer=init_ops.constant_initializer(
            self._carry_bias_init))
    carry = math_ops.sigmoid(nn_ops.xw_plus_b(inp, carry_weight, carry_bias))
    if self._couple_carry_transform_gates:
      transform = 1 - carry
    else:
      transform_weight = vs.get_variable("transform_w",
                                         [input_size, input_size])
      transform_bias = vs.get_variable(
          "transform_b", [input_size],
          initializer=init_ops.constant_initializer(
              -self._carry_bias_init))
      transform = math_ops.sigmoid(nn_ops.xw_plus_b(inp,
                                                    transform_weight,
                                                    transform_bias))
    return inp * carry + out * transform 
Developer: ryfeus, Project: lambda-packs, Lines of code: 23, Source file: rnn_cell.py

Example 3: DISABLED_testVar

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def DISABLED_testVar(self):
    with self.test_session() as sess:
      with specs.ops:
        # pylint: disable=undefined-variable
        v = Var("test_var",
                shape=[2, 2],
                initializer=init_ops.constant_initializer(42.0))
      inputs = constant_op.constant(_rand(10, 100))
      outputs = v.funcall(inputs)
      self.assertEqual(len(variables.global_variables()), 1)
      sess.run([outputs.initializer])
      outputs_value = outputs.eval()
      self.assertEqual(outputs_value.shape, (2, 2))
      self.assertEqual(outputs_value[1, 1], 42.0)

Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 19, Source file: specs_test.py

Example 4: testIndRNNCell

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testIndRNNCell(self):
    """Tests basic cell functionality"""
    with self.test_session() as sess:
      x = array_ops.zeros([1, 4])
      m = array_ops.zeros([1, 4])

      # Create the cell with input weights = 1 and constant recurrent weights
      recurrent_init = init_ops.constant_initializer([-3., -2., 1., 3.])
      input_init = init_ops.constant_initializer(1.)
      cell = IndRNNCell(num_units=4,
                        recurrent_kernel_initializer=recurrent_init,
                        input_kernel_initializer=input_init,
                        activation=array_ops.identity)
      output, _ = cell(x, m)

      sess.run([variables.global_variables_initializer()])
      res = sess.run([output],
                     {x.name: np.array([[1., 0., 0., 0.]]),
                      m.name: np.array([[2., 2., 2., 2.]])})
      # (Pre)activations (1*1 + 2*rec_weight) should be -5, -3, 3, 7
      self.assertAllEqual(res[0], [[-5., -3., 3., 7.]]) 
Developer: batzner, Project: indrnn, Lines of code: 23, Source file: ind_rnn_cell_test.py

Example 5: testIndRNNCellBounds

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testIndRNNCellBounds(self):
    """Tests cell with recurrent weights exceeding the bounds."""
    with self.test_session() as sess:
      x = array_ops.zeros([1, 4])
      m = array_ops.zeros([1, 4])

      # Create the cell with input weights = 1 and constant recurrent weights
      recurrent_init = init_ops.constant_initializer([-5., -2., 0.1, 5.])
      input_init = init_ops.constant_initializer(1.)
      cell = IndRNNCell(num_units=4,
                        recurrent_min_abs=1.,
                        recurrent_max_abs=3.,
                        recurrent_kernel_initializer=recurrent_init,
                        input_kernel_initializer=input_init,
                        activation=array_ops.identity)
      output, _ = cell(x, m)

      sess.run([variables.global_variables_initializer()])
      res = sess.run([output],
                     {x.name: np.array([[1., 0., 0., 0.]]),
                      m.name: np.array([[2., 2., 2., 2.]])})
      # Recurrent weights should be clipped to -3, -2, 1, 3
      # (Pre)activations (1*1 + 2*rec_weight) should be -5, -3, 3, 7
      self.assertAllEqual(res[0], [[-5., -3., 3., 7.]]) 
Developer: batzner, Project: indrnn, Lines of code: 26, Source file: ind_rnn_cell_test.py

Example 6: testNoGlobalStep

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testNoGlobalStep(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.session(graph=g) as session:
        x = array_ops.placeholder(dtypes.float32, [])
        var = variable_scope.get_variable(
            "test", [], initializer=init_ops.constant_initializer(10))
        loss = math_ops.abs(var * x)
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        train = optimizers_lib.optimize_loss(
            loss,
            global_step=None,
            learning_rate=0.1,
            optimizer=optimizer,
            update_ops=[update_op])
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        self.assertEqual(9.5, var.eval())
        self.assertEqual(20, update_var.eval()) 
Developer: google-research, Project: tf-slim, Lines of code: 26, Source file: optimizers_test.py

Example 7: testNoGlobalStepWithDecay

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testNoGlobalStepWithDecay(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.session(graph=g):
        x = array_ops.placeholder(dtypes.float32, [])
        var = variable_scope.get_variable(
            "test", [], initializer=init_ops.constant_initializer(10))
        loss = math_ops.abs(var * x)
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        with self.assertRaisesRegexp(
            ValueError, "global_step is required for learning_rate_decay_fn"):
          optimizers_lib.optimize_loss(
              loss,
              global_step=None,
              learning_rate=0.1,
              learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
              optimizer=optimizer,
              update_ops=[update_op]) 
Developer: google-research, Project: tf-slim, Lines of code: 25, Source file: optimizers_test.py

Example 8: testUpdateOp

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testUpdateOp(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.session(graph=g) as session:
        x, var, loss, global_step = _setup_model()
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        train = optimizers_lib.optimize_loss(
            loss,
            global_step,
            learning_rate=0.1,
            optimizer=optimizer,
            update_ops=[update_op])
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        self.assertEqual(9.5, var.eval())
        self.assertEqual(20, update_var.eval())
        self.assertEqual(1, global_step.eval()) 
Developer: google-research, Project: tf-slim, Lines of code: 24, Source file: optimizers_test.py

Example 9: testUpdateOpNoIncrementGlobalStep

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testUpdateOpNoIncrementGlobalStep(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.session(graph=g) as session:
        x, var, loss, global_step = _setup_model()
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        train = optimizers_lib.optimize_loss(
            loss,
            global_step,
            learning_rate=0.1,
            optimizer=optimizer,
            update_ops=[update_op],
            increment_global_step=False)
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        self.assertEqual(9.5, var.eval())
        self.assertEqual(20, update_var.eval())
        self.assertEqual(0, global_step.eval()) 
Developer: google-research, Project: tf-slim, Lines of code: 25, Source file: optimizers_test.py

Example 10: testUpdateOpWithNoOpDecay

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testUpdateOpWithNoOpDecay(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.session(graph=g) as session:
        x, var, loss, global_step = _setup_model()
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        train = optimizers_lib.optimize_loss(
            loss,
            global_step,
            learning_rate=0.1,
            learning_rate_decay_fn=_no_op_learning_rate_decay_fn,
            optimizer=optimizer,
            update_ops=[update_op])
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        self.assertEqual(9.5, var.eval())
        self.assertEqual(20, update_var.eval())
        self.assertEqual(1, global_step.eval()) 
Developer: google-research, Project: tf-slim, Lines of code: 25, Source file: optimizers_test.py

Example 11: testUpdateOpFromCollection

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testUpdateOpFromCollection(self):
    optimizers = [
        "SGD", gradient_descent.GradientDescentOptimizer,
        gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    ]
    for optimizer in optimizers:
      with ops.Graph().as_default() as g, self.session(graph=g) as session:
        x, var, loss, global_step = _setup_model()
        update_var = variable_scope.get_variable(
            "update", [], initializer=init_ops.constant_initializer(10))
        update_op = state_ops.assign(update_var, 20)
        ops.add_to_collection(ops.GraphKeys.UPDATE_OPS, update_op)
        train = optimizers_lib.optimize_loss(
            loss, global_step, learning_rate=0.1, optimizer=optimizer)
        variables.global_variables_initializer().run()
        session.run(train, feed_dict={x: 5})
        var_value, update_var_value, global_step_value = session.run(
            [var, update_var, global_step])
        self.assertEqual(var_value, 9.5)
        self.assertEqual(update_var_value, 20)
        self.assertEqual(global_step_value, 1) 
Developer: google-research, Project: tf-slim, Lines of code: 23, Source file: optimizers_test.py

Example 12: testHorzConvWithBlankImage

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testHorzConvWithBlankImage(self):
    image = array_ops.ones((1, 10, 10, 1))
    horz_gradients = layers_lib.conv2d_in_plane(
        image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()

    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(horz_gradients)
      expected = np.zeros((1, 10, 9, 1))

      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5) 
Developer: google-research, Project: tf-slim, Lines of code: 18, Source file: layers_test.py

Example 13: testHorzConvWithRandomImageMultiBatch

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testHorzConvWithRandomImageMultiBatch(self):
    np.random.seed(1)
    image = np.random.rand(5, 10, 10, 1)
    expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]

    tf_image = constant_op.constant(image, dtype=dtypes.float32)
    horz_gradients = layers_lib.conv2d_in_plane(
        tf_image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()

    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(horz_gradients)

      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5) 
Developer: google-research, Project: tf-slim, Lines of code: 21, Source file: layers_test.py

Example 14: testHorzConvWithRandomImageMultiBatchMultiChannel

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testHorzConvWithRandomImageMultiBatchMultiChannel(self):
    np.random.seed(1)
    image = np.random.rand(5, 10, 10, 7)
    expected = image[:, :, 0:-1, :] - image[:, :, 1:, :]

    tf_image = constant_op.constant(image, dtype=dtypes.float32)
    horz_gradients = layers_lib.conv2d_in_plane(
        tf_image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()

    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(horz_gradients)

      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5) 
Developer: google-research, Project: tf-slim, Lines of code: 21, Source file: layers_test.py

Example 15: testHorzConvWithVaryingImage

# Required module import: from tensorflow.python.ops import init_ops [as alias]
# Or alternatively: from tensorflow.python.ops.init_ops import constant_initializer [as alias]
def testHorzConvWithVaryingImage(self):
    image = np.asmatrix(('1.0 2.0 3.0;' '1.1 2.0 4.0;' '-4.3 0.0 8.9'))

    expected = np.asmatrix(('-1.0 -1.0;' '-0.9 -2.0;' '-4.3 -8.9'))
    expected = np.reshape(np.asarray(expected), (1, 3, 2, 1))

    tf_image = constant_op.constant(
        image, shape=(1, 3, 3, 1), dtype=dtypes.float32)
    horz_gradients = layers_lib.conv2d_in_plane(
        tf_image,
        weights_initializer=init_ops.constant_initializer([1, -1]),
        kernel_size=[1, 2],
        padding='VALID',
        activation_fn=None)
    init_op = variables_lib.global_variables_initializer()

    with self.cached_session() as sess:
      sess.run(init_op)
      result = sess.run(horz_gradients)

      self.assertAllClose(result, expected, rtol=1e-5, atol=1e-5) 
Developer: google-research, Project: tf-slim, Lines of code: 23, Source file: layers_test.py


Note: the tensorflow.python.ops.init_ops.constant_initializer examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors, and copyright in the source code remains with the original authors; please consult each project's License before distributing or reusing the code. Do not reproduce this article without permission.