

Python while_v2.while_loop_v2 Function Code Examples

This article collects typical usage examples of the tensorflow.python.ops.while_v2.while_loop_v2 function in Python. If you have been wondering what while_loop_v2 does, how to call it, or what it looks like in real code, the curated examples below should help.


The following presents 15 code examples of the while_loop_v2 function, sorted by popularity by default.
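
Before the examples, here is a minimal sketch of a typical call, assuming graph mode as in the tests below; the import alias and the cond/body lambdas are illustrative only, not taken from any particular example:

 from tensorflow.python.framework import constant_op
 # Assumed alias: the examples refer to the function as while_loop_v2; in the
 # TensorFlow source it is defined in tensorflow/python/ops/while_v2.py.
 from tensorflow.python.ops.while_v2 import while_loop as while_loop_v2

 x = constant_op.constant(2.)
 # cond receives the current loop variables and returns a boolean; body returns
 # their next values. Squaring x while it stays below 8. gives 2. -> 4. -> 16.
 ret = while_loop_v2(
     lambda v: v < 8., lambda v: v * v, [x], return_same_structure=False)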

Example 1: testMultipleWhileLoops

 def testMultipleWhileLoops(self):
   x = constant_op.constant(2.)
   ret1 = while_loop_v2(lambda v: v < 4., lambda v: v * v, [x])  # x**2
   ret2 = while_loop_v2(lambda v: v < 16., lambda v: v * v, ret1)  # x**4
   grad = gradients_impl.gradients(ret2, [x])  # 4x**3
   grad_grad = gradients_impl.gradients(grad, [x])  # 12x**2
   with self.cached_session() as sess:
     self.assertSequenceEqual(sess.run(grad), [32.])
     self.assertSequenceEqual(sess.run(grad_grad), [48.])
Author: ThunderQi, Project: tensorflow, Lines: 9, Source: while_v2_test.py

Example 2: testNestedWhileAndTensorArray

  def testNestedWhileAndTensorArray(self):
    n = constant_op.constant(3.0)

    def Body(row, ta, n):

      def InnerBody(row, col, ta, n):
        # Note: row and col are 1-based.
        ta = ta.write(
            math_ops.cast(n * (row - 1.) + col - 1., dtypes.int32), row * col)
        return row, col + 1., ta, n

      # TODO(b/118457764): Remove n from loop_vars from both loops once fixed.
      ta = while_loop_v2(
          lambda _, col, _1, n: col <= n,
          InnerBody, [row, constant_op.constant(1.), ta, n],
          return_same_structure=False)[2]
      return row + 1., ta, n

    ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=9)
    ta = while_loop_v2(
        lambda row, _, _1: row <= n,
        Body, [constant_op.constant(1.), ta, n],
        return_same_structure=False)[1]

    output = array_ops.reshape(ta.stack(), [3, 3])
    self.assertAllEqual(
        self.evaluate(output), [[1., 2., 3.], [2., 4., 6.], [3., 6., 9.]])
Author: aeverall, Project: tensorflow, Lines: 27, Source: while_v2_test.py

Example 3: testDuplicateAccumulator

  def testDuplicateAccumulator(self):
    x = constant_op.constant(2.)

    tensor_list = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=ScalarShape())

    def Cond(x, tl):
      del tl  # Unused for Cond.
      return x < 5.

    def Body(x, tl):
      # There is an accumulator in the loop already so we should not add
      # another.
      tl = list_ops.tensor_list_push_back(tl, x)
      return x**2., tl

    ret = while_loop_v2(Cond, Body, [x, tensor_list])

    for op in ops.get_default_graph().get_operations():
      if op.type == "While":
        while_op = op

    body_graph = while_v2._get_body_graph(while_op)
    # body_graph.inputs: [counter_arg, x_arg, tl_arg, *accumulators]
    x_input_t = body_graph.inputs[1]
    accumulator_count = len(
        [c for c in x_input_t.consumers() if c.type == "TensorListPushBack"])
    self.assertEqual(accumulator_count, 1)

    grad = gradients_impl.gradients(ret[0], x)
    with self.cached_session() as sess:
      self.assertEqual(sess.run(ret[0]), 16.)
      self.assertSequenceEqual(sess.run(grad), [32.])
Author: ThunderQi, Project: tensorflow, Lines: 33, Source: while_v2_test.py

Example 4: testSingleLoopVar

 def testSingleLoopVar(self):
   x = constant_op.constant(2.)
   ret = while_loop_v2(lambda v: v < 8., lambda v: v * v, [x])
   grad = gradients_impl.gradients(ret, [x])
   with self.cached_session() as sess:
     self.assertEqual(sess.run(ret), 16.)
     self.assertSequenceEqual(sess.run(grad), [32.])
Author: ThunderQi, Project: tensorflow, Lines: 7, Source: while_v2_test.py

Example 5: testMultipleLoopVars

  def testMultipleLoopVars(self):
    x = constant_op.constant(5.)
    y = constant_op.constant(3.)

    # x = 5.
    # y = 3.
    # while x < 45.:
    #   x = x * y
    #   y = x + y
    ret = while_loop_v2(lambda v, _: v < 45., lambda v, w: (v * w, v + w),
                        [x, y])
    # ret = [y*x**2 + x*y**2, x*y + x + y]

    gradx_0 = gradients_impl.gradients(ret[0], [x])  # [2*x*y + y**2]
    gradx_1 = gradients_impl.gradients(ret[1], [x])  # [y + 1]
    gradx_2 = gradients_impl.gradients(ret, [x])  # [2*x*y + y**2 + 2*y + 1]
    grady_0 = gradients_impl.gradients(ret[0], [y])  # [2*x*y + x**2]
    grady_1 = gradients_impl.gradients(ret[1], [y])  # [x + 1]
    grady_2 = gradients_impl.gradients(ret, [y])  # [2*x*y + x**2 + x + 1]
    with self.cached_session() as sess:
      self.assertSequenceEqual(sess.run(ret), [120., 23.])
      self.assertSequenceEqual(sess.run(gradx_0), [39.])
      self.assertSequenceEqual(sess.run(gradx_1), [4.])
      self.assertSequenceEqual(sess.run(gradx_2), [43.])
      self.assertSequenceEqual(sess.run(grady_0), [55.])
      self.assertSequenceEqual(sess.run(grady_1), [6.])
      self.assertSequenceEqual(sess.run(grady_2), [61.])
Author: ThunderQi, Project: tensorflow, Lines: 27, Source: while_v2_test.py
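
A quick check of the asserted values against the symbolic comments above (this arithmetic is illustrative and not part of the original test): starting from (x, y) = (5., 3.), the body runs twice, (5., 3.) -> (15., 8.) -> (120., 23.), so ret evaluates to [120., 23.]; likewise gradx_0 = 2*x*y + y**2 = 2*5*3 + 3**2 = 39. and grady_0 = 2*x*y + x**2 = 30 + 25 = 55., matching the assertions.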

Example 6: fnWithLoop

 def fnWithLoop():  # pylint: disable=invalid-name
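   # Note: v is captured from the enclosing scope of this fragment; it is
   # presumably a variable (or tensor watched by the tape) not shown here.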
   with backprop.GradientTape() as tape:
     _, x = while_loop_v2(
         lambda i, _: i < 2,
         lambda i, x: (i + 1, x * v),
         [0, 2.])
   return tape.gradient(x, v)
Author: adit-chandra, Project: tensorflow, Lines: 7, Source: while_v2_test.py

Example 7: testSingleLoopVar

 def testSingleLoopVar(self):
   x = constant_op.constant(2.)
   ret = while_loop_v2(
       lambda v: v < 8., lambda v: v * v, [x], return_same_structure=False)
   grad = gradients_impl.gradients(ret, [x])
   with self.cached_session() as sess:
     self.assertEqual(self.evaluate(ret), 16.)
     self.assertSequenceEqual(self.evaluate(grad), [32.])
Author: adit-chandra, Project: tensorflow, Lines: 8, Source: while_v2_test.py

Example 8: testCaptureExternalTensorInBody

 def testCaptureExternalTensorInBody(self):
   x = constant_op.constant(2.)
   y = constant_op.constant(3.)
   ret = while_loop_v2(lambda v: v < 8., lambda v: v * y, [x])
   grad = gradients_impl.gradients(ret, [x])
   with self.cached_session() as sess:
     self.assertEqual(sess.run(ret), 18.)
     self.assertSequenceEqual(sess.run(grad), [9.])
Author: ThunderQi, Project: tensorflow, Lines: 8, Source: while_v2_test.py

Example 9: testCaptureExternalTensorInCond

 def testCaptureExternalTensorInCond(self):
   x = constant_op.constant(2.)
   y = constant_op.constant(1.)
   ret = while_loop_v2(lambda v: v + y < 9., lambda v: v * 3., [x])
   grad = gradients_impl.gradients(ret, [x])
   with self.cached_session() as sess:
     self.assertEqual(self.evaluate(ret), 18.)
     self.assertSequenceEqual(self.evaluate(grad), [9.])
Author: JonathanRaiman, Project: tensorflow, Lines: 8, Source: while_v2_test.py

Example 10: testDoubleDerivative

 def testDoubleDerivative(self):
   x = constant_op.constant(2.)
   ret = while_loop_v2(lambda v: v < 8., lambda v: v**2, [x])  # x**4
   grad = gradients_impl.gradients(ret, [x])  # 4x**3
   grad_grad = gradients_impl.gradients(grad, [x])  # 12x**2
   with self.cached_session() as sess:
     self.assertEqual(sess.run(ret), 16.)
     self.assertSequenceEqual(sess.run(grad), [32.])
     self.assertSequenceEqual(sess.run(grad_grad), [48.])
Author: ThunderQi, Project: tensorflow, Lines: 9, Source: while_v2_test.py

Example 11: testGradientTape

 def testGradientTape(self):
   with backprop.GradientTape() as t:
     x = constant_op.constant(2.)
     t.watch(x)
     ret = while_loop_v2(
         lambda v: v < 4., lambda v: v * v, [x],
         return_same_structure=False)  # x**2
   grad = t.gradient(ret, x)
   with self.cached_session() as sess:
     self.assertAllEqual(sess.run(grad), 4.0)
Author: adit-chandra, Project: tensorflow, Lines: 10, Source: while_v2_test.py

Example 12: testAccumulatorElementShape

  def testAccumulatorElementShape(self, shape):

    def MatchShape(actual_tensor_shape):
      # Compare the shapes, treating None dimensions as equal. We do not
      # directly check actual_tensor_shape and tf.TensorShape(shape) for
      # equality because tf.Dimension.__eq__ returns None if either dimension is
      # None.
      if shape is None:
        self.assertIsNone(actual_tensor_shape.dims)
      else:
        self.assertListEqual(actual_tensor_shape.as_list(), shape)

    def GetAccumulatorForInputAtIndex(while_op, idx):
      body_graph = while_v2._get_graph(while_op, "body")
      y_input_t = body_graph.inputs[idx]
      push_back_node = [c for c in y_input_t.consumers()
                        if c.type == "TensorListPushBack"][0]
      output_idx = body_graph.outputs.index(push_back_node.outputs[0])
      return while_op.outputs[output_idx]

    x = array_ops.placeholder(dtype=dtypes.float32, shape=shape)
    y = array_ops.placeholder(dtype=dtypes.float32, shape=shape)

    # Forward pass.
    ret = while_loop_v2(lambda v, u: v < 8.,
                        lambda v, u: (math_ops.pow(v, u), u),
                        [x, y],
                        return_same_structure=True)
    while_op = ret[0].op.inputs[0].op
    # Gradient pass.
    grad = gradients_impl.gradients(ret[0], x)
    # Note: There is an Identity b/w grad[0] and the While op.
    grad_while_op = grad[0].op.inputs[0].op

    # Get the TensorList output of While op containing the accumulated values
    # of y.
    x_input_index = [i for i, inp in enumerate(while_op.inputs) if x == inp][0]
    output = GetAccumulatorForInputAtIndex(while_op, x_input_index)
    _, val = list_ops.tensor_list_pop_back(output,
                                           element_dtype=dtypes.float32)
    MatchShape(val.shape)

    # Take second derivative to generate intermediate grad_while_op outputs
    gradients_impl.gradients(grad, x)

    # Get the TensorList output of gradient While op containing the accumulated
    # values of grad_x (note that grad_x is needed by the second derivative).
    # grad_while_op.inputs:
    grad_output_index = grad_while_op.outputs.index(grad[0].op.inputs[0])
    grad_output = GetAccumulatorForInputAtIndex(grad_while_op,
                                                grad_output_index)
    _, val = list_ops.tensor_list_pop_back(grad_output,
                                           element_dtype=dtypes.float32)
    MatchShape(val.shape)
Author: adit-chandra, Project: tensorflow, Lines: 54, Source: while_v2_test.py

Example 13: testReturnSameStructureTrue

 def testReturnSameStructureTrue(self):
   x = constant_op.constant(2.)
   ret = while_loop_v2(
       lambda v: v < 8., lambda v: v * v, [x], return_same_structure=True)
   grad = gradients_impl.gradients(ret, [x])
   with self.cached_session() as sess:
     eval_result = sess.run(ret)
     self.assertIsInstance(eval_result, list)
     self.assertLen(eval_result, 1)
     self.assertEqual(16., eval_result[0])
     self.assertSequenceEqual(sess.run(grad), [32.])
Author: adit-chandra, Project: tensorflow, Lines: 11, Source: while_v2_test.py

Example 14: Body

    def Body(row, ta, n):

      def InnerBody(row, col, ta, n):
        # Note: row and col are 1-based.
        ta = ta.write(
            math_ops.cast(n * (row - 1.) + col - 1., dtypes.int32), row * col)
        return row, col + 1., ta, n

      # TODO(b/118457764): Remove n from loop_vars from both loops once fixed.
      ta = while_loop_v2(lambda _, col, _1, n: col <= n, InnerBody,
                         [row, constant_op.constant(1.), ta, n])[2]
      return row + 1., ta, n
Author: JonathanRaiman, Project: tensorflow, Lines: 12, Source: while_v2_test.py

Example 15: testIdentityNodeInBody

  def testIdentityNodeInBody(self):

    def Body(v):
      v = array_ops.identity(v)
      v = array_ops.identity(v)
      return v * v

    x = constant_op.constant(2.)
    ret = while_loop_v2(lambda v: v < 8., Body, [x])
    grad = gradients_impl.gradients(ret, [x])
    with self.cached_session() as sess:
      self.assertEqual(self.evaluate(ret), 16.)
      self.assertSequenceEqual(self.evaluate(grad), [32.])
Author: JonathanRaiman, Project: tensorflow, Lines: 13, Source: while_v2_test.py


Note: The tensorflow.python.ops.while_v2.while_loop_v2 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs; the snippets were selected from open-source projects contributed by various developers. Copyright of the source code remains with the original authors; please refer to the corresponding project's License before distributing or using it. Do not reproduce without permission.