

Python context.executing_eagerly Function Code Examples

This article collects typical usage examples of the executing_eagerly function from the Python module tensorflow.python.eager.context. If you have been wondering how exactly executing_eagerly is used, or what calls to it look like in practice, the curated examples below may help.


The following presents 15 code examples of the executing_eagerly function, sorted by popularity by default.
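All 15 examples share a single idiom: query context.executing_eagerly() to decide whether to build an op once and evaluate it later in a session (graph mode), or to call an update function directly on every step (eager mode). Here is a minimal sketch of that idiom; the names run_n_steps, step_fn, and evaluate are illustrative stand-ins (evaluate plays the role of self.evaluate in the tests below), not TensorFlow API:

from tensorflow.python.eager import context

def run_n_steps(step_fn, evaluate, n):
  """Runs `step_fn` n times, branching on eager vs. graph execution."""
  if not context.executing_eagerly():
    step_op = step_fn()            # Graph mode: build the op once.
  for _ in range(n):
    if context.executing_eagerly():
      step_fn()                    # Eager mode: executes immediately.
    else:
      evaluate(step_op)            # Graph mode: run the prebuilt op.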

Example 1: _test_helper

  def _test_helper(self,
                   inputs,
                   expected_outputs,
                   init_loss_scale=1,
                   incr_every_n_step=2,
                   decr_every_n_nan_or_inf=2):
    ratio = 2
    lsm = lsm_lib.ExponentialUpdateLossScaleManager(
        init_loss_scale=init_loss_scale,
        incr_every_n_steps=incr_every_n_step,
        decr_every_n_nan_or_inf=decr_every_n_nan_or_inf,
        incr_ratio=ratio,
        decr_ratio=1. / ratio)
    itr = _GetExampleIter(inputs)
    update_fn = lambda: lsm.update_loss_scale(itr.get_next())

    self.evaluate(variables.global_variables_initializer())
    actual_outputs = []

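    # Graph mode: build the update op once and evaluate it each step; in
    # eager mode, call update_fn() directly inside the loop below.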
    if not context.executing_eagerly():
      update_op = update_fn()
    for _ in range(len(inputs)):
      if context.executing_eagerly():
        update_fn()
      else:
        self.evaluate(update_op)
      actual_outputs.append(self.evaluate(lsm.get_loss_scale()))
    self.assertEqual(actual_outputs, expected_outputs)
Developer: Albert-Z-Guo | Project: tensorflow | Lines: 28 | Source: loss_scale_manager_test.py

Example 2: _test_helper

  def _test_helper(self,
                   inputs,
                   expected_outputs,
                   initial_loss_scale=1.,
                   increment_period=2,
                   multiplier=2):
    loss_scale = loss_scale_module.DynamicLossScale(
        initial_loss_scale=initial_loss_scale,
        increment_period=increment_period,
        multiplier=multiplier)
    itr = _get_example_iter(inputs)

    def update():
      is_finite = itr.get_next()
      grad = self._get_tensor(is_finite)
      update_op, should_apply_gradients = loss_scale.update([grad])
      assert_op = check_ops.assert_equal(should_apply_gradients, is_finite)
      if context.executing_eagerly():
        return
      with ops.control_dependencies([assert_op]):
        return array_ops.identity(update_op)

    actual_outputs = []

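    # In graph mode, build the update op (with its assert) once; eager mode
    # runs update() directly on every iteration.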
    if not context.executing_eagerly():
      update_op = update()
      self.evaluate(variables.global_variables_initializer())
    for _ in range(len(inputs)):
      if context.executing_eagerly():
        update()
      else:
        self.evaluate(update_op)
      actual_outputs.append(self.evaluate(loss_scale()))
    self.assertEqual(actual_outputs, expected_outputs)
Developer: aritratony | Project: tensorflow | Lines: 34 | Source: loss_scale_test.py

Example 3: _test_summary_for_replica_zero_only

  def _test_summary_for_replica_zero_only(self, d):
    logdir = tempfile.mkdtemp()

    def run_fn():
      """Function executed for each replica."""
      with summary_writer.as_default():
        replica_id = ds_context.get_replica_context().replica_id_in_sync_group
        return summary_ops.write("a", replica_id)

    with self.cached_session() as sess, d.scope(), \
        summary_ops.always_record_summaries():
      # We need global_step because the summary-writing op *always* takes
      # global_step as input, even when summaries are always or never recorded.
      global_step = training_util.get_or_create_global_step()
      if not context.executing_eagerly():
        # This is only needed when building a graph: in eager mode, variables
        # are initialized immediately at creation and have no initializer op.
        global_step.initializer.run()
      summary_ops.set_step(0)
      summary_writer = summary_ops.create_file_writer(logdir)
      output = d.extended.call_for_each_replica(run_fn)
      unwrapped = d.unwrap(output)
      if not context.executing_eagerly():
        sess.run(summary_writer.init())
        sess.run(unwrapped)
        sess.run(summary_writer.close())

      events = _events_from_logdir(self, logdir)
      # There will be 2 entries: 1 summary file header entry, and 1 entry
      # written by replica 0.
      self.assertLen(events, 2)
      self.assertEqual(events[1].summary.value[0].tag, "a")
      self.assertEqual(events[1].summary.value[0].simple_value, 0.0)
Developer: adit-chandra | Project: tensorflow | Lines: 33 | Source: strategy_test_lib.py

Example 4: testRequestNotToCompile

  def testRequestNotToCompile(self):
    with self.test_scope():
      def f(x):
        with ops.device('device:CPU:0'):
          y = 2.0 * x
        return x, y

      wholly_compiled_f = def_function.function(f)
      op_by_op_f = function.defun_with_attributes(
          f, attributes={'_XlaCompile': False})

      x = constant_op.constant([0.0, 2.0], name='data')

      # When function is wholly compiled, all outputs will be on the
      # device on which it is run.
      r_x, r_y = wholly_compiled_f(x)
      self.assertAllEqual([0.0, 2.0], r_x)
      self.assertAllEqual([0.0, 4.0], r_y)
      if context.executing_eagerly():
        # backing_device is only available for eager tensors.
        self.assertRegexpMatches(r_x.backing_device, self.device)
        self.assertRegexpMatches(r_y.backing_device, self.device)

      # When function is executed op-by-op, requested devices will be
      # respected.
      r_x, r_y = op_by_op_f(x)
      self.assertAllEqual([0.0, 2.0], r_x)
      self.assertAllEqual([0.0, 4.0], r_y)
      if context.executing_eagerly():
        # backing_device is only available for eager tensors.
        self.assertRegexpMatches(r_x.backing_device, self.device)
        self.assertRegexpMatches(r_y.backing_device, 'device:CPU:0')
Developer: Albert-Z-Guo | Project: tensorflow | Lines: 32 | Source: eager_test.py

Example 5: testLoadFromNameBasedSaver

 def testLoadFromNameBasedSaver(self):
   """Save a name-based checkpoint, load it using the object-based API."""
   with test_util.device(use_gpu=True):
     save_path = self._write_name_based_checkpoint()
     root = self._initialized_model()
     self._set_sentinels(root)
     with self.assertRaises(AssertionError):
       self._check_sentinels(root)
     object_saver = util.TrackableSaver(graph_view.ObjectGraphView(root))
     self._set_sentinels(root)
     status = object_saver.restore(save_path)
     if context.executing_eagerly():
       self._check_sentinels(root)
     if context.executing_eagerly():
       with self.assertRaisesRegexp(AssertionError, "OBJECT_CONFIG_JSON"):
         status.assert_consumed()
     else:
       # When graph building, we haven't read any keys, so we don't know
       # whether the restore will be complete.
       with self.assertRaisesRegexp(AssertionError, "not restored"):
         status.assert_consumed()
     status.run_restore_ops()
     self._check_sentinels(root)
     self._set_sentinels(root)
     status = object_saver.restore(save_path)
     status.initialize_or_restore()
     self._check_sentinels(root)
Developer: jackd | Project: tensorflow | Lines: 27 | Source: checkpointable_utils_test.py

Example 6: testTrainNetwork

  def testTrainNetwork(self, distribution, optimizer_fn,
                       use_callable_loss=True):
    with distribution.scope():
      model_fn, dataset_fn, layer = minimize_loss_example(
          optimizer_fn, use_bias=True, use_callable_loss=use_callable_loss)

      ds = distribution.distribute_dataset(dataset_fn)
      if context.executing_eagerly():
        iterator = ds.make_one_shot_iterator()
      else:
        iterator = ds.make_initializable_iterator()

      def run_step():
        return control_flow_ops.group(distribution.unwrap(
            distribution.call_for_each_tower(
                model_fn, iterator.get_next(), run_concurrently=layer.built)))

      if not context.executing_eagerly():
        with self.cached_session() as sess:
          sess.run(iterator.initializer)
          run_step = sess.make_callable(run_step())
        self.evaluate(variables.global_variables_initializer())

      weights, biases = [], []
      for _ in range(10):
        run_step()

        weights.append(self.evaluate(layer.kernel))
        biases.append(self.evaluate(layer.bias))

      error = abs(numpy.add(numpy.squeeze(weights), numpy.squeeze(biases)) - 1)
      is_not_increasing = all(y <= x for x, y in zip(error, error[1:]))
      self.assertTrue(is_not_increasing)
Developer: baojianzhou | Project: tensorflow | Lines: 33 | Source: optimizer_v2_test.py

Example 7: add_variable

 def add_variable(self, name, shape=None, dtype=None, initializer=None):
   """***Only for use by descendants of Metric***."""
   if self._built:
     raise RuntimeError("Can't call add_variable() except in build().")
   if context.executing_eagerly():
     collections = None
   else:
     if self._use_global_variables:
       collections = [ops.GraphKeys.GLOBAL_VARIABLES]
     else:
       collections = [ops.GraphKeys.LOCAL_VARIABLES]
     collections += [ops.GraphKeys.METRIC_VARIABLES]
   # Variables are Checkpointable dependencies of Metrics regardless of the
   # global/local distinction. Users can avoid saving variables by not adding a
   # dependency on the Metric.
   v = self._add_variable_with_custom_getter(
       name=name,
       shape=shape,
       dtype=dtype,
       initializer=initializer,
       trainable=False,
       collections=collections,
       use_resource=True,
       getter=variable_scope.get_variable,
       # Raise duplicate variable exceptions from get_variable rather than
       # Checkpointable.
       overwrite=True)
   self._vars.append(v)
   if context.executing_eagerly():
     self._initial_values[v] = v.value()
   return v
Developer: Jackiefan | Project: tensorflow | Lines: 31 | Source: metrics_impl.py

Example 8: testSaveRestoreMultipleIterator

 def testSaveRestoreMultipleIterator(self):
   checkpoint_directory = self.get_temp_dir()
   checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
   dataset = dataset_ops.Dataset.from_tensor_slices(
       [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
   dataset = dataset.map(math_ops.square).batch(2)
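   # In eager mode, iterator.get_next can be called directly; in graph mode,
   # wrap the prebuilt get_next() op in self.evaluate via functools.partial.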
   iterator_1 = dataset.make_one_shot_iterator()
   get_next_1 = iterator_1.get_next if context.executing_eagerly(
   ) else functools.partial(self.evaluate, iterator_1.get_next())
   iterator_2 = dataset.make_one_shot_iterator()
   get_next_2 = iterator_2.get_next if context.executing_eagerly(
   ) else functools.partial(self.evaluate, iterator_2.get_next())
   dataset_2 = dataset_ops.Dataset.range(10)
   iterator_3 = dataset_2.make_one_shot_iterator()
   get_next_3 = iterator_3.get_next if context.executing_eagerly(
   ) else functools.partial(self.evaluate, iterator_3.get_next())
   checkpoint = checkpointable_utils.Checkpoint(
       iterator_1=iterator_1, iterator_2=iterator_2, iterator_3=iterator_3)
   self.assertAllEqual([1, 4], get_next_1())
   self.assertAllEqual(0, get_next_3())
   self.assertAllEqual(1, get_next_3())
   self.assertAllEqual(2, get_next_3())
   save_path = checkpoint.save(checkpoint_prefix)
   self.assertAllEqual([1, 4], get_next_2())
   self.assertAllEqual([9, 16], get_next_2())
   self.assertAllEqual(3, get_next_3())
   checkpoint.restore(save_path).run_restore_ops()
   self.assertAllEqual([9, 16], get_next_1())
   self.assertAllEqual([1, 4], get_next_2())
   self.assertAllEqual(3, get_next_3())
Developer: JonathanRaiman | Project: tensorflow | Lines: 30 | Source: iterator_ops_test.py

Example 9: testDeferredSlotRestoration

  def testDeferredSlotRestoration(self):
    checkpoint_directory = self.get_temp_dir()

    root = trackable_utils.Checkpoint()
    root.var = trackable_utils.add_variable(
        root, name="var", initializer=0.)
    optimizer = adam.AdamOptimizer(0.1)
    if context.executing_eagerly():
      optimizer.minimize(root.var.read_value)
    else:
      train_op = optimizer.minimize(root.var)
      # Note that `optimizer` has not been added as a dependency of
      # `root`. Create a one-off grouping so that slot variables for `root.var`
      # get initialized too.
      self.evaluate(trackable_utils.gather_initializers(
          trackable_utils.Checkpoint(root=root, optimizer=optimizer)))
      self.evaluate(train_op)
    self.evaluate(state_ops.assign(root.var, 12.))
    no_slots_path = root.save(os.path.join(checkpoint_directory, "no_slots"))
    root.optimizer = optimizer
    self.evaluate(state_ops.assign(root.var, 13.))
    self.evaluate(state_ops.assign(optimizer.get_slot(name="m", var=root.var),
                                   14.))
    slots_path = root.save(os.path.join(checkpoint_directory, "with_slots"))
    new_root = trackable_utils.Checkpoint()
    # Load the slot-containing checkpoint (deferred), then immediately overwrite
    # the non-slot variable (also deferred).
    slot_status = new_root.restore(slots_path)
    no_slot_status = new_root.restore(no_slots_path)
    with self.assertRaises(AssertionError):
      no_slot_status.assert_consumed()
    new_root.var = trackable_utils.add_variable(
        new_root, name="var", shape=[])
    no_slot_status.assert_consumed()
    no_slot_status.run_restore_ops()
    self.assertEqual(12., self.evaluate(new_root.var))
    new_root.optimizer = adam.AdamOptimizer(0.1)
    slot_status.assert_existing_objects_matched()
    with self.assertRaisesRegexp(AssertionError, "beta1_power"):
      slot_status.assert_consumed()
    self.assertEqual(12., self.evaluate(new_root.var))
    if context.executing_eagerly():
      # Slot variables are only created with restoring initializers when
      # executing eagerly.
      self.assertEqual(14., self.evaluate(
          new_root.optimizer.get_slot(name="m", var=new_root.var)))
    else:
      self.assertIs(new_root.optimizer.get_slot(name="m", var=new_root.var),
                    None)
    if context.executing_eagerly():
      new_root.optimizer.minimize(new_root.var.read_value)
    else:
      train_op = new_root.optimizer.minimize(new_root.var)
      # The slot variable now exists; restore() didn't create it, but we should
      # now have a restore op for it.
      slot_status.run_restore_ops()
      self.assertEqual(14., self.evaluate(
          new_root.optimizer.get_slot(name="m", var=new_root.var)))
      self.evaluate(train_op)
    slot_status.assert_consumed()
Developer: adit-chandra | Project: tensorflow | Lines: 60 | Source: util_with_v1_optimizers_test.py

Example 10: test_dropout_mask_reuse

  def test_dropout_mask_reuse(self):
    # The layer is created with recurrent_initializer='zeros', so that the
    # recurrent state won't affect the output. This lets us inspect the output
    # and verify that the same dropout mask is applied at each timestep.
    rnn = keras.layers.SimpleRNN(3,
                                 dropout=0.5,
                                 kernel_initializer='ones',
                                 recurrent_initializer='zeros',
                                 return_sequences=True,
                                 unroll=True)

    inputs = constant_op.constant(1.0, shape=(6, 2, 5))
    out = rnn(inputs, training=True)
    if not context.executing_eagerly():
      self.evaluate(variables_lib.global_variables_initializer())
    batch_1 = self.evaluate(out)
    batch_1_t0, batch_1_t1 = batch_1[:, 0, :], batch_1[:, 1, :]
    self.assertAllClose(batch_1_t0, batch_1_t1)

    # This simulates calling the layer with multiple batches in eager mode.
    if context.executing_eagerly():
      out2 = rnn(inputs, training=True)
    else:
      out2 = out
    batch_2 = self.evaluate(out2)
    batch_2_t0, batch_2_t1 = batch_2[:, 0, :], batch_2[:, 1, :]
    self.assertAllClose(batch_2_t0, batch_2_t1)

    # Also validate that different dropout masks are used between batches.
    self.assertNotAllClose(batch_1_t0, batch_2_t0)
    self.assertNotAllClose(batch_1_t1, batch_2_t1)
Developer: kylin9872 | Project: tensorflow | Lines: 31 | Source: recurrent_test.py

Example 11: testDynamicShapeVariableWithCallableInit

  def testDynamicShapeVariableWithCallableInit(self):
    var0 = variable_scope.get_variable("var0",
                                       initializer=constant_op.constant(1.),
                                       validate_shape=False)
    self.assertFalse(var0.shape.is_fully_defined())

    grads0 = constant_op.constant(0.1, dtype=dtypes.float32)
    learning_rate = lambda: 3.0

    ada_opt = adagrad.AdagradOptimizer(
        learning_rate, initial_accumulator_value=0.1, use_locking=True)

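    # Graph mode: build the Adagrad update op and initialize variables up
    # front; eager mode applies gradients directly in the loop below.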
    if not context.executing_eagerly():
      ada_update = ada_opt.apply_gradients(
          zip([grads0], [var0]))
      self.evaluate(variables.global_variables_initializer())

    # Fetch params to validate initial values
    v0_val = self.evaluate([var0])
    self.assertAllClose([1.0], v0_val)

    # Run 3 steps of adagrad
    for _ in range(3):
      if not context.executing_eagerly():
        self.evaluate(ada_update)
      else:
        ada_opt.apply_gradients(zip([grads0], [var0]))

    # Validate updated params
    v0_val = self.evaluate([var0])
    self.assertAllCloseAccordingToType(
        np.array([-1.6026098728179932]), v0_val)
Developer: Wajih-O | Project: tensorflow | Lines: 32 | Source: adagrad_test.py

Example 12: testCriticalSectionInParallelDoesntDeadlockOnError

  def testCriticalSectionInParallelDoesntDeadlockOnError(self):
    # This test is not run eagerly because eager execution does not run fn()
    # in parallel; the potential deadlock can only occur in graph mode.
    cs = critical_section_ops.CriticalSection(shared_name="cs")
    v = resource_variable_ops.ResourceVariable(0.0, name="v")

    def fn(i):
      error = control_flow_ops.Assert((i % 2) == 1, ["Error"])
      with ops.control_dependencies([error]):
        return v.read_value()

    num_concurrent = 2

    @def_function.function(autograph=False)
    def run_concurrently():
      return [cs.execute(lambda: fn(i)) for i in range(num_concurrent)]

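    # In graph mode, calling the tf.function returns tensors to fetch with
    # self.evaluate; in eager mode it executes directly when called.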
    if not context.executing_eagerly():
      run_concurrently = run_concurrently()

    self.evaluate(v.initializer)
    for _ in range(100):
      with self.assertRaisesOpError("Error"):
        if context.executing_eagerly():
          run_concurrently()
        else:
          self.evaluate(run_concurrently)
Developer: aritratony | Project: tensorflow | Lines: 28 | Source: critical_section_test.py

Example 13: testBasicWithLearningRateDecay

  def testBasicWithLearningRateDecay(self):
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.cached_session():
        var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
        grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
        grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
        learning_rate = 3.0
        decay = 0.5
        sgd = gradient_descent.SGD(learning_rate=learning_rate, decay=decay)
        if not context.executing_eagerly():
          sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
        self.evaluate(variables.global_variables_initializer())
        # Run 2 steps of sgd
        if not context.executing_eagerly():
          self.evaluate(sgd_op)
        else:
          sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
        # Validate updated params
        self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                           self.evaluate(var0))
        self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                           self.evaluate(var1))

        if not context.executing_eagerly():
          self.evaluate(sgd_op)
        else:
          sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
        # Validate updated params
        self.assertAllCloseAccordingToType(
            [1.0 - 3.0 * 0.1 - 2.0 * 0.1, 2.0 - 3.0 * 0.1 - 2.0 * 0.1],
            self.evaluate(var0))
        self.assertAllCloseAccordingToType(
            [3.0 - 3.0 * 0.01 - 2.0 * 0.01, 4.0 - 3.0 * 0.01 - 2.0 * 0.01],
            self.evaluate(var1))
Developer: Wajih-O | Project: tensorflow | Lines: 35 | Source: gradient_descent_test.py

Example 14: _test_basic_sgd_with_learning_rate_decay

  def _test_basic_sgd_with_learning_rate_decay(self, sgd, dtype):
    var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
    var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
    grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
    grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
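    # Build the SGD update op once in graph mode; in eager mode,
    # apply_gradients is re-invoked for each step instead.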
    if not context.executing_eagerly():
      sgd_op = sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
    self.evaluate(variables.global_variables_initializer())
    # Run 2 steps of sgd
    if not context.executing_eagerly():
      self.evaluate(sgd_op)
    else:
      sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
    # Validate updated params
    self.assertAllCloseAccordingToType([1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1],
                                       self.evaluate(var0))
    self.assertAllCloseAccordingToType([3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01],
                                       self.evaluate(var1))

    if not context.executing_eagerly():
      self.evaluate(sgd_op)
    else:
      sgd.apply_gradients(zip([grads0, grads1], [var0, var1]))
    # Validate updated params
    self.assertAllCloseAccordingToType(
        [1.0 - 3.0 * 0.1 - 2.0 * 0.1, 2.0 - 3.0 * 0.1 - 2.0 * 0.1],
        self.evaluate(var0))
    self.assertAllCloseAccordingToType(
        [3.0 - 3.0 * 0.01 - 2.0 * 0.01, 4.0 - 3.0 * 0.01 - 2.0 * 0.01],
        self.evaluate(var1))
Developer: kylin9872 | Project: tensorflow | Lines: 30 | Source: gradient_descent_test.py

Example 15: test_apply_gradients

  def test_apply_gradients(self):

    x = variable_scope.get_variable("x", initializer=1., dtype=dtypes.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices([np.nan, np.inf, 0.1])
    itr = dataset.make_one_shot_iterator()

    lr = 1
    opt = gd.GradientDescentOptimizer(lr)
    lsm = lsm_lib.FixedLossScaleManager(1.e4)
    opt = lso.LossScaleOptimizer(opt, lsm)
    train_fn = lambda: opt.apply_gradients([(itr.get_next(), x)])
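    # Graph mode: build the train op once; eager mode calls train_fn() below.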
    if not context.executing_eagerly():
      train_op = train_fn()

    expected_output = [1, 1, 1 - 0.1]
    actual_output = []

    self.evaluate(variables.global_variables_initializer())
    for _ in range(3):
      # Steps whose gradients are nan or inf are not applied.
      if context.executing_eagerly():
        train_fn()
      else:
        self.evaluate(train_op)
      actual_output.append(self.evaluate(x))
    self.assertAllClose(expected_output, actual_output)
Developer: BhaskarNallani | Project: tensorflow | Lines: 26 | Source: loss_scale_optimizer_test.py


Note: The tensorflow.python.eager.context.executing_eagerly examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's License before redistributing or reusing the code. Do not reproduce without permission.