

Python ops.enable_eager_execution Function Code Examples

This article collects typical usage examples of the tensorflow.python.framework.ops.enable_eager_execution function in Python. If you are wondering what enable_eager_execution does, how to call it, or what real-world usage looks like, the code examples selected below should help.


The following presents 10 code examples of the enable_eager_execution function, sorted by popularity by default.
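Before the examples, here is a minimal usage sketch. It assumes TensorFlow 1.x, where eager execution is off by default and enable_eager_execution must be called before any graphs, tensors, or devices are created (the public equivalent is tf.enable_eager_execution()):

from tensorflow.python.framework import ops
from tensorflow.python.eager import context

# Switch the process into eager mode; an optional config= ConfigProto can be
# passed, as several examples below do.
ops.enable_eager_execution()

# Operations now run immediately instead of building a graph.
print(context.executing_eagerly())  # True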

Example 1: wrapper

  def wrapper(*args, **kwargs):
    try:
      return fn(*args, **kwargs)
    finally:
      # Reset the global eager context after the wrapped test runs, then
      # re-enable eager execution for subsequent tests.
      del context._context
      context._context = context.Context()
      ops.enable_eager_execution()
Developer ID: kylin9872, Project: tensorflow, Lines of code: 7, Source: config_test.py

Example 2: setUp

  def setUp(self):
    # Enabling eager execution should succeed and be reflected by the context.
    ops.enable_eager_execution()
    self.assertTrue(context.executing_eagerly())

    # Calling enable eager execution a second time should not cause an error.
    ops.enable_eager_execution()
    self.assertTrue(context.executing_eagerly())
Developer ID: adit-chandra, Project: tensorflow, Lines of code: 8, Source: ops_enable_eager_test.py

Example 3: enable_v2_behavior

def enable_v2_behavior():
  """Enables TensorFlow 2.x behaviors.

  This function can be called at the beginning of the program (before `Tensors`,
  `Graphs`, or other structures have been created, and before devices have been
  initialized). It switches all global behaviors that are different between
  TensorFlow 1.x and 2.x to behave as intended for 2.x.

  This function is called from the main TensorFlow `__init__.py` file, so users
  should not need to call it themselves, except during complex migrations.
  """
  tf2.enable()  # Switches TensorArrayV2 and control flow V2
  ops.enable_eager_execution()
  tensor_shape.enable_v2_tensorshape()  # Also switched by tf2
  variable_scope.enable_resource_variables()
Developer ID: adit-chandra, Project: tensorflow, Lines of code: 15, Source: v2_compat.py

Example 4: main

def main(_):
  if flags.FLAGS.enable_eager:
    ops.enable_eager_execution()
    logging.info('Eager execution enabled for MNIST Multi-Worker.')
  else:
    logging.info('Eager execution not enabled for MNIST Multi-Worker.')

  # Build the train and eval datasets from the MNIST data.
  train_ds, eval_ds = get_input_datasets()

  if flags.FLAGS.distribution_strategy == 'multi_worker_mirrored':
    # MultiWorkerMirroredStrategy for multi-worker distributed MNIST training.
    strategy = collective_strategy.CollectiveAllReduceStrategy()
  else:
    raise ValueError('Only the `multi_worker_mirrored` strategy is supported '
                     'in the Keras MNIST example at this time. Strategy passed '
                     'in is %s' % flags.FLAGS.distribution_strategy)

  # Create and compile the model under the distribution strategy scope.
  # `fit`, `evaluate` and `predict` will be distributed based on the strategy
  # the model was compiled with.
  with strategy.scope():
    model = get_model()
    optimizer = rmsprop.RMSProp(learning_rate=0.001)
    model.compile(
        loss=keras.losses.categorical_crossentropy,
        optimizer=optimizer,
        metrics=['accuracy'])

  # Train the model with the train dataset.
  tensorboard_callback = keras.callbacks.TensorBoard(
      log_dir=flags.FLAGS.model_dir)
  model.fit(
      x=train_ds,
      epochs=20,
      steps_per_epoch=468,
      callbacks=[tensorboard_callback])

  # Evaluate the model with the eval dataset.
  score = model.evaluate(eval_ds, steps=10, verbose=0)
  logging.info('Test loss:{}'.format(score[0]))
  logging.info('Test accuracy:{}'.format(score[1]))
Developer ID: aritratony, Project: tensorflow, Lines of code: 42, Source: mnist_multi_worker.py

Example 5: test_no_loss_in_call

        loss='sparse_categorical_crossentropy',
        optimizer=RMSPropOptimizer(learning_rate=0.001))
    x = np.ones((100, 4), dtype=np.float32)
    np.random.seed(123)
    y = np.random.randint(0, 1, size=(100, 1))
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset.make_one_shot_iterator()
    history = model.fit(iterator, epochs=1, steps_per_epoch=10)
    self.assertEqual(np.around(history.history['loss'][-1], decimals=4), 0.6173)

  def test_no_loss_in_call(self):

    class HasLoss(keras.layers.Layer):

      def call(self, x):
        self.add_loss(x)
        return x

    layer = HasLoss()
    with self.assertRaises(RuntimeError):
      layer(1.)

    with ops.Graph().as_default():
      layer(1.)

if __name__ == '__main__':
  ops.enable_eager_execution()
  test.main()
Developer ID: ThunderQi, Project: tensorflow, Lines of code: 30, Source: training_eager_test.py

Example 6: train

    @function.defun
    def train():
      v = resource_variable_ops.ResourceVariable(1.0)
      grad = backprop.implicit_grad(loss)(v)
      optimizer.apply_gradients(grad)
      return v.read_value()

    train()

  def testOptimizerInDefunWithCapturedVariable(self):
    v = resource_variable_ops.ResourceVariable(1.0)
    def loss():
      return v**2

    optimizer = momentum.MomentumOptimizer(learning_rate=1.0, momentum=1.0)

    @function.defun
    def train():
      grad = backprop.implicit_grad(loss)()
      optimizer.apply_gradients(grad)

    train()
    self.assertEqual(v.numpy(), -1.0)


if __name__ == '__main__':
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(device_count={'CPU': 3}))
  test.main()
Developer ID: StephenOman, Project: tensorflow, Lines of code: 29, Source: function_test.py

Example 7: testOptimization

  def testOptimization(self):
    dataset = dataset_ops.Dataset.range(10)
    dataset = dataset.apply(optimization.assert_next(["MemoryCacheImpl"]))
    dataset = dataset.skip(0)  # This no-op skip should be optimized away.
    dataset = dataset.cache()

    options = dataset_ops.Options()
    options.experimental_optimization.noop_elimination = True
    dataset = dataset.with_options(options)

    multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator(
        dataset, ["/cpu:1", "/cpu:2"])

    config = config_pb2.ConfigProto(device_count={"CPU": 3})
    with self.test_session(config=config):
      self.evaluate(multi_device_iterator.initializer)
      for i in range(0, 10, 2):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.assertEqual(i, self.evaluate(elem_on_1))
        self.assertEqual(i + 1, self.evaluate(elem_on_2))
      with self.assertRaises(errors.OutOfRangeError):
        elem_on_1, elem_on_2 = multi_device_iterator.get_next()
        self.evaluate(elem_on_1)
        self.evaluate(elem_on_2)


if __name__ == "__main__":
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(device_count={"CPU": 3, "GPU": 1}))
  test.main()
Developer ID: Wajih-O, Project: tensorflow, Lines of code: 30, Source: multi_device_iterator_test.py

Example 8: main

def main(argv=None):  # pylint: disable=function-redefined
  _ops.enable_eager_execution()
  _test.main(argv)
Developer ID: JonathanRaiman, Project: tensorflow, Lines of code: 3, Source: test.py

Example 9: MultiDeviceTest

class MultiDeviceTest(xla_test.XLATestCase):
  """Test running TPU computation on more than one core."""

  def testBasic(self):
    if not multiple_tpus():
      self.skipTest('MultiDeviceTest requires multiple TPU devices.')

    # Compute 10 on TPU core 0
    with ops.device('device:TPU:0'):
      two = constant_op.constant(2)
      five = constant_op.constant(5)
      ten = two * five
      self.assertAllEqual(10, ten)

    # Compute 6 on TPU core 1
    with ops.device('device:TPU:1'):
      two = constant_op.constant(2)
      three = constant_op.constant(3)
      six = two * three
      self.assertAllEqual(6, six)

    # Copy 10 and 6 to CPU and sum them
    self.assertAllEqual(16, ten + six)


if __name__ == '__main__':
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(log_device_placement=True))
  googletest.main()
Developer ID: JonathanRaiman, Project: tensorflow, Lines of code: 29, Source: eager_test.py

Example 10: main

def main(argv=None):
  _ops.enable_eager_execution()
  _test.main(argv)
Developer ID: AbhinavJain13, Project: tensorflow, Lines of code: 3, Source: test.py


Note: The tensorflow.python.framework.ops.enable_eager_execution function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their original authors, and copyright in the source code remains with those authors; please consult the corresponding project's license before distributing or using the code. Do not republish this article without permission.