

Python distribute_coordinator.run_distribute_coordinator Function Code Examples

This article collects typical usage examples of the run_distribute_coordinator function from tensorflow.python.distribute.distribute_coordinator in Python. If you have been wondering what run_distribute_coordinator does, how to call it, or where to find examples of it in real code, the curated examples here may help.


Below are 15 code examples of the run_distribute_coordinator function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
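
The examples were collected from different TensorFlow snapshots, so two signatures appear below: older snapshots pass between_graph= directly, while newer ones pass a strategy object as the second positional argument. Before the individual examples, here is a minimal hedged sketch against the newer signature. The MockStrategy stand-in is modeled loosely on the helper these tests use (the real one is richer), and the localhost addresses are placeholders, not values from the examples:

# Minimal hedged sketch of run_distribute_coordinator in standalone client
# mode. MockStrategy and the worker addresses are illustrative assumptions.
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context


class _MockExtended(object):
  # Attributes the coordinator reads off strategy.extended (assumption,
  # based on how the tests' MockStrategy helper is used below).
  def __init__(self, between_graph):
    self.experimental_between_graph = between_graph
    self.experimental_should_init = True
    self.should_checkpoint = True
    self.should_save_summary = True


class MockStrategy(object):
  def __init__(self, between_graph=False):
    self.extended = _MockExtended(between_graph)

  def configure(self, session_config=None, cluster_spec=None,
                task_type=None, task_id=None):
    del session_config, cluster_spec, task_type, task_id


def worker_fn(strategy):
  # Runs once per worker thread; the tuple printed here is exactly what the
  # _dump_worker_context tests below assert on.
  context = dc_context.get_current_worker_context()
  print(context.master_target, context.num_workers,
        context.is_chief, context.distributed_mode)


dc.run_distribute_coordinator(
    worker_fn,
    MockStrategy(between_graph=True),
    mode=dc.CoordinatorMode.STANDALONE_CLIENT,
    cluster_spec={"worker": ["localhost:12345", "localhost:12346"]})

In between-graph standalone client mode the coordinator spawns one thread per worker task and runs worker_fn in each, which is why the tests can collect per-task context tuples into a dict.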

Example 1: testBetweenGraphContextWithChief

  def testBetweenGraphContextWithChief(self):
    # Adds a chief node, so there are NUM_WORKERS + 1 workers in total.
    cluster_spec = copy.deepcopy(self._cluster_spec)
    cluster_spec[CHIEF] = ["fake_chief"]

    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=True),
        cluster_spec=cluster_spec,
        rpc_layer="grpc")

    # There is one CHIEF and there are three workers.
    self.assertEqual(len(self._worker_context), 2)
    self.assertTrue(CHIEF in self._worker_context)
    self.assertTrue(WORKER in self._worker_context)
    self.assertEqual(len(self._worker_context[CHIEF]), 1)
    self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)

    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(self._worker_context[CHIEF][0],
                     ("grpc://fake_chief", 4, True, True))
    self.assertEqual(
        self._worker_context[WORKER][0],
        (_bytes_to_str(self._workers[0].target), NUM_WORKERS + 1, False, True))
    self.assertEqual(
        self._worker_context[WORKER][1],
        (_bytes_to_str(self._workers[1].target), NUM_WORKERS + 1, False, True))
    self.assertEqual(
        self._worker_context[WORKER][2],
        (_bytes_to_str(self._workers[2].target), NUM_WORKERS + 1, False, True))
Developer ID: aritratony, Project: tensorflow, Lines: 32, Source: distribute_coordinator_test.py
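
Examples 1, 4 and 15 all pass self._dump_worker_context as the worker_fn, which this page never shows (example 13's _dump_task_context is presumably the same idea against self._task_context). Below is a hedged reconstruction, inferred purely from what the assertions read back; the real helper lives in distribute_coordinator_test.py and the field names here are assumptions:

# Hedged reconstruction of _dump_worker_context; attribute names are
# inferred from the assertions, not copied from the real test.
def _dump_worker_context(self, strategy):
  context = distribute_coordinator_context.get_current_worker_context()
  task_type = str(context.task_type)  # becomes "None" for in-graph/local runs
  task_id = context.task_id or 0
  with self._lock:  # worker_fn runs concurrently, one thread per task
    contexts = self._worker_context.setdefault(task_type, [])
    while len(contexts) <= task_id:
      contexts.append(None)
    contexts[task_id] = (context.master_target, context.num_workers,
                         context.is_chief, context.distributed_mode)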

Example 2: testInGraph

  def testInGraph(self):
    """Test it runs in-graph replicated training correctly."""
    distribute_coordinator.run_distribute_coordinator(
        self._in_graph_worker_fn,
        cluster_spec=self._cluster_spec,
        between_graph=False)
    self.assertEqual(self._result_correct, 1)
Developer ID: dan-lennox, Project: tensorflow, Lines: 7, Source: distribute_coordinator_test.py

Example 3: testRpcLayerEnvironmentVariable

  def testRpcLayerEnvironmentVariable(self):
    cluster_spec = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
    tf_config = {"cluster": cluster_spec, "rpc_layer": "cake"}

    rpc_layer_from_coordinator = [None]

    def _run_mock_server(cluster_spec=None,
                         task_type=None,
                         task_id=None,
                         session_config=None,
                         rpc_layer=None,
                         environment=None):
      del cluster_spec, task_type, task_id, session_config, environment
      rpc_layer_from_coordinator[0] = rpc_layer
      return MockServer()

    with test.mock.patch.dict(
        "os.environ",
        {"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
            distribute_coordinator, "_run_std_server", _run_mock_server):
      distribute_coordinator.run_distribute_coordinator(
          None,
          MockStrategy(between_graph=True),
          mode=INDEPENDENT_WORKER,
          cluster_spec=cluster_spec,
          task_type="ps",
          task_id=0)
    self.assertEqual(rpc_layer_from_coordinator[0], "cake")
Developer ID: aritratony, Project: tensorflow, Lines: 28, Source: distribute_coordinator_test.py
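
The MockServer returned by the patched _run_std_server is also defined elsewhere in the test file. A minimal hedged stand-in only needs the surface the coordinator touches, roughly:

# Hedged stand-in: for a "ps" task the coordinator blocks in server.join(),
# so the mock just records the call and returns immediately.
class MockServer(object):

  def __init__(self):
    self._joined = False

  def join(self):
    # A real tf.train.Server.join() blocks forever; the mock does not.
    self._joined = True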

Example 4: testInGraphContextWithEval

  def testInGraphContextWithEval(self):
    # Adds an EVALUATOR job.
    cluster_spec = copy.deepcopy(self._cluster_spec)
    cluster_spec[EVALUATOR] = ["fake_evaluator"]

    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=False),
        cluster_spec=cluster_spec,
        rpc_layer=None)

    # There is one "None" task and one EVALUATOR task.
    self.assertEqual(len(self._worker_context), 2)
    self.assertTrue("None" in self._worker_context)
    self.assertTrue(EVALUATOR in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)
    self.assertEqual(len(self._worker_context[EVALUATOR]), 1)

    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(self._worker_context["None"][0], (_strip_protocol(
        _bytes_to_str(self._workers[0].target)), 3, True, True))
    self.assertEqual(self._worker_context[EVALUATOR][0],
                     ("fake_evaluator", 3, True, False))
Developer ID: aritratony, Project: tensorflow, Lines: 25, Source: distribute_coordinator_test.py

Example 5: _run_standalone_client

def _run_standalone_client(test_obj, strategy, cluster_spec):
  input_shape = (28, 28, 1)
  with strategy.scope():
    orig_model = _get_model(input_shape)

  def worker_fn(strategy):
    with ops.Graph().as_default():
      batch_size = 64
      steps = 2

      with strategy.scope():
        train_ds, _ = _mnist_synthetic_dataset(batch_size, steps)
        model = _clone_and_build_model(orig_model, strategy)

        orig_loss, orig_acc = model.evaluate(train_ds, steps=steps)

        # Workaround for the metrics issue (b/122928955) in async training. This
        # can only be used in standalone client mode.
        dc_context.get_current_worker_context().wait_for_other_workers()

        model.fit(x=train_ds, epochs=2, steps_per_epoch=steps)

        dc_context.get_current_worker_context().wait_for_other_workers()

        trained_loss, trained_acc = model.evaluate(train_ds, steps=steps)

      test_obj.assertLessEqual(trained_loss, orig_loss)
      test_obj.assertGreaterEqual(trained_acc, orig_acc)

  dc.run_distribute_coordinator(
      worker_fn,
      strategy,
      mode=dc.CoordinatorMode.STANDALONE_CLIENT,
      cluster_spec=cluster_spec)
Developer ID: aritratony, Project: tensorflow, Lines: 34, Source: multi_worker_test.py
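
For context, a hedged sketch of how a test might drive this helper. The in-process cluster helper and the strategy class named below are assumptions, not shown on this page:

# Hypothetical driver (names are assumptions): bring up an in-process
# cluster, pick a multi-worker strategy, then hand both to the helper.
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import multi_worker_test_base

cluster_spec = multi_worker_test_base.create_in_process_cluster(
    num_workers=2, num_ps=0)
strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()
# test_obj is the tf.test.TestCase instance driving the run.
_run_standalone_client(test_obj, strategy, cluster_spec)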

Example 6: testInGraphStandaloneMode

  def testInGraphStandaloneMode(self):
    """Test it runs in-graph replication in standalone client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._in_graph_worker_fn,
        MockStrategy(between_graph=False),
        cluster_spec=self._cluster_spec)
    self.assertEqual(self._result_correct, 1)
Developer ID: aritratony, Project: tensorflow, Lines: 7, Source: distribute_coordinator_test.py

Example 7: testInGraphSplitMode

  def testInGraphSplitMode(self):
    """Test it runs in-graph replication in split client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._in_graph_worker_fn,
        cluster_spec=self._cluster_spec,
        between_graph=False)
    self.assertEqual(self._result_correct, 1)
Developer ID: ZhangXinNan, Project: tensorflow, Lines: 7, Source: distribute_coordinator_test.py

Example 8: _thread_fn

  def _thread_fn(cluster_spec):
    distribute_coordinator.run_distribute_coordinator(
        None,
        None,
        mode=INDEPENDENT_WORKER,
        cluster_spec=cluster_spec,
        task_type="ps",
        task_id=0)
Developer ID: mrlittlepig, Project: tensorflow, Lines: 8, Source: distribute_coordinator_test.py

Example 9: _thread_fn

  def _thread_fn(cluster_spec):
    distribute_coordinator.run_distribute_coordinator(
        None,
        MockStrategy(between_graph=True),
        mode=INDEPENDENT_WORKER,
        cluster_spec=cluster_spec,
        task_type="ps",
        task_id=0)
Developer ID: aritratony, Project: tensorflow, Lines: 8, Source: distribute_coordinator_test.py
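
Examples 8 and 9 only define the thread body. A hedged sketch of how the surrounding test presumably launches it; a ps task blocks in server.join(), so it must not run on the main thread:

# Hedged usage sketch for _thread_fn: run the blocking ps coordinator call
# on a daemon thread so the test process can proceed and later tear it down.
import threading

ps_thread = threading.Thread(target=_thread_fn, args=(cluster_spec,))
ps_thread.daemon = True
ps_thread.start()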

Example 10: testBetweenGraph

  def testBetweenGraph(self):
    """Test it runs between-graph replicated training correctly."""
    distribute_coordinator.run_distribute_coordinator(
        self._between_graph_worker_fn,
        cluster_spec=self._cluster_spec,
        between_graph=True)

    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)
Developer ID: dan-lennox, Project: tensorflow, Lines: 9, Source: distribute_coordinator_test.py

Example 11: testBetweenGraph

  def testBetweenGraph(self):
    """Test it runs between-graph replication in standalone client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._between_graph_worker_fn,
        MockStrategy(between_graph=True),
        cluster_spec=self._cluster_spec)

    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)
Developer ID: aritratony, Project: tensorflow, Lines: 9, Source: distribute_coordinator_test.py

Example 12: testBetweenGraphWithMonitoredSession

  def testBetweenGraphWithMonitoredSession(self):
    """Test monitored session in standalone client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._between_graph_with_monitored_session,
        MockStrategy(between_graph=True),
        cluster_spec=self._cluster_spec)

    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)
Developer ID: aritratony, Project: tensorflow, Lines: 9, Source: distribute_coordinator_test.py

Example 13: testLocalContext

  def testLocalContext(self):
    # Dumps the task contexts to the self._task_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_task_context, cluster_spec=None, between_graph=True)

    # There is only a "None" task.
    self.assertEqual(len(self._task_context), 1)
    self.assertTrue("None" in self._task_context)
    self.assertEqual(len(self._task_context["None"]), 1)

    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(self._task_context["None"][0], ("local", 0, True, False))
Developer ID: dan-lennox, Project: tensorflow, Lines: 13, Source: distribute_coordinator_test.py

Example 14: test_session_config_in_session_creator

  def test_session_config_in_session_creator(self):
    cluster_spec = {"worker": ["localhost:0"]}
    tf_config = {"cluster": cluster_spec}

    with test.mock.patch.dict("os.environ",
                              {"TF_CONFIG": json.dumps(tf_config)}):
      distribute_coordinator.run_distribute_coordinator(
          self._worker_fn,
          MockStrategy(between_graph=True),
          mode=INDEPENDENT_WORKER,
          cluster_spec=cluster_spec,
          task_type="worker",
          task_id=0)
    self.assertEqual(self._device_filters, ["/job:worker/task:0", "/job:ps"])
    self.assertEqual(self._intra_op_parallelism_threads, 2)
    self.assertEqual(self._inter_op_parallelism_threads, 0)
Developer ID: AnishShah, Project: tensorflow, Lines: 16, Source: distribute_coordinator_test.py
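
The self._worker_fn behind these assertions is not shown either. A hedged reconstruction of what it plausibly records; the attribute access is an assumption, not copied from the real test:

# Hedged reconstruction: grab the session config the coordinator prepared
# for this worker and stash the fields the assertions check.
def _worker_fn(self, strategy):
  context = distribute_coordinator_context.get_current_worker_context()
  session_config = context._session_config  # test-only peek at the config
  self._device_filters = list(session_config.device_filters)
  self._intra_op_parallelism_threads = (
      session_config.intra_op_parallelism_threads)
  self._inter_op_parallelism_threads = (
      session_config.inter_op_parallelism_threads)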

Example 15: testInGraphContext

  def testInGraphContext(self):
    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=False),
        cluster_spec=self._cluster_spec)

    # There is only a "None" task in the dumped task context.
    self.assertEqual(len(self._worker_context), 1)
    self.assertTrue("None" in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)

    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(
        self._worker_context["None"][0],
        (_bytes_to_str(self._workers[0].target), NUM_WORKERS, True, True))
Developer ID: aritratony, Project: tensorflow, Lines: 17, Source: distribute_coordinator_test.py


Note: The tensorflow.python.distribute.distribute_coordinator.run_distribute_coordinator examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's License. Do not reproduce without permission.