This article collects typical usage examples of the num_gpus function from the Python module tensorflow.python.eager.context. If you are wondering what num_gpus does, how to call it, or where to find examples of its use, the curated code samples below may help.
Fifteen code examples of num_gpus are shown below, sorted by popularity by default.
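Before the examples, a minimal sketch of the call itself may help. This is a hedged sketch assuming a TF 1.x-era installation where the internal module tensorflow.python.eager.context is importable; num_gpus() simply reports how many GPUs the current context can see.

from tensorflow.python.eager import context

# num_gpus() returns the number of GPUs available to the current context.
if context.num_gpus() == 0:
  print("No GPUs available; running on CPU.")
else:
  print("Running with %d GPU(s)." % context.num_gpus())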
Example 1: decorated
def decorated(self, **kwargs):
  """A wrapped test method that treats some arguments in a special way."""
  mode = kwargs.pop("mode", "graph")
  distribution = kwargs.get("distribution", None)
  required_tpu = kwargs.pop("required_tpu", False)
  required_gpus = kwargs.pop("required_gpus", None)

  if distribution:
    assert required_gpus is None, (
        "Do not use `required_gpus` and `distribution` together.")
    assert required_tpu is False, (
        "Do not use `required_tpu` and `distribution` together.")
    required_gpus = distribution.required_gpus
    required_tpu = distribution.required_tpu

  if required_tpu and not TPU_TEST:
    self.skipTest("Test requires a TPU, but it's not available.")
  if not required_tpu and TPU_TEST:
    self.skipTest("Test that doesn't require a TPU.")

  if not required_gpus:
    if GPU_TEST:
      self.skipTest("Test that doesn't require GPUs.")
  elif context.num_gpus() < required_gpus:
    self.skipTest(
        "{} GPUs are not available for this test. {} GPUs are available".
        format(required_gpus, context.num_gpus()))

  # At this point, `kwargs` doesn't have `required_gpus` or `required_tpu`
  # that the user might have specified. `kwargs` still has `mode`, which
  # the test is allowed to accept or ignore.
  requested_arguments = tf_inspect.getfullargspec(test_method).args
  missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
      set(requested_arguments + ["mode"]))
  if missing_arguments:
    raise ValueError("The test is missing arguments {}.".format(
        missing_arguments))

  kwargs_to_pass = {}
  for arg in requested_arguments:
    if arg == "self":
      kwargs_to_pass[arg] = self
    else:
      kwargs_to_pass[arg] = kwargs[arg]

  if mode == "eager":
    with ops.Graph().as_default(), context.eager_mode():
      if distribution:
        kwargs_to_pass["distribution"] = distribution.strategy
      test_method(**kwargs_to_pass)
  elif mode == "graph":
    with ops.Graph().as_default(), context.graph_mode():
      if distribution:
        kwargs_to_pass["distribution"] = distribution.strategy
      test_method(**kwargs_to_pass)
  else:
    raise ValueError(
        "'mode' has to be either 'eager' or 'graph' and not {}".format(mode))
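In TensorFlow's test suite this wrapper is produced by a test-combinations decorator rather than written by hand. Below is a hedged sketch of how a test might request it; the combinations.generate and combinations.combine names are assumptions drawn from the surrounding module and may differ between TF versions.

class TwoGPUTest(test.TestCase, parameterized.TestCase):

  # The wrapper above skips this test when fewer than 2 GPUs are available,
  # and otherwise runs it once per requested mode.
  @combinations.generate(
      combinations.combine(mode=["graph", "eager"], required_gpus=2))
  def testWithTwoGPUs(self):
    self.assertGreaterEqual(context.num_gpus(), 2)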
Example 2: _get_distribution_strategy
def _get_distribution_strategy(self):
  devices = ["/device:CPU:0", "/device:GPU:0"]
  if GPU_TEST:
    self.assertGreater(context.num_gpus(), 0)
    if context.num_gpus() > 1:
      devices = ["/device:GPU:0", "/device:GPU:1"]
  print(self.id().split(".")[-1], "devices:", ", ".join(devices))
  return mirrored_strategy.MirroredStrategy(devices)
Example 3: test_end_to_end_keras_2_gpu
def test_end_to_end_keras_2_gpu(self):
  if context.num_gpus() < 2:
    self.skipTest(
        "{} GPUs are not available for this test. {} GPUs are available".
        format(2, context.num_gpus()))

  integration.run_synthetic(
      ncf_keras_main.main, tmp_root=self.get_temp_dir(), max_train=None,
      extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '2'])
Example 4: maybe_skip_test
def maybe_skip_test(test_case, is_tpu_required, num_gpus_required):
  if is_tpu_required and not TPU_TEST:
    test_case.skipTest("Test requires a TPU, but it's not available.")
  if not is_tpu_required and TPU_TEST:
    test_case.skipTest("Test that doesn't require a TPU.")

  if not num_gpus_required:
    if GPU_TEST:
      test_case.skipTest("Test that doesn't require GPUs.")
  elif context.num_gpus() < num_gpus_required:
    # TODO(priyag): Consider allowing tests in graph mode using soft
    # placement.
    test_case.skipTest(
        "{} GPUs are not available for this test. {} GPUs are available".format(
            num_gpus_required, context.num_gpus()))
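A hedged usage sketch for this helper, called at the top of a test method with the signature shown above; the method name and body are hypothetical placeholders.

def test_two_gpu_all_reduce(self):
  # Skip unless at least two GPUs are present and no TPU is attached.
  maybe_skip_test(self, is_tpu_required=False, num_gpus_required=2)
  # The actual multi-GPU test body would run only past this point.
  self.assertGreaterEqual(context.num_gpus(), 2)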
Example 5: _all_devices
def _all_devices():
  devices = []
  tfconfig = TFConfigClusterResolver()
  if tfconfig.cluster_spec().as_dict():
    devices = _cluster_spec_to_device_list(tfconfig.cluster_spec(),
                                           context.num_gpus())
  return devices if devices else all_local_devices()
Example 6: __init__
def __init__(self,
             devices=None,
             num_gpus=None,
             cross_tower_ops=None,
             prefetch_on_device=None):
  super(MirroredStrategy, self).__init__()
  # Convert `num_gpus` into `devices`, shouldn't specify both.
  if devices is None:
    if num_gpus is None:
      num_gpus = context.num_gpus()
    devices = ["/device:GPU:%d" % d for d in range(num_gpus)]
  elif num_gpus is not None:
    raise ValueError("Must only specify one of `devices` and `num_gpus`.")

  assert devices, "Must specify at least one device."
  assert len(set(devices)) == len(devices), (
      "No duplicates allowed in `devices` argument.")
  # TODO(josh11b): Require at least 2 devices?
  self._devices = devices
  self._canonical_device_set = set(
      [device_util.canonicalize(d) for d in devices])
  self._device_index = values.PerDevice(
      dict((d, i) for i, d in enumerate(devices)))
  self._cross_tower_ops = cross_tower_ops
  self._prefetch_on_device = prefetch_on_device
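A hedged usage sketch for this constructor: `devices` and `num_gpus` are mutually exclusive, as the ValueError above enforces, and omitting both mirrors across every GPU that context.num_gpus() reports.

# Either spelling below, but not both at once (hypothetical calls against
# the constructor shown above):
strategy = MirroredStrategy(num_gpus=2)  # expands to GPU:0 and GPU:1
strategy = MirroredStrategy(devices=["/device:GPU:0", "/device:CPU:0"])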
Example 7: _get_strategy_object
def _get_strategy_object(self, strategy_cls, eval_strategy=False):
  if strategy_cls == mirrored_strategy.CoreMirroredStrategy:
    if eval_strategy:
      return strategy_cls()
    else:
      return strategy_cls(
          cross_device_ops=self._make_cross_device_ops(
              num_gpus_per_worker=context.num_gpus()))
  elif (strategy_cls == mirrored_strategy.MirroredStrategy and
        not eval_strategy):
    return strategy_cls(
        num_gpus_per_worker=context.num_gpus(),
        cross_device_ops=self._make_cross_device_ops(
            num_gpus_per_worker=context.num_gpus()))
  else:
    return strategy_cls(num_gpus_per_worker=context.num_gpus())
Example 8: testSaveAndRestoreMirroredOneGraph
def testSaveAndRestoreMirroredOneGraph(self):
  if context.num_gpus() < 1 and context.executing_eagerly():
    # Graph mode can work without a GPU because the Placer "moves" the
    # variable to a CPU. In other words, if there is no GPU available, but
    # the user requested to create a variable on GPU, Placer will ignore the
    # user request and assign the VarHandleOp to CPU. This requires
    # soft_placement, which is on by default.
    self.skipTest("A GPU is not available for this test in eager mode.")

  with self.cached_session(config=self.config) as sess:
    v, device_map, mirrored = _make_mirrored()
    devices = device_map.all_devices

    # Overwrite the initial values.
    self._assign_mirrored(devices, v, [3., 4.])

    # Saves the current value of v[0], 3.
    save_path, saver = self._save_return_saver(sess, mirrored)

    # Change the values between save and restore.
    self._assign_mirrored(devices, v, [5., 6.])

    # Restores the saved value of 3. to both variables.
    saver.restore(sess, save_path)
    self.assertEqual([3., 3.], self.evaluate([v[0], v[1]]))
Example 9: _get_distribution_strategy
def _get_distribution_strategy(self):
  cluster_spec = server_lib.ClusterSpec({
      "worker": ["/job:worker/task:0", "/job:worker/task:1"]
  })
  strategy = mirrored_strategy.MirroredStrategy(num_gpus=context.num_gpus())
  strategy.configure(cluster_spec=cluster_spec)
  return strategy
Example 10: testMakeInputFnIteratorDistributed
def testMakeInputFnIteratorDistributed(self, num_gpus, use_core_strategy,
                                       use_dataset):
  if context.num_gpus() < num_gpus:
    self.skipTest('Not enough GPUs')
  if use_dataset:
    fn = lambda: dataset_ops.Dataset.range(100)
  else:
    def fn():
      dataset = dataset_ops.Dataset.range(100)
      it = dataset.make_one_shot_iterator()
      return it.get_next
  expected_values = [[i + j for j in range(num_gpus)]
                     for i in range(0, 100, num_gpus)]

  input_fn = self._input_fn_to_test_input_context(
      fn,
      expected_num_replicas_in_sync=num_gpus,
      expected_num_input_pipelines=3,
      expected_input_pipeline_id=1)  # because task_id = 1
  self._test_input_fn_iterator(
      'worker',
      1,
      num_gpus,
      input_fn,
      expected_values,
      test_reinitialize=use_dataset,
      use_core_strategy=use_core_strategy)
Example 11: DISABLED_testMakeInputFnIteratorLocal
def DISABLED_testMakeInputFnIteratorLocal(self, num_gpus, use_core_strategy,
                                          use_dataset):
  if context.num_gpus() < num_gpus:
    self.skipTest('Not enough GPUs')
  if use_dataset:
    fn = lambda: dataset_ops.Dataset.range(100)
  else:
    def fn():
      dataset = dataset_ops.Dataset.range(100)
      it = dataset.make_one_shot_iterator()
      return it.get_next
  expected_values = [[i + j for j in range(num_gpus)]
                     for i in range(0, 100, num_gpus)]

  input_fn = self._input_fn_to_test_input_context(
      fn,
      expected_num_replicas_in_sync=num_gpus,
      expected_num_input_pipelines=1,
      expected_input_pipeline_id=0)  # only one worker and pipeline for local.
  self._test_input_fn_iterator(
      None,
      None,
      num_gpus,
      input_fn,
      expected_values,
      test_reinitialize=use_dataset,
      use_core_strategy=use_core_strategy)
Example 12: DISABLED_testMakeInputFnIterator
def DISABLED_testMakeInputFnIterator(self, num_gpus, use_dataset,
                                     use_core_strategy):
  if context.num_gpus() < num_gpus:
    self.skipTest('Not enough GPUs')
  if use_dataset:
    fn = lambda: dataset_ops.Dataset.range(100)
  else:
    def fn():
      dataset = dataset_ops.Dataset.range(100)
      it = dataset.make_one_shot_iterator()
      return it.get_next
  # We use CPU as the device when num_gpus = 0.
  devices_per_worker = max(1, num_gpus)
  expected_values = [[i + j for j in range(devices_per_worker)]
                     for i in range(0, 100, devices_per_worker)]

  input_fn = self._input_fn_to_test_input_context(
      fn,
      expected_num_replicas_in_sync=3 * devices_per_worker,
      expected_num_input_pipelines=3,
      expected_input_pipeline_id=1)  # because task_id = 1
  self._test_input_fn_iterator(
      'worker',
      1,
      num_gpus,
      input_fn,
      expected_values,
      test_reinitialize=use_dataset,
      use_core_strategy=use_core_strategy)
Example 13: _independent_worker_fn
def _independent_worker_fn(*args, **kwargs):  # pylint: disable=unused-argument
  """Simulates an Independent Worker inside of a thread."""
  # TODO(rchao/yuefengz): The following is run by both worker and ps
  # threads. The distribute coordinator should run std server immediately
  # without configuring the session (or building the graph) on PS.
  with test.mock.patch.object(dc, '_run_std_server',
                              self._make_mock_run_std_server()):
    batch_size = 64
    steps = 10
    strategy = strategy_cls(num_gpus_per_worker=context.num_gpus())
    verification_callback.is_between_graph = \
        strategy.extended.experimental_between_graph

    train_ds, _ = _mnist_synthetic_dataset(batch_size, steps)
    val_ds, _ = _mnist_synthetic_dataset(batch_size, steps)
    with strategy.scope():
      model = _get_model((28, 28, 1))

      # TODO(b/123868066): Verify callback for model.evaluate().
      callbacks_for_fit = nest.flatten(
          kwargs.get('verification_callback', []))
      history = model.fit(
          x=train_ds,
          epochs=num_epoch,
          steps_per_epoch=steps,
          validation_data=val_ds,
          validation_steps=steps,
          callbacks=callbacks_for_fit)
    self.assertIsInstance(history, keras.callbacks.History)
Example 14: benchmark_defun_matmul_100_by_784_GPU
def benchmark_defun_matmul_100_by_784_GPU(self):
  if not context.num_gpus():
    return
  with context.device(GPU):
    m = self._m_100_by_784.gpu()
    self._benchmark_defun_matmul(
        m, transpose_b=True, num_iters=self._num_iters_100_by_784)
Example 15: benchmark_defun_matmul_2_by_2_GPU
def benchmark_defun_matmul_2_by_2_GPU(self):
  if not context.num_gpus():
    return
  with context.device(GPU):
    m = self._m_2_by_2.gpu()
    self._benchmark_defun_matmul(
        m, transpose_b=False, num_iters=self._num_iters_2_by_2)