This page collects typical usage examples of the horovod.tensorflow.allreduce method in Python. If you are wondering what tensorflow.allreduce does or how to use it, the curated examples here may help; you can also explore the other methods of the horovod.tensorflow module.
Below are 15 code examples of tensorflow.allreduce, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python samples.
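Before the examples, a minimal usage sketch (not taken from the examples below; the tensor values are illustrative) showing a TF1-style allreduce across all Horovod ranks:

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
# Each rank contributes a different tensor; average=True divides the
# summed result by hvd.size().
tensor = tf.constant([1.0, 2.0]) * float(hvd.rank())
averaged = hvd.allreduce(tensor, average=True)
with tf.Session() as session:
    print(session.run(averaged))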
Example 1: update_state
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def update_state(self, sparse_predictions, samples, logit_length=None):
    """Accumulate errors and counts."""
    validated_label = tf.cast(
        tf.sparse.from_dense(samples["output"]), dtype=tf.int64
    )
    labels_counter = tf.cast(tf.shape(validated_label.values)[0], tf.float32)
    num_errs = tf.edit_distance(
        sparse_predictions, validated_label, normalize=False
    )
    num_errs = tf.reduce_sum(num_errs)
    if self.rank_size > 1:
        # Sum (not average) the raw counts across ranks, so a global
        # error rate can later be formed as error_count / total_count.
        num_errs = hvd.allreduce(num_errs, average=False)
        labels_counter = hvd.allreduce(labels_counter, average=False)
    self.error_count(num_errs)
    self.total_count(labels_counter)
    return num_errs, labels_counter
Example 2: get_gradients
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def get_gradients(self, loss, params):
    """
    Compute gradients of all trainable variables.
    See Optimizer.get_gradients() for more info.
    In DistributedOptimizer, get_gradients() is overridden to also
    allreduce the gradients before returning them.
    """
    gradients = super(self.__class__, self).get_gradients(loss, params)
    if hvd.size() > 1:
        averaged_gradients = []
        with tf.name_scope(self._name + "_Allreduce"):
            for grad in gradients:
                if grad is not None:
                    avg_grad = hvd.allreduce(grad, device_dense=self._device_dense,
                                             device_sparse=self._device_sparse)
                    averaged_gradients.append(avg_grad)
                else:
                    averaged_gradients.append(None)
        return averaged_gradients
    else:
        return gradients
Example 3: DistributedOptimizer
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def DistributedOptimizer(optimizer, name=None, device_dense='', device_sparse=''):
    """
    An optimizer that wraps another keras.optimizers.Optimizer, using an
    allreduce to average gradient values before applying gradients to
    model weights.
    Args:
        optimizer: Optimizer to use for computing gradients and applying updates.
        name: Optional name prefix for the operations created when applying
            gradients. Defaults to "Distributed" followed by the provided
            optimizer type.
        device_dense: Device to be used for dense tensors. Uses GPU by default
            if Horovod was built with HOROVOD_GPU_ALLREDUCE.
        device_sparse: Device to be used for sparse tensors. Uses GPU by default
            if Horovod was built with HOROVOD_GPU_ALLGATHER.
    """
    # We dynamically create a new class that inherits from the optimizer that
    # was passed in. The goal is to override the get_gradients() method with
    # an allreduce implementation. The new class keeps the same name as the
    # optimizer it wraps, so a saved model can be restored without Horovod.
    cls = type(optimizer.__class__.__name__, (optimizer.__class__,),
               dict(_DistributedOptimizer.__dict__))
    return cls(name, device_dense, device_sparse, **optimizer.get_config())
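For context, a hedged usage sketch of the wrapper above (the model and learning rate are illustrative, not from the original source):

import horovod.tensorflow as hvd
from tensorflow import keras

hvd.init()
model = keras.Sequential([keras.layers.Dense(10, input_shape=(32,))])
# The usual Horovod recipe: scale the learning rate by the number of
# workers, then wrap the optimizer so gradients are allreduced.
opt = DistributedOptimizer(keras.optimizers.SGD(lr=0.01 * hvd.size()))
model.compile(loss='mse', optimizer=opt)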
Example 4: test_horovod_allreduce_type_error
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def test_horovod_allreduce_type_error(self):
    """Test that the allreduce raises an error if different ranks try to
    send tensors of different type."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        return
    with self.test_session(config=self.config) as session:
        # Same dimensions on every rank, but the dtype differs between
        # even and odd ranks, which must trigger an error.
        dims = [17] * 3
        tensor = tf.ones(dims,
                         dtype=tf.int32 if rank % 2 == 0 else tf.float32)
        with self.assertRaises(tf.errors.FailedPreconditionError):
            session.run(hvd.allreduce(tensor))
Example 5: test_horovod_allreduce_cpu_gpu_error
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def test_horovod_allreduce_cpu_gpu_error(self):
    """Test that the allreduce raises an error if different ranks try to
    perform reduction on CPU and GPU."""
    # Only do this test if there are GPUs available.
    if not tf.test.is_gpu_available(cuda_only=True):
        return
    hvd.init()
    local_rank = hvd.local_rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        return
    device = "/gpu:%d" % local_rank if local_rank % 2 == 0 else "/cpu:0"
    with self.test_session(config=self.config) as session:
        with tf.device(device):
            # Same dims and dtype everywhere, but even ranks reduce on
            # GPU and odd ranks on CPU, which must trigger an error.
            dims = [17] * 3
            tensor = tf.ones(dims, dtype=tf.int32)
            with self.assertRaises(tf.errors.FailedPreconditionError):
                session.run(hvd.allreduce(tensor))
Example 6: _setup_graph
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def _setup_graph(self):
    num_gpu = cfg.TRAIN.NUM_GPUS
    if cfg.TRAINER == 'replicated':
        # TF bug in versions 1.11 and 1.12: https://github.com/tensorflow/tensorflow/issues/22750
        buggy_tf = get_tf_version_tuple() in [(1, 11), (1, 12)]
        # Use two predictor threads per GPU to get better throughput.
        self.num_predictor = num_gpu if buggy_tf else num_gpu * 2
        self.predictors = [self._build_predictor(k % num_gpu) for k in range(self.num_predictor)]
        self.dataflows = [get_eval_dataflow(self._eval_dataset,
                                            shard=k, num_shards=self.num_predictor)
                          for k in range(self.num_predictor)]
    else:
        # Only evaluate on the first machine, because evaluation assumes
        # that all horovod workers share the filesystem. Alternatively,
        # we could evaluate on all ranks and use allgather, but allgather
        # sometimes hangs.
        self._horovod_run_eval = hvd.rank() == hvd.local_rank()
        if self._horovod_run_eval:
            self.predictor = self._build_predictor(0)
            self.dataflow = get_eval_dataflow(self._eval_dataset,
                                              shard=hvd.local_rank(), num_shards=hvd.local_size())
        self.barrier = hvd.allreduce(tf.random_normal(shape=[1]))
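The self.barrier op above uses allreduce purely for synchronization: every rank must run it before any rank can proceed. A minimal hedged sketch of the idiom in isolation (assuming a TF1 session):

import tensorflow as tf
import horovod.tensorflow as hvd

hvd.init()
# The reduced value is discarded; the op only forces all ranks to meet here.
barrier = hvd.allreduce(tf.random_normal(shape=[1]))
with tf.Session() as session:
    session.run(barrier)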
Example 7: _setup_graph
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def _setup_graph(self):
    self._placeholder = tf.placeholder(tf.float32, shape=[2], name='to_be_reduced')
    self._reduced = hvd.allreduce(self._placeholder, average=False)
Example 8: allreduce
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def allreduce(value, name=None, average=True):
    """
    Perform an allreduce on a tensor-compatible value.
    Arguments:
        value: A tensor-compatible value to reduce.
            The shape of the input must be identical across all ranks.
        name: Optional name for the constants created by this operation.
        average: If True, computes the average over all ranks.
            Otherwise, computes the sum over all ranks.
    """
    allreduce_op = hvd.allreduce(tf.constant(value, name=name), average=average)
    return K.get_session().run(allreduce_op)
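A hedged usage sketch of this Keras-level helper (the metric value is illustrative): each worker passes its local scalar and receives the mean across ranks as a concrete Python value, since the helper runs the op through K.get_session().

local_error = 0.25  # hypothetical per-worker metric
global_error = allreduce(local_error, name='error_metric', average=True)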
Example 9: __init__
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def __init__(self, device=''):
    """
    Construct a new MetricAverageCallback that will average metrics
    across all processes at the end of the epoch.
    Args:
        device: Device to be used for allreduce. Uses GPU by default
            if Horovod was built with HOROVOD_GPU_ALLREDUCE.
    """
    super(MetricAverageCallback, self).__init__()
    self.variables = {}
    self.allreduce_ops = {}
    self.device = device
Example 10: _make_variable
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def _make_variable(self, metric, value):
    with tf.name_scope('MetricAverageCallback'):
        var = tf.Variable(value, name=metric)
        K.get_session().run(var.initializer)
        allreduce_op = hvd.allreduce(var, device_dense=self.device)
        return var, allreduce_op
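Examples 9 and 10 show only part of the callback; the full class (as in horovod.keras) also hooks the end of each epoch to run these allreduce ops over the logged metrics. A hedged usage sketch (model, x_train, y_train are illustrative):

callbacks = [
    # Average logged metrics (e.g. val_loss) across workers at epoch end.
    MetricAverageCallback(device='/cpu:0'),
]
model.fit(x_train, y_train, epochs=5, callbacks=callbacks)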
Example 11: test_horovod_allreduce_average
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def test_horovod_allreduce_average(self):
    """Test on CPU that the allreduce correctly averages 1D, 2D, 3D tensors."""
    hvd.init()
    size = hvd.size()
    with self.test_session() as session:
        dtypes = [tf.int32, tf.int64, tf.float32, tf.float64]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            with tf.device("/cpu:0"):
                # The same seed on every rank yields identical tensors,
                # so the average should equal the local tensor.
                tf.set_random_seed(1234)
                tensor = tf.random_uniform(
                    [17] * dim, -100, 100, dtype=dtype)
                averaged = hvd.allreduce(tensor, average=True)
                max_difference = tf.reduce_max(tf.abs(averaged - tensor))
                # Threshold for floating point equality depends on the number
                # of ranks, since we're comparing against precise multiplication.
                if dtype in [tf.int32, tf.int64]:
                    # Integer averaging truncates, so allow error up to size.
                    threshold = hvd.size()
                elif size <= 3:
                    threshold = 0
                elif size < 10:
                    threshold = 1e-4
                elif size < 15:
                    threshold = 5e-4
                else:
                    break
                diff = session.run(max_difference)
                self.assertTrue(diff <= threshold,
                                "hvd.allreduce produces incorrect results")
Example 12: test_horovod_allreduce_cpu_fused
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def test_horovod_allreduce_cpu_fused(self):
    """Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors
    with Tensor Fusion."""
    hvd.init()
    size = hvd.size()
    with self.test_session(config=self.config) as session:
        dtypes = [tf.int32, tf.int64, tf.float32, tf.float64]
        dims = [1, 2, 3]
        tests = []
        for dtype, dim in itertools.product(dtypes, dims):
            with tf.device("/cpu:0"):
                tf.set_random_seed(1234)
                tensor = tf.random_uniform(
                    [17] * dim, -100, 100, dtype=dtype)
                summed = hvd.allreduce(tensor, average=False)
                multiplied = tensor * size
                max_difference = tf.reduce_max(tf.abs(summed - multiplied))
                # Threshold for floating point equality depends on the number
                # of ranks, since we're comparing against precise multiplication.
                if size <= 3 or dtype in [tf.int32, tf.int64]:
                    threshold = 0
                elif size < 10:
                    threshold = 1e-4
                elif size < 15:
                    threshold = 5e-4
                else:
                    break
                test = max_difference <= threshold
                tests.append(test)
        # Evaluating all comparisons in a single run() lets Horovod fuse
        # the allreduce operations (Tensor Fusion).
        self.assertTrue(session.run(tf.reduce_all(tests)),
                        "hvd.allreduce produces incorrect results")
Example 13: test_horovod_allreduce_gpu
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def test_horovod_allreduce_gpu(self):
    """Test that the allreduce works on GPUs.
    This test will crash badly if used with an MPI implementation that does
    not support GPU memory transfers directly, as it will call MPI_Send on
    a GPU data pointer."""
    # Only do this test if there are GPUs available.
    if not tf.test.is_gpu_available(cuda_only=True):
        return
    hvd.init()
    local_rank = hvd.local_rank()
    size = hvd.size()
    with self.test_session(config=self.config) as session:
        dtypes = [tf.int32, tf.int64, tf.float16, tf.float32, tf.float64]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            with tf.device("/gpu:%d" % local_rank):
                tf.set_random_seed(1234)
                tensor = tf.random_uniform(
                    [17] * dim, -100, 100, dtype=dtype)
                summed = hvd.allreduce(tensor, average=False)
                multiplied = tensor * size
                max_difference = tf.reduce_max(tf.abs(summed - multiplied))
                # Threshold for floating point equality depends on the number
                # of ranks, since we're comparing against precise multiplication.
                if size <= 3 or dtype in [tf.int32, tf.int64]:
                    threshold = 0
                elif size < 10:
                    threshold = 1e-4
                elif size < 15:
                    threshold = 5e-4
                else:
                    return
                diff = session.run(max_difference)
                self.assertTrue(diff <= threshold,
                                "hvd.allreduce on GPU produces incorrect results")
Example 14: test_horovod_allreduce_grad
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def test_horovod_allreduce_grad(self):
    """Test the correctness of the allreduce gradient."""
    hvd.init()
    size = hvd.size()
    with self.test_session(config=self.config) as session:
        # As of TensorFlow v1.9, gradients are not supported on
        # integer tensors.
        dtypes = [tf.float32, tf.float64]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            with tf.device("/cpu:0"):
                tf.set_random_seed(1234)
                tensor = tf.random_uniform(
                    [5] * dim, -100, 100, dtype=dtype)
                summed = hvd.allreduce(tensor, average=False)
                grad_ys = tf.ones([5] * dim)
                grad = tf.gradients(summed, tensor, grad_ys)[0]
                grad_out = session.run(grad)
            # The gradient of a summing allreduce is itself an allreduce of
            # the upstream gradient, so all-ones grad_ys becomes size * ones.
            expected = np.ones([5] * dim) * size
            err = np.linalg.norm(expected - grad_out)
            self.assertLess(err, 0.00000001,
                            "gradient %s differs from expected %s, "
                            "error: %s" % (grad_out, expected, str(err)))
Example 15: _add_aggregation_ops
# Required module: from horovod import tensorflow [as alias]
# Or: from horovod.tensorflow import allreduce [as alias]
def _add_aggregation_ops(gradients_info, op_to_control_consumer_ops, config):
    grad_tensor = gradients_info._grad
    if isinstance(grad_tensor, tf.Tensor):
        # Dense gradient: allreduce the tensor itself and rewire all of
        # its consumers (and control consumers) to the aggregated result.
        grad = grad_tensor
        grad_consumers = [c for c in grad.consumers()]
        agg_grad = hvd.allreduce(grad, average=True)
        update_consumers(grad_consumers, grad, agg_grad)
        update_control_consumers(op_to_control_consumer_ops[grad.op],
                                 grad.op, agg_grad.op)
    else:
        # Sparse gradient (tf.IndexedSlices): allreduce values and indices
        # together, then rewire the consumers of each component.
        grad = grad_tensor.values
        indices = grad_tensor.indices
        dense_shape = grad_tensor.dense_shape
        grad_consumers = [c for c in grad.consumers()]
        indices_consumers = [c for c in indices.consumers()]
        agg_grad = \
            hvd.allreduce(tf.IndexedSlices(grad, indices, dense_shape),
                          average=config.average_sparse)
        update_consumers(grad_consumers, grad, agg_grad.values)
        update_consumers(indices_consumers, indices, agg_grad.indices)
        update_control_consumers(op_to_control_consumer_ops[grad.op],
                                 grad.op, agg_grad.values.op)
        update_control_consumers(
            op_to_control_consumer_ops[indices.op], indices.op,
            agg_grad.indices.op)
    gradients_info._grad = agg_grad
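The helpers update_consumers and update_control_consumers are not shown in this example. A hedged sketch of what update_consumers presumably does (rewiring each consumer op to read the aggregated tensor instead of the raw gradient, via TF1's private _update_input API; illustrative only, not the original implementation):

def update_consumers(consumer_ops, old_tensor, new_tensor):
    # For each op that consumed old_tensor, repoint the matching input
    # slot at new_tensor. _update_input is a private TF1 graph-editing
    # method; a real implementation may need extra care around control
    # dependencies and colocation constraints.
    for op in consumer_ops:
        for i, inp in enumerate(op.inputs):
            if inp is old_tensor:
                op._update_input(i, new_tensor)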