This article collects typical usage examples of the Python method tensorflow.contrib.tpu.python.ops.tpu_ops.cross_replica_sum. If you have been wondering what exactly tpu_ops.cross_replica_sum does and how to use it, the curated examples below may help. You can also browse further usage examples from its containing module, tensorflow.contrib.tpu.python.ops.tpu_ops.
The following presents 7 code examples of tpu_ops.cross_replica_sum, sorted by popularity by default.
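As context for the examples, here is a minimal sketch of the canonical call pattern (an illustration for this page, not taken from any example below). It assumes a TF 1.x environment where tensorflow.contrib still exists; the op only compiles when the computation actually runs on a TPU:

import tensorflow as tf
from tensorflow.contrib import tpu
from tensorflow.contrib.tpu.python.ops import tpu_ops

def computation():
  x = tf.ones([2, 2])
  # group_assignment=None sums across all replicas of the computation.
  return tpu_ops.cross_replica_sum(x, group_assignment=None)

# tpu.rewrite wraps the computation so it can be compiled for a TPU.
summed = tpu.rewrite(computation)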
Example 1: cross_replica_average
# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
def cross_replica_average(inputs, num_shards=None, num_shards_per_group=None):
  """Calculates the average of `inputs` across TPU replicas."""
  # If num_shards_per_group is defined, apply distributed batch norm.
  group_assignment = None
  if num_shards_per_group is not None and num_shards_per_group > 0:
    if num_shards % num_shards_per_group != 0:
      raise ValueError(
          'num_shards: %d mod num_shards_per_group: %d, should be 0' %
          (num_shards, num_shards_per_group))
    num_groups = num_shards // num_shards_per_group
    group_assignment = [[
        x for x in range(num_shards) if x // num_shards_per_group == y
    ] for y in range(num_groups)]
  # The final division needs num_shards_per_group, so callers must pass it.
  return tpu_ops.cross_replica_sum(inputs, group_assignment) / math_ops.cast(
      num_shards_per_group, inputs.dtype)
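To make the grouping concrete, here is a pure-Python check of the group_assignment this helper builds, using hypothetical values num_shards=8 and num_shards_per_group=2:

num_shards, num_shards_per_group = 8, 2
num_groups = num_shards // num_shards_per_group
group_assignment = [[x for x in range(num_shards)
                     if x // num_shards_per_group == y]
                    for y in range(num_groups)]
print(group_assignment)  # [[0, 1], [2, 3], [4, 5], [6, 7]]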
Example 2: apply_gradients
# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
  """Applies gradients after summing them across TPU replicas.

  Adapted from:
  https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py#L142

  This handles the case of multiple optimizers, where `grads_and_vars`
  is a list of lists with outer length num_optimizers: each optimizer's
  gradients are summed across replicas with cross_replica_sum.
  """
  if self._multi_mode:
    summed_grads_and_vars = []
    for opt_idx, curr_gv in enumerate(grads_and_vars):
      curr_summed_grads_and_vars = self._cross_replica_sum(curr_gv)
      summed_grads_and_vars.insert(opt_idx, curr_summed_grads_and_vars)
  else:
    summed_grads_and_vars = self._cross_replica_sum(grads_and_vars)
  return self._opt.apply_gradients(summed_grads_and_vars, global_step, name)
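A CPU-runnable sketch of the control flow above, with the all-reduce stubbed out (the real self._cross_replica_sum needs a TPU); all names and values here are hypothetical:

def fake_cross_replica_sum(grads_and_vars):
  # Stand-in for the real all-reduce: identity on (grad, var) pairs.
  return [(g, v) for g, v in grads_and_vars]

multi_mode = True
grads_and_vars = [[(0.1, 'w0'), (0.2, 'b0')],  # optimizer 0
                  [(0.3, 'w1')]]               # optimizer 1
if multi_mode:
  # insert(opt_idx, ...) inside enumerate is equivalent to append, so the
  # loop above can also be written as a list comprehension.
  summed = [fake_cross_replica_sum(gv) for gv in grads_and_vars]
else:
  summed = fake_cross_replica_sum(grads_and_vars)
print(summed)  # [[(0.1, 'w0'), (0.2, 'b0')], [(0.3, 'w1')]]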
Example 3: _cross_replica_average
# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
def _cross_replica_average(self, t, num_shards_per_group):
  """Calculates the average value of input tensor across TPU replicas."""
  num_shards = tpu_function.get_tpu_context().number_of_shards
  group_assignment = None
  if num_shards_per_group > 1:
    if num_shards % num_shards_per_group != 0:
      raise ValueError(
          "num_shards: %d mod shards_per_group: %d, should be 0"
          % (num_shards, num_shards_per_group))
    num_groups = num_shards // num_shards_per_group
    group_assignment = [
        [x for x in range(num_shards) if x // num_shards_per_group == y]
        for y in range(num_groups)]
  return tpu_ops.cross_replica_sum(t, group_assignment) / tf.cast(
      num_shards_per_group, t.dtype)
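Note that num_shards here is read from the ambient TPU context; outside tpu.shard()/tpu.rewrite() the shard count is None, so the modulo check above would fail. A quick check, assuming TF 1.x contrib is available:

from tensorflow.contrib.tpu.python.tpu import tpu_function

# Outside a TPU computation no shard count has been set yet.
print(tpu_function.get_tpu_context().number_of_shards)  # None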
Example 4: cross_replica_average
# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
def cross_replica_average(inputs, num_shards, distributed_group_size):
  """Calculates the average value of inputs tensor across TPU replicas."""
  group_assignment = None
  if num_shards is not None and distributed_group_size != num_shards:
    group_size = distributed_group_size
    group_assignment = []
    for g in range(num_shards // group_size):
      replica_ids = [g * group_size + i for i in range(group_size)]
      group_assignment.append(replica_ids)
  return tpu_ops.cross_replica_sum(inputs, group_assignment) / tf.cast(
      distributed_group_size, inputs.dtype)
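The contiguous grouping built here produces the same assignment as the filter comprehension used in Examples 1, 3, and 5; a pure-Python check with hypothetical values:

num_shards, group_size = 8, 4
contiguous = [[g * group_size + i for i in range(group_size)]
              for g in range(num_shards // group_size)]
filtered = [[x for x in range(num_shards) if x // group_size == y]
            for y in range(num_shards // group_size)]
assert contiguous == filtered  # both are [[0, 1, 2, 3], [4, 5, 6, 7]]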
Example 5: _cross_replica_average
# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
def _cross_replica_average(self, t, num_shards_per_group):
  """Calculates the average value of input tensor across TPU replicas."""
  num_shards = tpu_function.get_tpu_context().number_of_shards
  group_assignment = None
  if num_shards_per_group > 1:
    if num_shards % num_shards_per_group != 0:
      raise ValueError('num_shards: %d mod shards_per_group: %d, should be 0'
                       % (num_shards, num_shards_per_group))
    num_groups = num_shards // num_shards_per_group
    group_assignment = [[
        x for x in range(num_shards) if x // num_shards_per_group == y
    ] for y in range(num_groups)]
  return tpu_ops.cross_replica_sum(t, group_assignment) / tf.cast(
      num_shards_per_group, t.dtype)
Example 6: cross_replica_average
# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
def cross_replica_average(t, num_groups=1):
  """Calculates the average value of input tensor across TPU replicas."""
  num_shards = tpu_function.get_tpu_context().number_of_shards
  num_shards_per_group = 1
  group_assignment = None
  if num_groups > 0:
    if num_shards % num_groups != 0:
      raise ValueError('num_shards: %d mod num_groups: %d, should be 0' %
                       (num_shards, num_groups))
    num_shards_per_group = num_shards // num_groups
    group_assignment = [[
        x for x in range(num_shards) if x // num_shards_per_group == y
    ] for y in range(num_groups)]
  return tpu_ops.cross_replica_sum(t, group_assignment) / math_ops.cast(
      num_shards_per_group, t.dtype)
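Unlike the earlier helpers, this one is parameterized by the number of groups rather than the group size; with the default num_groups=1, every replica falls into a single group and the result is a plain global average. A pure-Python check with hypothetical num_shards=8 and num_groups=2:

num_shards, num_groups = 8, 2
num_shards_per_group = num_shards // num_groups
group_assignment = [[x for x in range(num_shards)
                     if x // num_shards_per_group == y]
                    for y in range(num_groups)]
print(group_assignment)  # [[0, 1, 2, 3], [4, 5, 6, 7]]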
Example 7: _cross_replica_sum
# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
def _cross_replica_sum(self, grads_and_vars):
  """Sums each gradient across TPU replicas; None gradients pass through."""
  summed_grads_and_vars = []
  for (grad, var) in grads_and_vars:
    if grad is None:
      summed_grads_and_vars.append((grad, var))
    else:
      with ops.colocate_with(grad):
        summed_grads_and_vars.append(
            (tpu_ops.cross_replica_sum(grad, self._group_assignment), var))
  return summed_grads_and_vars
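This is the same pattern that tf.contrib.tpu.CrossShardOptimizer applies internally; a hedged usage sketch, assuming TF 1.x:

import tensorflow as tf

base_opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# Inside a TPU computation, apply_gradients() all-reduces each gradient
# across replicas (as in _cross_replica_sum above) before applying it.
opt = tf.contrib.tpu.CrossShardOptimizer(base_opt)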