

Python tpu_ops.cross_replica_sum Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.tpu.python.ops.tpu_ops.cross_replica_sum. If you are wondering what tpu_ops.cross_replica_sum does and how to use it, the curated examples below should help. You can also explore further usage examples from the containing module, tensorflow.contrib.tpu.python.ops.tpu_ops.


The following presents 7 code examples of tpu_ops.cross_replica_sum, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
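All seven examples share one pattern: tpu_ops.cross_replica_sum adds a tensor up across TPU replicas (optionally within replica groups given by group_assignment), and dividing the sum by the replica or group count turns it into an average. Below is a minimal sketch of that pattern; the helper name replica_mean and the explicit num_replicas argument are illustrative, not taken from any project cited here, and the code must run inside a TPU-replicated computation (e.g. tpu.shard):

from tensorflow.contrib.tpu.python.ops import tpu_ops

def replica_mean(x, num_replicas):
  # With group_assignment=None, the sum spans every replica.
  return tpu_ops.cross_replica_sum(x) / num_replicas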

Example 1: cross_replica_average

# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
# Also needed here: from tensorflow.python.ops import math_ops
def cross_replica_average(inputs, num_shards=None, num_shards_per_group=None):
  """Averages `inputs` across TPU replicas via a cross replica sum."""
  # If num_shards_per_group is set, average within groups of replicas
  # instead of globally (used for distributed batch normalization).
  group_assignment = None
  if num_shards_per_group:
    if num_shards % num_shards_per_group != 0:
      raise ValueError(
          'num_shards: %d mod num_shards_per_group: %d, should be 0' %
          (num_shards, num_shards_per_group))
    num_groups = num_shards // num_shards_per_group
    # Consecutive replica IDs form each group, e.g. [[0, 1], [2, 3], ...].
    group_assignment = [[
        x for x in range(num_shards) if x // num_shards_per_group == y
    ] for y in range(num_groups)]
  else:
    # No grouping: the sum (and hence the average) spans all replicas.
    # The original snippet compared `num_shards_per_group > 0`, which
    # raises a TypeError on the default of None under Python 3.
    num_shards_per_group = num_shards

  return tpu_ops.cross_replica_sum(inputs, group_assignment) / math_ops.cast(
      num_shards_per_group, inputs.dtype)
Developer: mlperf, Project: training_results_v0.5, Lines: 18, Source: ssd_architecture.py
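To make the group_assignment comprehension concrete: for a hypothetical setup with num_shards = 8 and num_shards_per_group = 2 (values chosen purely for illustration), it produces four groups of consecutive replica IDs. This is plain Python and runs outside any TPU context:

num_shards = 8
num_shards_per_group = 2
num_groups = num_shards // num_shards_per_group  # 4
group_assignment = [
    [x for x in range(num_shards) if x // num_shards_per_group == y]
    for y in range(num_groups)
]
assert group_assignment == [[0, 1], [2, 3], [4, 5], [6, 7]]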

Example 2: apply_gradients

# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        """Sums gradients across replicas, then applies them.

        Adapted from:
        https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py#L142
        This version also handles multiple optimizers: in multi mode,
        grads_and_vars is a list of lists whose outer length is
        num_optimizers, and the cross_replica_sum is computed separately
        for each optimizer's (grad, var) pairs.
        """
        if self._multi_mode:
            summed_grads_and_vars = []
            for opt_idx, curr_gv in enumerate(grads_and_vars):
                curr_summed_grads_and_vars = self._cross_replica_sum(curr_gv)
                # insert at opt_idx is equivalent to append here, since the
                # list grows in enumeration order.
                summed_grads_and_vars.insert(opt_idx, curr_summed_grads_and_vars)
        else:
            summed_grads_and_vars = self._cross_replica_sum(grads_and_vars)

        return self._opt.apply_gradients(summed_grads_and_vars, global_step, name)
Developer: neuroailab, Project: tfutils, Lines: 20, Source: tpu_optimizer.py
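For reference, the nested structure the multi-optimizer branch expects looks like the following sketch (the placeholder strings stand in for real gradient and variable tensors; all names are illustrative only):

# One outer entry per optimizer; each inner list holds (grad, var) pairs,
# as returned by that optimizer's compute_gradients().
grads_and_vars = [
    [('grad_a0', 'var_a0'), ('grad_a1', 'var_a1')],  # optimizer 0
    [('grad_b0', 'var_b0'), ('grad_b1', 'var_b1')],  # optimizer 1
]
assert len(grads_and_vars) == 2  # num_optimizers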

Example 3: _cross_replica_average

# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
# Also needed here: import tensorflow as tf
#                   from tensorflow.contrib.tpu.python.tpu import tpu_function
def _cross_replica_average(self, t, num_shards_per_group):
        """Calculates the average value of input tensor across TPU replicas."""
        num_shards = tpu_function.get_tpu_context().number_of_shards
        group_assignment = None
        if num_shards_per_group > 1:
            if num_shards % num_shards_per_group != 0:
                raise ValueError(
                    "num_shards: %d mod shards_per_group: %d, should be 0"
                    % (num_shards, num_shards_per_group)
                )
            num_groups = num_shards // num_shards_per_group
            group_assignment = [
                [x for x in range(num_shards) if x // num_shards_per_group == y]
                for y in range(num_groups)
            ]
        return tpu_ops.cross_replica_sum(t, group_assignment) / tf.cast(
            num_shards_per_group, t.dtype
        ) 
Developer: neuroailab, Project: tfutils, Lines: 20, Source: crossdevice_batchnorm.py

Example 4: cross_replica_average

# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
# Also needed here: import tensorflow as tf
def cross_replica_average(inputs, num_shards, distributed_group_size):
  """Calculates the average value of the inputs tensor across TPU replicas."""
  group_assignment = None
  # Only build an explicit assignment when the group is smaller than the
  # full set of shards; otherwise the sum runs over all replicas.
  if num_shards is not None and distributed_group_size != num_shards:
    group_size = distributed_group_size
    group_assignment = []
    for g in range(num_shards // group_size):
      replica_ids = [g * group_size + i for i in range(group_size)]
      group_assignment.append(replica_ids)

  return tpu_ops.cross_replica_sum(inputs, group_assignment) / tf.cast(
      distributed_group_size, inputs.dtype)
Developer: mlperf, Project: training_results_v0.5, Lines: 14, Source: network_utils.py
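The explicit loop above builds the same contiguous replica groups as the comprehension in Example 1, just expressed imperatively; for instance, with num_shards = 8 and group_size = 4 (illustrative values):

num_shards, group_size = 8, 4
group_assignment = []
for g in range(num_shards // group_size):
  replica_ids = [g * group_size + i for i in range(group_size)]
  group_assignment.append(replica_ids)
assert group_assignment == [[0, 1, 2, 3], [4, 5, 6, 7]]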

Example 5: _cross_replica_average

# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
# Also needed here: import tensorflow as tf
#                   from tensorflow.contrib.tpu.python.tpu import tpu_function
def _cross_replica_average(self, t, num_shards_per_group):
    """Calculates the average value of input tensor across TPU replicas."""
    num_shards = tpu_function.get_tpu_context().number_of_shards
    group_assignment = None
    if num_shards_per_group > 1:
      if num_shards % num_shards_per_group != 0:
        raise ValueError('num_shards: %d mod shards_per_group: %d, should be 0'
                         % (num_shards, num_shards_per_group))
      num_groups = num_shards // num_shards_per_group
      group_assignment = [[
          x for x in range(num_shards) if x // num_shards_per_group == y
      ] for y in range(num_groups)]
    return tpu_ops.cross_replica_sum(t, group_assignment) / tf.cast(
        num_shards_per_group, t.dtype) 
Developer: artyompal, Project: tpu_models, Lines: 16, Source: utils.py

Example 6: cross_replica_average

# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
# Also needed here: from tensorflow.python.ops import math_ops
#                   from tensorflow.contrib.tpu.python.tpu import tpu_function
def cross_replica_average(t, num_groups=1):
  """Calculates the average value of input tensor across TPU replicas."""
  num_shards = tpu_function.get_tpu_context().number_of_shards
  num_shards_per_group = 1
  group_assignment = None
  if num_groups > 0:
    if num_shards % num_groups != 0:
      raise ValueError('num_shards: %d mod num_groups: %d, should be 0' %
                       (num_shards, num_groups))
    num_shards_per_group = num_shards // num_groups
    group_assignment = [[
        x for x in range(num_shards) if x // num_shards_per_group == y
    ] for y in range(num_groups)]
  return tpu_ops.cross_replica_sum(t, group_assignment) / math_ops.cast(
      num_shards_per_group, t.dtype) 
Developer: artyompal, Project: tpu_models, Lines: 17, Source: tpu_normalization.py
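Unlike Examples 1 and 3 through 5, this helper is parameterized by the number of groups rather than the group size: with num_shards = 8 and num_groups = 2, it derives num_shards_per_group = 4, builds group_assignment = [[0, 1, 2, 3], [4, 5, 6, 7]], and divides the per-group sum by 4 to obtain the group average.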

Example 7: _cross_replica_sum

# Required import: from tensorflow.contrib.tpu.python.ops import tpu_ops [as alias]
# Or: from tensorflow.contrib.tpu.python.ops.tpu_ops import cross_replica_sum [as alias]
# Also needed here: from tensorflow.python.framework import ops
def _cross_replica_sum(self, grads_and_vars):
        """Sums each gradient across replicas, passing `None` gradients through."""
        summed_grads_and_vars = []
        for (grad, var) in grads_and_vars:
            if grad is None:
                summed_grads_and_vars.append((grad, var))
            else:
                # Place the all-reduce on the same device as the gradient.
                with ops.colocate_with(grad):
                    summed_grads_and_vars.append((tpu_ops.cross_replica_sum(
                            grad, self._group_assignment), var))
        return summed_grads_and_vars
Developer: neuroailab, Project: tfutils, Lines: 12, Source: tpu_optimizer.py
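This is the helper that apply_gradients in Example 2 calls: None gradients (for variables the loss does not depend on) are passed through unchanged, while every real gradient is summed across the replicas given by self._group_assignment before the wrapped optimizer applies it.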


Note: The tensorflow.contrib.tpu.python.ops.tpu_ops.cross_replica_sum method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult each project's License before distributing or using the code. Do not reproduce this article without permission.