

Python tpu_function.get_tpu_context method: code examples

This article collects typical usage examples of the Python method tensorflow.contrib.tpu.python.tpu.tpu_function.get_tpu_context, gathered from open-source projects. If you are unsure how to use tpu_function.get_tpu_context, the curated examples below should help. You can also explore other usage examples from the tensorflow.contrib.tpu.python.tpu.tpu_function module.


The sections below show 10 code examples of tpu_function.get_tpu_context, sorted by popularity.
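Before the examples, here is a minimal sketch of what the method exposes, assuming a TF 1.x installation where the tf.contrib package layout is still available. get_tpu_context() returns the current TpuContext object, whose number_of_shards attribute is None outside of a TPU sharding context:

from tensorflow.contrib.tpu.python.tpu import tpu_function

# Outside any TPU sharding context the shard count is unset.
assert tpu_function.get_tpu_context().number_of_shards is None

# Inside tpu_shard_context the configured shard count is reported.
with tpu_function.tpu_shard_context(8):
  assert tpu_function.get_tpu_context().number_of_shards == 8

Every example below ultimately reads this number_of_shards attribute.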

Example 1: get_num_replicas

# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_function [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_function import get_tpu_context [as alias]
def get_num_replicas():
  """Returns the number of replicas.

  If not operating in a supported replicated context this function will return
  1.
  """

  tf_replicator = get_tf_replicator()

  if tf_replicator:
    return tf_replicator.num_replicas_in_sync
  elif tf.distribute.has_strategy():
    return tf.distribute.get_strategy().num_replicas_in_sync
  else:
    # I'm assuming replicas and shards are always equal until someone tells me
    # different.
    num_replicas = tpu_function.get_tpu_context().number_of_shards
    if num_replicas:
      return num_replicas
    else:
      return 1 
Developer: tensorflow | Project: kfac | Lines: 23 | Source: utils.py

Example 2: _moments

# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_function [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_function import get_tpu_context [as alias]
def _moments(self, inputs, reduction_axes, keep_dims):
    """Compute the mean and variance: it overrides the original _moments."""
    shard_mean, shard_variance = super(TpuBatchNormalization, self)._moments(
        inputs, reduction_axes, keep_dims=keep_dims)

    num_shards = tpu_function.get_tpu_context().number_of_shards or 1
    if num_shards <= 8:  # Skip cross_replica for 2x2 or smaller slices.
      num_shards_per_group = 1
    else:
      num_shards_per_group = max(8, num_shards // 8)
    tf.logging.info('TpuBatchNormalization with num_shards_per_group %s',
                    num_shards_per_group)
    if num_shards_per_group > 1:
      # Compute variance using: Var[X]= E[X^2] - E[X]^2.
      shard_square_of_mean = tf.math.square(shard_mean)
      shard_mean_of_square = shard_variance + shard_square_of_mean
      group_mean = self._cross_replica_average(
          shard_mean, num_shards_per_group)
      group_mean_of_square = self._cross_replica_average(
          shard_mean_of_square, num_shards_per_group)
      group_variance = group_mean_of_square - tf.math.square(group_mean)
      return (group_mean, group_variance)
    else:
      return (shard_mean, shard_variance) 
Developer: artyompal | Project: tpu_models | Lines: 26 | Source: utils.py
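Written out, the combination in example 2 is the identity Var[X] = E[X^2] - (E[X])^2 applied at the group level. This is valid provided every shard in a group processes the same number of samples, which is what makes a plain cross-replica average of per-shard moments legitimate (G below is the number of shards per group):

$$\mathbb{E}_s[X^2] = \mathrm{Var}_s[X] + \bigl(\mathbb{E}_s[X]\bigr)^2,\qquad \mathbb{E}_{\text{group}}[X] = \frac{1}{G}\sum_{s=1}^{G}\mathbb{E}_s[X],\qquad \mathbb{E}_{\text{group}}[X^2] = \frac{1}{G}\sum_{s=1}^{G}\mathbb{E}_s[X^2],$$
$$\mathrm{Var}_{\text{group}}[X] = \mathbb{E}_{\text{group}}[X^2] - \bigl(\mathbb{E}_{\text{group}}[X]\bigr)^2.$$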

Example 3: _cross_replica_average

# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_function [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_function import get_tpu_context [as alias]
def _cross_replica_average(self, t, num_shards_per_group):
        """Calculates the average value of input tensor across TPU replicas."""
        num_shards = tpu_function.get_tpu_context().number_of_shards
        group_assignment = None
        if num_shards_per_group > 1:
            if num_shards % num_shards_per_group != 0:
                raise ValueError(
                    "num_shards: %d mod shards_per_group: %d, should be 0"
                    % (num_shards, num_shards_per_group)
                )
            num_groups = num_shards // num_shards_per_group
            group_assignment = [
                [x for x in range(num_shards) if x // num_shards_per_group == y]
                for y in range(num_groups)
            ]
        return tpu_ops.cross_replica_sum(t, group_assignment) / tf.cast(
            num_shards_per_group, t.dtype
        ) 
Developer: neuroailab | Project: tfutils | Lines: 20 | Source: crossdevice_batchnorm.py
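To make the nested list comprehension in example 3 concrete, here is a tiny standalone sketch; the shard counts are illustrative, not taken from any particular TPU topology:

num_shards = 8
num_shards_per_group = 4
num_groups = num_shards // num_shards_per_group  # 2
group_assignment = [
    [x for x in range(num_shards) if x // num_shards_per_group == y]
    for y in range(num_groups)
]
print(group_assignment)  # [[0, 1, 2, 3], [4, 5, 6, 7]]

Each inner list is one group of replica ids, and cross_replica_sum then reduces only within each group.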

Example 4: cross_replica_mean

# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_function [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_function import get_tpu_context [as alias]
def cross_replica_mean(inputs, group_size=None):
  """Calculates the average value of inputs tensor across TPU replicas."""
  num_replicas = get_tpu_context().number_of_shards
  if not group_size:
    group_size = num_replicas
  if group_size == 1:
    return inputs
  if group_size != num_replicas:
    group_assignment = []
    assert num_replicas % group_size == 0
    for g in range(num_replicas // group_size):
      replica_ids = [g * group_size + i for i in range(group_size)]
      group_assignment.append(replica_ids)
  else:
    group_assignment = None
  return tf.contrib.tpu.cross_replica_sum(inputs, group_assignment) / tf.cast(
      group_size, inputs.dtype) 
Developer: google-research | Project: s4l | Lines: 19 | Source: tpu_ops.py

Example 5: cross_replica_mean

# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_function [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_function import get_tpu_context [as alias]
def cross_replica_mean(inputs, group_size=None):
  """Calculates the average value of inputs tensor across TPU replicas."""
  num_replicas = tpu_function.get_tpu_context().number_of_shards
  if not group_size:
    group_size = num_replicas
  if group_size == 1:
    return inputs
  if group_size != num_replicas:
    group_assignment = []
    assert num_replicas % group_size == 0
    for g in range(num_replicas // group_size):
      replica_ids = [g * group_size + i for i in range(group_size)]
      group_assignment.append(replica_ids)
  else:
    group_assignment = None
  return tf.contrib.tpu.cross_replica_sum(inputs, group_assignment) / tf.cast(
      group_size, inputs.dtype) 
Developer: google | Project: compare_gan | Lines: 19 | Source: tpu_ops.py

Example 6: is_tpu_replicated

# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_function [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_function import get_tpu_context [as alias]
def is_tpu_replicated():
  is_tpu_strategy = (tf.distribute.has_strategy() and
                     tf.distribute.get_replica_context() and
                     isinstance(tf.distribute.get_strategy(),
                                tf.distribute.experimental.TPUStrategy))
  num_shards = tpu_function.get_tpu_context().number_of_shards
  return is_tpu_strategy or num_shards is not None 
Developer: tensorflow | Project: kfac | Lines: 9 | Source: utils.py
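A hedged usage sketch (assuming TF 1.x with tf.contrib available and is_tpu_replicated defined as above): outside any distribution strategy or sharding context the function returns False, while inside tpu_shard_context number_of_shards is set and it returns True:

from tensorflow.contrib.tpu.python.tpu import tpu_function

assert not is_tpu_replicated()       # no strategy, number_of_shards is None
with tpu_function.tpu_shard_context(2):
  assert is_tpu_replicated()         # number_of_shards is now set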

Example 7: _cross_replica_average

# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_function [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_function import get_tpu_context [as alias]
def _cross_replica_average(self, t, num_shards_per_group):
    """Calculates the average value of input tensor across TPU replicas."""
    num_shards = tpu_function.get_tpu_context().number_of_shards
    group_assignment = None
    if num_shards_per_group > 1:
      if num_shards % num_shards_per_group != 0:
        raise ValueError('num_shards: %d mod shards_per_group: %d, should be 0'
                         % (num_shards, num_shards_per_group))
      num_groups = num_shards // num_shards_per_group
      group_assignment = [[
          x for x in range(num_shards) if x // num_shards_per_group == y
      ] for y in range(num_groups)]
    return tpu_ops.cross_replica_sum(t, group_assignment) / tf.cast(
        num_shards_per_group, t.dtype) 
Developer: artyompal | Project: tpu_models | Lines: 16 | Source: utils.py

Example 8: cross_replica_average

# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_function [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_function import get_tpu_context [as alias]
def cross_replica_average(t, num_groups=1):
  """Calculates the average value of input tensor across TPU replicas."""
  num_shards = tpu_function.get_tpu_context().number_of_shards
  num_shards_per_group = 1
  group_assignment = None
  if num_groups > 0:
    if num_shards % num_groups != 0:
      raise ValueError('num_shards: %d mod num_groups: %d, should be 0' %
                       (num_shards, num_groups))
    num_shards_per_group = num_shards // num_groups
    group_assignment = [[
        x for x in range(num_shards) if x // num_shards_per_group == y
    ] for y in range(num_groups)]
  return tpu_ops.cross_replica_sum(t, group_assignment) / math_ops.cast(
      num_shards_per_group, t.dtype) 
Developer: artyompal | Project: tpu_models | Lines: 17 | Source: tpu_normalization.py

Example 9: compute_gradients

# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_function [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_function import get_tpu_context [as alias]
def compute_gradients(self, loss, var_list=None, **kwargs):
        """ This is adapted from:
        https://github.com/tensorflow/tensorflow/blob/r1.13/tensorflow/contrib/tpu/python/tpu/tpu_optimizer.py#L100
        loss is a list of lists of outer length num_optimizers.
        Therefore, for each optimizer's loss, we multiply each loss by the
        scale
        """
        num_shards = tpu_function.get_tpu_context().number_of_shards
        if num_shards is None:
            logging.warning(
                    "CrossShardMultiOptimizer should be used within a tpu_shard_context, but "
                    "got unset number_of_shards. Assuming 1.")
            num_shards = 1

        subgroup_size = self._verify_and_get_subgroup_size(self._group_assignment,
                                                           num_shards)

        if self._multi_mode:
            if not isinstance(loss, list):
                loss = [loss]
            scaled_losses = []
            for opt_idx, curr_loss in enumerate(loss):
                scaled_loss = self._rescale_loss(curr_loss, num_shards, subgroup_size)
                scaled_losses.insert(opt_idx, scaled_loss)
        else:
            scaled_losses = self._rescale_loss(loss, num_shards, subgroup_size)

        return self._opt.compute_gradients(scaled_losses, var_list=var_list, **kwargs) 
Developer: neuroailab | Project: tfutils | Lines: 30 | Source: tpu_optimizer.py
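The helper _rescale_loss is not shown in the snippet above. As a hedged illustration of the usual cross-shard convention (this mirrors the scaling used by TensorFlow's CrossShardOptimizer, not necessarily the exact tfutils implementation), the per-replica loss is divided by the number of shards, or by the subgroup size when a group assignment is used, so that summing gradients with cross_replica_sum yields a global mean:

# Hypothetical sketch; the real _rescale_loss in tfutils may differ.
def _rescale_loss_sketch(loss, num_shards, subgroup_size):
  scale = 1.0 / num_shards
  if subgroup_size:
    # Gradients are only summed within a subgroup, so scale by its size.
    scale = 1.0 / subgroup_size
  if scale != 1.0:
    loss = loss * scale
  return loss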

Example 10: _moments

# Required import: from tensorflow.contrib.tpu.python.tpu import tpu_function [as alias]
# Or: from tensorflow.contrib.tpu.python.tpu.tpu_function import get_tpu_context [as alias]
def _moments(self, inputs, reduction_axes, keep_dims):
        """Compute the mean and variance: it overrides the original _moments."""
        shard_mean, shard_variance = super(CRTPUBatchNormalization, self)._moments(
            inputs, reduction_axes, keep_dims=keep_dims
        )

        num_shards = tpu_function.get_tpu_context().number_of_shards or 1
        if num_shards < 8:  # Skip cross_replica for 2x2 or smaller slices. Note: original code has <= 8, but we want to do this on standard TPUs where num_shards == 8.
            num_shards_per_group = 1
        else:
            num_shards_per_group = max(8, num_shards // 8)
        tf.logging.info(
            "CRTPUBatchNormalization with num_shards_per_group %s", num_shards_per_group
        )
        if num_shards_per_group > 1:
            # Each group has multiple replicas: here we compute group mean/variance by
            # aggregating per-replica mean/variance.
            group_mean = self._cross_replica_average(shard_mean, num_shards_per_group)
            group_variance = self._cross_replica_average(
                shard_variance, num_shards_per_group
            )

            # Group variance needs to also include the difference between shard_mean
            # and group_mean. Note: this is from an older version of the code,
            # but I prefer this as it explicitly avoids needing to relu E[X^2] - E[X]^2
            # in case of numerical issues to prevent small negative variances.
            mean_distance = tf.square(group_mean - shard_mean)
            group_variance += self._cross_replica_average(
                mean_distance, num_shards_per_group
            )
            return (group_mean, group_variance)
        else:
            return (shard_mean, shard_variance) 
Developer: neuroailab | Project: tfutils | Lines: 35 | Source: crossdevice_batchnorm.py
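The correction term in example 10 is the law of total variance: with equally sized shards in a group, the group variance decomposes into the average within-shard variance plus the average squared distance of each shard mean from the group mean. This avoids the E[X^2] - E[X]^2 form of example 2, which can go slightly negative from round-off:

$$\mathrm{Var}_{\text{group}}[X] = \frac{1}{G}\sum_{s=1}^{G}\mathrm{Var}_s[X] + \frac{1}{G}\sum_{s=1}^{G}\bigl(\mu_s - \mu_{\text{group}}\bigr)^2, \qquad \mu_{\text{group}} = \frac{1}{G}\sum_{s=1}^{G}\mu_s.$$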


Note: The tensorflow.contrib.tpu.python.tpu.tpu_function.get_tpu_context examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, so please consult each project's license before distributing or using the code. Do not reproduce this article without permission.