

Python v1.cond Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.cond method in Python. If you are wondering exactly how to use v1.cond, or looking for concrete examples of it in real code, the curated samples below may help. You can also explore other usage examples from the tensorflow.compat.v1 module.


The following presents 15 code examples of the v1.cond method, sorted by popularity by default.
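Before the project-specific examples, here is a minimal, self-contained usage sketch (illustrative only, not taken from any of the projects below): the predicate is a scalar boolean tensor, both branches are zero-argument callables, and cond returns the output of whichever branch the predicate selects.

# Minimal tf.compat.v1.cond sketch (illustrative, not from the projects below).
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.placeholder(tf.float32, shape=[])
# Square x when it is positive, otherwise negate it; only the selected
# branch's result is returned by cond.
y = tf.cond(tf.greater(x, 0.0), lambda: tf.square(x), lambda: -x)

with tf.Session() as sess:
  print(sess.run(y, feed_dict={x: 3.0}))   # 9.0
  print(sess.run(y, feed_dict={x: -2.0}))  # 2.0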

Example 1: _apply_cond

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
    """Apply conditionally if counter is zero."""
    grad_acc = self.get_slot(var, "grad_acc")

    def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
      total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
      adam_op = apply_fn(total_grad, var, *args, **kwargs)
      with tf.control_dependencies([adam_op]):
        grad_acc_to_zero_op = grad_acc.assign(
            tf.zeros_like(grad_acc), use_locking=self._use_locking)
      return tf.group(adam_op, grad_acc_to_zero_op)

    def accumulate_gradient(grad_acc, grad):
      assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
      return tf.group(assign_op)  # Strip return value

    return tf.cond(
        tf.equal(self._get_iter_variable(), 0),
        lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
        lambda: accumulate_gradient(grad_acc, grad)) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 22, Source file: multistep_with_adamoptimizer.py

Example 2: _apply_cond

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def _apply_cond(self, apply_fn, grad, var, *args, **kwargs):
    """Apply conditionally if counter is zero."""
    grad_acc = self.get_slot(var, "grad_acc")

    def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs):
      total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype)
      adam_op = apply_fn(total_grad, var, *args, **kwargs)
      with tf.control_dependencies([adam_op]):
        grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc),
                                              use_locking=self._use_locking)
      return tf.group(adam_op, grad_acc_to_zero_op)

    def accumulate_gradient(grad_acc, grad):
      assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking)
      return tf.group(assign_op)  # Strip return value

    return tf.cond(
        tf.equal(self._get_iter_variable(), 0),
        lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs),
        lambda: accumulate_gradient(grad_acc, grad)) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 22, Source file: multistep_optimizer.py

Example 3: _finish

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def _finish(self, update_ops, name_scope):
    """Updates beta_power variables every n batches and incrs counter."""
    iter_ = self._get_iter_variable()
    beta1_power, beta2_power = self._get_beta_accumulators()
    with tf.control_dependencies(update_ops):
      with tf.colocate_with(iter_):

        def update_beta_op():
          update_beta1 = beta1_power.assign(
              beta1_power * self._beta1_t,
              use_locking=self._use_locking)
          update_beta2 = beta2_power.assign(
              beta2_power * self._beta2_t,
              use_locking=self._use_locking)
          return tf.group(update_beta1, update_beta2)
        maybe_update_beta = tf.cond(
            tf.equal(iter_, 0), update_beta_op, tf.no_op)
        with tf.control_dependencies([maybe_update_beta]):
          update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
                                     use_locking=self._use_locking)
    return tf.group(
        *update_ops + [update_iter, maybe_update_beta], name=name_scope) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 24, Source file: multistep_optimizer.py

Example 4: pool

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)):
  """Pooling (supports "LEFT")."""
  with tf.name_scope("pool", values=[inputs]):
    static_shape = inputs.get_shape()
    if not static_shape or len(static_shape) != 4:
      raise ValueError("Inputs to conv must have statically known rank 4.")
    # Add support for left padding.
    if padding == "LEFT":
      assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1
      if len(static_shape) == 3:
        width_padding = 2 * (window_size[1] // 2)
        padding_ = [[0, 0], [width_padding, 0], [0, 0]]
      else:
        height_padding = 2 * (window_size[0] // 2)
        cond_padding = tf.cond(
            tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),
            lambda: tf.constant(2 * (window_size[1] // 2)))
        width_padding = 0 if static_shape[2] == 1 else cond_padding
        padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]
      inputs = tf.pad(inputs, padding_)
      inputs.set_shape([static_shape[0], None, None, static_shape[3]])
      padding = "VALID"

  return tf.nn.pool(inputs, window_size, pooling_type, padding, strides=strides) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 26, Source file: common_layers.py

Example 5: get_variable_ddi

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def get_variable_ddi(
    name, shape, value, init, initializer=None, dtype=tf.float32,
    regularizer=None, trainable=True):
  """Wrapper for data-dependent initialization."""
  kwargs = {"trainable": trainable}
  if initializer:
    kwargs["initializer"] = initializer
  if regularizer:
    kwargs["regularizer"] = regularizer
  w = tf.get_variable(name, shape, dtype, **kwargs)
  if isinstance(init, bool):
    if init:
      return assign(w, value)
    return w
  else:
    return tf.cond(init, lambda: assign(w, value), lambda: w) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 18, Source file: transformer_glow_layers_ops.py

Example 6: _random_crop

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def _random_crop(image, size):
  """Make a random crop of (`size` x `size`)."""
  bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
  random_image, bbox = distorted_bounding_box_crop(
      image,
      bbox,
      min_object_covered=0.1,
      aspect_ratio_range=(3. / 4, 4. / 3.),
      area_range=(0.08, 1.0),
      max_attempts=1,
      scope=None)
  bad = _at_least_x_are_true(tf.shape(image), tf.shape(random_image), 3)

  image = tf.cond(
      bad, lambda: _center_crop(_do_scale(image, size), size),
      lambda: tf.image.resize_bicubic([random_image], [size, size])[0])
  return image 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 19, Source file: imagenet.py

Example 7: softmax_summaries

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def softmax_summaries(loss, logits, one_hot_labels, name="softmax"):
  """Create the softmax summaries for this cross entropy loss.

  Args:
    loss: Cross-entropy loss.
    logits: The [batch_size, classes] float tensor representing the logits.
    one_hot_labels: The float tensor representing actual class ids. If this is
      [batch_size, classes], then we take the argmax of it first.
    name: Prepended to summary scope.
  """
  tf.summary.scalar(name + "_loss", loss)

  one_hot_labels = tf.cond(
      tf.equal(tf.rank(one_hot_labels),
               2), lambda: tf.to_int32(tf.argmax(one_hot_labels, 1)),
      lambda: tf.to_int32(one_hot_labels))

  in_top_1 = tf.nn.in_top_k(logits, one_hot_labels, 1)
  tf.summary.scalar(name + "_precision@1",
                    tf.reduce_mean(tf.to_float(in_top_1)))
  in_top_5 = tf.nn.in_top_k(logits, one_hot_labels, 5)
  tf.summary.scalar(name + "_precision@5",
                    tf.reduce_mean(tf.to_float(in_top_5))) 
Developer: magenta, Project: magenta, Lines of code: 25, Source file: utils.py

Example 8: _decode_masks

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def _decode_masks(self, parsed_tensors):
    """Decode a set of PNG masks to the tf.float32 tensors."""
    def _decode_png_mask(png_bytes):
      mask = tf.squeeze(
          tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
      mask = tf.cast(mask, dtype=tf.float32)
      mask.set_shape([None, None])
      return mask

    height = parsed_tensors['image/height']
    width = parsed_tensors['image/width']
    masks = parsed_tensors['image/object/mask']
    return tf.cond(
        tf.greater(tf.size(masks), 0),
        lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
        lambda: tf.zeros([0, height, width], dtype=tf.float32)) 
Developer: JunweiLiang, Project: Object_Detection_Tracking, Lines of code: 18, Source file: tf_example_decoder.py

Example 9: top_k_logits

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def top_k_logits(logits, k):
    if k == 0:
        # no truncation
        return logits

    def _top_k():
        values, _ = tf.nn.top_k(logits, k=k)
        min_values = values[:, -1, tf.newaxis]
        return tf.where(
            logits < min_values,
            tf.ones_like(logits, dtype=logits.dtype) * -1e10,
            logits,
        )
    return tf.cond(
        tf.equal(k, 0),
        lambda: logits,
        lambda: _top_k(),
    ) 
Developer: re-search, Project: gpt2-estimator, Lines of code: 20, Source file: sample.py

Example 10: _decode_and_random_crop

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def _decode_and_random_crop(image_bytes, image_size):
  """Make a random crop of image_size."""
  bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
  image = distorted_bounding_box_crop(
      image_bytes,
      bbox,
      min_object_covered=0.1,
      aspect_ratio_range=(3. / 4, 4. / 3.),
      area_range=(0.08, 1.0),
      max_attempts=10,
      scope=None)
  original_shape = tf.image.extract_jpeg_shape(image_bytes)
  bad = _at_least_x_are_equal(original_shape, tf.shape(image), 3)

  image = tf.cond(
      bad,
      lambda: _decode_and_center_crop(image_bytes, image_size),
      lambda: tf.image.resize_bicubic([image],  # pylint: disable=g-long-lambda
                                      [image_size, image_size])[0])

  return image 
Developer: lukemelas, Project: EfficientNet-PyTorch, Lines of code: 23, Source file: preprocessing.py

Example 11: project_weights_to_r

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def project_weights_to_r(self, force=False):
    """Normalize the weights to the R-ball.

    Args:
      force: True to normalize regardless of previous weight values.
        False to check if weights > R-ball and only normalize then.

    Raises:
      Exception: If not called from inside this optimizer context.
    """
    if not self._is_init:
      raise Exception('This method must be called from within the optimizer\'s '
                      'context.')
    radius = self.loss.radius()
    for layer in self.layers:
      weight_norm = tf.norm(layer.kernel, axis=0)
      if force:
        layer.kernel = layer.kernel / (weight_norm / radius)
      else:
        layer.kernel = tf.cond(
            tf.reduce_sum(tf.cast(weight_norm > radius, dtype=self.dtype)) > 0,
            lambda k=layer.kernel, w=weight_norm, r=radius: k / (w / r),  # pylint: disable=cell-var-from-loop
            lambda k=layer.kernel: k  # pylint: disable=cell-var-from-loop
        ) 
Developer: tensorflow, Project: privacy, Lines of code: 26, Source file: optimizers.py

Example 12: maybe_add_noise

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def maybe_add_noise(image, noise_shape, scale0, scale1,
                    image_noise_probability, image_noise_ratio):
  """Add noise at two scales."""

  if image_noise_probability < 0.000001 or (
      image_noise_ratio < 0.000001):
    return image

  noise_list = []
  for scale in [scale0, scale1]:
    rand_image_noise_ratio = tf.random.uniform(
        shape=[], minval=0.0, maxval=image_noise_ratio)
    noise_list.append(
        _rand_noise(0.0, rand_image_noise_ratio, scale, noise_shape))

  skip_noise = tf.greater(tf.random.uniform([]), image_noise_probability)
  image = tf.cond(skip_noise,
                  lambda: image, lambda: image + noise_list[0])
  image = tf.cond(skip_noise,
                  lambda: image, lambda: image + noise_list[1])

  return image 
Developer: tensorflow, Project: mesh, Lines of code: 24, Source file: data_aug_lib.py

Example 13: get_lr

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def get_lr(global_step, base_lr,  # pylint: disable=missing-docstring
           decay_steps, lr_decay_factor, warmup_steps):

  warmup_lr = 0.0
  if warmup_steps > 0:
    warmup_lr = (tf.cast(global_step, tf.float32) * (base_lr / warmup_steps))

  if decay_steps:
    normal_lr = tf.train.piecewise_constant(
        global_step,
        [s for s in decay_steps],
        [base_lr * (lr_decay_factor ** i) for i in range(len(decay_steps) + 1)]
    )
  else:
    normal_lr = base_lr

  lr = tf.cond(
      tf.less(global_step, tf.cast(warmup_steps, dtype=tf.dtypes.int64)),
      lambda: warmup_lr, lambda: normal_lr)

  return lr


# TODO(akolesnikov): add more logging 
Developer: google-research, Project: s4l, Lines of code: 26, Source file: trainer.py

Example 14: _decode_fn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def _decode_fn(self, example):
    image = tf.image.decode_jpeg(example[self.IMAGE_KEY], channels=3)
    # Subtract LABEL_OFFSET so that labels are in [0, 1000).
    label = tf.cast(example[self.LABEL_KEY], tf.int32) - self.LABEL_OFFSET
    if FLAGS.get_flag_value('pseudo_label_key', None):
        # Always use original label for val / test set.
        label_original = tf.cast(example[self.ORIGINAL_LABEL_KEY],
                                 tf.int32) - self.LABEL_OFFSET
        if self.split_name in ['val', 'test']:
          label = label_original
        elif self.split_name in ['train', 'trainval']:
          label_flag = tf.cast(example[self.FLAG_KEY], tf.int32)
          label = tf.cond(
              tf.math.equal(label_flag, tf.constant(1, dtype=tf.int32)),
              true_fn=lambda: label_original,
              false_fn=lambda: label)
        else:
          raise ValueError('Unknown split {}'.format(self.split_name))

    return self.preprocess_fn({'image': image, 'label': label}) 
Developer: google-research, Project: s4l, Lines of code: 22, Source file: datasets.py

Example 15: log_deferred_tensor_value

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import cond [as alias]
def log_deferred_tensor_value(self, key, tensor_value, global_step,
                                stack_offset=2, every_n=1):
    """Logs the value of a tensor when the graph is run."""
    caller = '(%s)' % mlperf_log.get_caller(stack_offset, self._root_dir)
    def create_print_op():
      return tf.print(_MLPERF_LOG_PREFIX, self.mlperf_model_name,
                      tf.timestamp(), caller, key,
                      ': { "deferred": true, "value":', tensor_value, '}',
                      output_stream=sys.stdout)
    maybe_print = tf.cond(tf.equal(global_step % every_n, 0), create_print_op,
                          tf.no_op)
    with tf.control_dependencies([maybe_print]):
      return tf.identity(tensor_value) 
Developer: tensorflow, Project: benchmarks, Lines of code: 15, Source file: mlperf.py


Note: The tensorflow.compat.v1.cond examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's License before redistributing or using the code; do not republish without permission.