

Python v1.mod method code examples

This article collects typical usage examples of the tensorflow.compat.v1.mod method in Python. If you are wondering how exactly to use the Python v1.mod method, how to call it, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples from the containing module, tensorflow.compat.v1.


The following presents 15 code examples of the v1.mod method, sorted by popularity by default.
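
Before the examples, here is a minimal orientation sketch (not taken from any of the projects below, and assuming TensorFlow 2.x with the v1 compatibility shim): tf.compat.v1.mod computes an element-wise floor-division remainder, so the result follows the sign of the divisor.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([7, -7, 9])
y = tf.constant([3, 3, 4])
result = tf.mod(x, y)  # tf.mod here is the same op as tensorflow.compat.v1.mod

with tf.Session() as sess:
    print(sess.run(result))  # [1 2 1]: floormod, e.g. -7 mod 3 == 2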

Example 1: noise_from_step_num

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def noise_from_step_num():
  """Quantization noise equal to (phi * (step_num + 1)) mod 1.0.

  Not using random_uniform here due to a problem on TPU in that random seeds
  are not respected, which may cause the parameters on different replicas
  to go out-of-sync.

  Returns:
    a float32 scalar
  """
  step = tf.to_int32(tf.train.get_or_create_global_step()) + 1
  phi = ((5 ** 0.5) - 1) / 2
  # Naive computation tf.mod(phi * step, 1.0) in float32 would be disastrous
  # due to loss of precision when the step number gets large.
  # Computation in doubles does not work on TPU, so we use this complicated
  # alternative computation which does not suffer from these roundoff errors.
  ret = 0.0
  for i in range(30):
    ret += (((phi * (2 ** i)) % 1.0)  # double-precision computation in python
            * tf.to_float(tf.mod(step // (2 ** i), 2)))
  return tf.mod(ret, 1.0) 
Developer: tensorflow, Project: tensor2tensor, Lines: 23, Source: quantization.py
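
The bit-decomposition above is worth a quick sanity check. The following plain-Python sketch (not part of the repo) verifies the identity the code relies on: writing step as a sum of bits b_i * 2**i, (phi * step) mod 1 equals the mod-1 sum of the precomputed per-bit contributions (phi * 2**i) mod 1, so no single float32 product ever involves a large step number.

phi = (5 ** 0.5 - 1) / 2
step = 123456
ret = 0.0
for i in range(30):
    bit = (step >> i) & 1                  # analogue of tf.mod(step // (2 ** i), 2)
    ret += ((phi * (2 ** i)) % 1.0) * bit  # per-bit contribution, precomputed in doubles
print(ret % 1.0)                           # matches (up to rounding) ...
print((phi * step) % 1.0)                  # ... the direct double-precision value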

Example 2: _finish

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def _finish(self, update_ops, name_scope):
    """Updates beta_power variables every n batches and incrs counter."""
    iter_ = self._get_iter_variable()
    beta1_power, beta2_power = self._get_beta_accumulators()
    with tf.control_dependencies(update_ops):
      with tf.colocate_with(iter_):

        def update_beta_op():
          update_beta1 = beta1_power.assign(
              beta1_power * self._beta1_t,
              use_locking=self._use_locking)
          update_beta2 = beta2_power.assign(
              beta2_power * self._beta2_t,
              use_locking=self._use_locking)
          return tf.group(update_beta1, update_beta2)
        maybe_update_beta = tf.cond(
            tf.equal(iter_, 0), update_beta_op, tf.no_op)
        with tf.control_dependencies([maybe_update_beta]):
          update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t),
                                     use_locking=self._use_locking)
    return tf.group(
        *update_ops + [update_iter, maybe_update_beta], name=name_scope) 
Developer: tensorflow, Project: tensor2tensor, Lines: 24, Source: multistep_optimizer.py
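
For intuition, here is a plain-Python sketch (not part of the optimizer class) of the counter logic: tf.mod(iter_ + 1, n) cycles the counter through 0..n-1, so the beta accumulators are refreshed only on the step where the counter reads 0, i.e. once every n batches.

n = 4
iter_ = 0
for batch in range(8):
    update_beta = (iter_ == 0)        # analogue of tf.cond(tf.equal(iter_, 0), ...)
    iter_ = (iter_ + 1) % n           # analogue of iter_.assign(tf.mod(iter_ + 1, self._n_t))
    print(batch, iter_, update_beta)  # update_beta is True on batches 0, 4, ...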

Example 3: unwrap

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def unwrap(p, discont=np.pi, axis=-1):
  """Unwrap a cyclical phase tensor.

  Args:
    p: Phase tensor.
    discont: Float, size of the cyclic discontinuity.
    axis: Axis along which to unwrap.

  Returns:
    unwrapped: Unwrapped tensor of same size as input.
  """
  dd = diff(p, axis=axis)
  ddmod = tf.mod(dd + np.pi, 2.0 * np.pi) - np.pi
  idx = tf.logical_and(tf.equal(ddmod, -np.pi), tf.greater(dd, 0))
  ddmod = tf.where(idx, tf.ones_like(ddmod) * np.pi, ddmod)
  ph_correct = ddmod - dd
  idx = tf.less(tf.abs(dd), discont)
  ddmod = tf.where(idx, tf.zeros_like(ddmod), dd)
  ph_cumsum = tf.cumsum(ph_correct, axis=axis)

  shape = p.get_shape().as_list()
  shape[axis] = 1
  ph_cumsum = tf.concat([tf.zeros(shape, dtype=p.dtype), ph_cumsum], axis=axis)
  unwrapped = p + ph_cumsum
  return unwrapped 
Developer: magenta, Project: magenta, Lines: 27, Source: spectral_ops.py
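
Since this mirrors NumPy's unwrap, np.unwrap can serve as an eager reference when sanity-checking the graph version on small inputs (illustrative only, not from the repo):

import numpy as np

phase = np.array([0.0, 3.0, -3.0, 3.0])  # the +/-6 rad jumps look like wrap-arounds
print(np.unwrap(phase))                  # approx. [0.  3.  3.2832  3.]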

Example 4: _finish

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def _finish(self, update_ops, name_scope):
    """Updates beta_power variables every n batches and incrs counter."""
    iter_ = self._get_iter_variable()
    beta1_power, beta2_power = self._get_beta_accumulators()
    with tf.control_dependencies(update_ops):
      with tf.colocate_with(iter_):

        def update_beta_op():
          update_beta1 = beta1_power.assign(
              beta1_power * self._beta1_t, use_locking=self._use_locking)
          update_beta2 = beta2_power.assign(
              beta2_power * self._beta2_t, use_locking=self._use_locking)
          return tf.group(update_beta1, update_beta2)

        maybe_update_beta = tf.cond(
            tf.equal(iter_, 0), update_beta_op, tf.no_op)
        with tf.control_dependencies([maybe_update_beta]):
          # TODO(cuong): It is suboptimal here because we have to cast twice
          # (float to int, and then int to float)
          update_iter = iter_.assign(
              tf.cast(
                  tf.mod(tf.cast(iter_ + 1.0, tf.int32), self._n_t),
                  tf.float32),
              use_locking=self._use_locking)
    return tf.group(
        *update_ops + [update_iter, maybe_update_beta], name=name_scope) 
Developer: tensorflow, Project: tensor2tensor, Lines: 28, Source: multistep_with_adamoptimizer.py

Example 5: get_timing_signal_1d_given_position

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def get_timing_signal_1d_given_position(channels,
                                        position,
                                        min_timescale=1.0,
                                        max_timescale=1.0e4):
  """Get sinusoids of diff frequencies, with timing position given.

  Adapted from add_timing_signal_1d_given_position in
  //third_party/py/tensor2tensor/layers/common_attention.py

  Args:
    channels: scalar, size of timing embeddings to create. The number of
        different timescales is equal to channels / 2.
    position: a Tensor with shape [batch, seq_len]
    min_timescale: a float
    max_timescale: a float

  Returns:
    a Tensor of timing signals [batch, seq_len, channels]
  """
  num_timescales = channels // 2
  log_timescale_increment = (
      math.log(float(max_timescale) / float(min_timescale)) /
      (tf.to_float(num_timescales) - 1))
  inv_timescales = min_timescale * tf.exp(
      tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)
  scaled_time = (
      tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims(
          tf.expand_dims(inv_timescales, 0), 0))
  signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2)
  signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]])
  return signal 
Developer: google-research, Project: albert, Lines: 33, Source: modeling.py
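
A hedged usage sketch (assuming the function above is in scope, along with math and tensorflow.compat.v1 imported as tf): when channels is odd, the tf.mod(channels, 2) padding adds one zero channel so the output is exactly [batch, seq_len, channels].

position = tf.constant([[0, 1, 2, 3]])          # [batch=1, seq_len=4]
signal = get_timing_signal_1d_given_position(channels=5, position=position)
with tf.Session() as sess:
    print(sess.run(signal).shape)               # (1, 4, 5): 2 sin + 2 cos + 1 zero-pad channel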

Example 6: instantaneous_frequency

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def instantaneous_frequency(phase_angle, time_axis=-2, use_unwrap=True):
  """Transform a fft tensor from phase angle to instantaneous frequency.

  Take the finite difference of the phase. Pad with initial phase to keep the
  tensor the same size.
  Args:
    phase_angle: Tensor of angles in radians. [Batch, Time, Freqs]
    time_axis: Axis over which to unwrap and take finite difference.
    use_unwrap: True preserves original GANSynth behavior, whereas False will
        guard against loss of precision.

  Returns:
    dphase: Instantaneous frequency (derivative of phase). Same size as input.
  """
  if use_unwrap:
    # Can lead to loss of precision.
    phase_unwrapped = unwrap(phase_angle, axis=time_axis)
    dphase = diff(phase_unwrapped, axis=time_axis)
  else:
    # Keep dphase bounded. N.B. runs faster than a single mod-2pi expression.
    dphase = diff(phase_angle, axis=time_axis)
    dphase = tf.where(dphase > np.pi, dphase - 2 * np.pi, dphase)
    dphase = tf.where(dphase < -np.pi, dphase + 2 * np.pi, dphase)

  # Add an initial phase to dphase.
  size = phase_angle.get_shape().as_list()
  size[time_axis] = 1
  begin = [0 for unused_s in size]
  phase_slice = tf.slice(phase_angle, begin, size)
  dphase = tf.concat([phase_slice, dphase], axis=time_axis) / np.pi
  return dphase 
Developer: magenta, Project: magenta, Lines: 33, Source: spectral_ops.py

Example 7: __init__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def __init__(self, label, num_classes, num_targets=1, seed=None,
               collapse=True):
    # Overwrite the target indices. Each session.run() call gets new target
    # indices, the indices should remain the same across restarts.
    batch_size = tf.shape(label)[0]
    j = tf.random.uniform(shape=(batch_size, num_targets), minval=1,
                          maxval=num_classes, dtype=tf.int32, seed=seed)
    target_class = tf.mod(tf.cast(tf.expand_dims(label, -1), tf.int32) + j,
                          num_classes)
    super(RandomClassificationSpecification, self).__init__(
        label, num_classes, target_class, collapse=collapse) 
Developer: deepmind, Project: interval-bound-propagation, Lines: 13, Source: specification.py

Example 8: _get_random_class

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def _get_random_class(label, num_classes, seed=None):
  batch_size = tf.shape(label)[0]
  target_label = tf.random.uniform(
      shape=(batch_size,), minval=1, maxval=num_classes, dtype=tf.int64,
      seed=seed)
  return tf.mod(tf.cast(label, tf.int64) + target_label, num_classes) 
Developer: deepmind, Project: interval-bound-propagation, Lines: 8, Source: utils.py
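
A plain-Python illustration (not from the repo) of why the mod is safe here: adding a random offset drawn from [1, num_classes) and reducing mod num_classes always lands on a class different from the true label.

import random

num_classes, label = 10, 7
offset = random.randrange(1, num_classes)   # analogue of tf.random.uniform(minval=1, maxval=num_classes)
target = (label + offset) % num_classes
print(target, target != label)              # the second value is always True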

Example 9: _get_least_likely_class

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def _get_least_likely_class(label, num_classes, logits):
  target_label = tf.argmin(logits, axis=1, output_type=tf.int64)
  # In the off-chance that the least likely class is the true class, the target
  # class is changed to be the next index.
  return tf.mod(target_label + tf.cast(
      tf.equal(target_label, tf.cast(label, tf.int64)), tf.int64), num_classes) 
Developer: deepmind, Project: interval-bound-propagation, Lines: 8, Source: utils.py
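
Again as a plain-Python illustration (not from the repo): in the degenerate case where the least likely class equals the true label, adding the equality indicator and taking mod num_classes bumps the target to the next class, wrapping around at the end.

num_classes = 10
label, target = 7, 7                                   # degenerate case: argmin == label
print((target + int(target == label)) % num_classes)  # 8
label, target = 9, 9                                   # wrap-around case
print((target + int(target == label)) % num_classes)  # 0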

Example 10: get_softmax_viz

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def get_softmax_viz(image, softmax, nrows=None):
  """Arrange softmax maps in a grid and superimpose them on the image."""
  softmax_shape = tf.shape(softmax)
  batch_size = softmax_shape[0]
  target_height = softmax_shape[1] * 2
  target_width = softmax_shape[2] * 2
  num_points = softmax_shape[3]

  if nrows is None:
    # Find a number of rows such that the arrangement is as square as possible.
    num_points_float = tf.cast(num_points, tf.float32)
    nfsqrt = tf.cast(tf.floor(tf.sqrt(num_points_float)), tf.int32)
    divs = tf.range(1, nfsqrt + 1)
    remainders = tf.mod(num_points_float, tf.cast(divs, tf.float32))
    divs = tf.gather(divs, tf.where(tf.equal(remainders, 0)))
    nrows = tf.reduce_max(divs)
  ncols = tf.cast(num_points / nrows, tf.int32)
  nrows = tf.cast(nrows, tf.int32)
  # Normalize per channel
  img = softmax / tf.reduce_max(softmax, axis=[1, 2], keepdims=True)
  # Use softmax as hue and saturation and original image as value of HSV image.
  greyimg = tf.image.rgb_to_grayscale(image)
  greyimg = tf.image.resize_images(greyimg, [target_height, target_width])
  greyimg = tf.tile(greyimg, [1, 1, 1, num_points])
  greyimg = tf.reshape(greyimg,
                       [batch_size, target_height, target_width, num_points, 1])
  img = tf.image.resize_images(img, [target_height, target_width])
  img = tf.reshape(img,
                   [batch_size, target_height, target_width, num_points, 1])
  img = tf.concat([img / 2.0 + 0.5, img, greyimg * 0.7 + 0.3], axis=4)

  # Rearrange channels into a ncols x nrows grid.
  img = tf.reshape(img,
                   [batch_size, target_height, target_width, nrows, ncols, 3])
  img = tf.transpose(img, [0, 3, 1, 4, 2, 5])
  img = tf.reshape(img,
                   [batch_size, target_height * nrows, target_width * ncols, 3])

  img = tf.image.hsv_to_rgb(img)
  return img 
Developer: google-research, Project: tensor2robot, Lines: 42, Source: visualization.py
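
The nrows search above uses tf.mod to test divisibility. A plain-Python analogue (illustrative only) shows the intent: pick the largest divisor of num_points that does not exceed sqrt(num_points), which yields the most nearly square grid.

import math

num_points = 12
divs = [d for d in range(1, math.isqrt(num_points) + 1) if num_points % d == 0]
nrows = max(divs)             # 3
ncols = num_points // nrows   # 4
print(nrows, ncols)           # a 3 x 4 grid for 12 softmax channels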

Example 11: make_hash_fn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def make_hash_fn():
  session = tf.Session()
  placeholder = tf.placeholder(tf.string, [])
  hash_bucket = tf.strings.to_hash_bucket_fast(placeholder, 100000)
  result = tf.mod(hash_bucket, 10)

  def _hash_fn(x):
    return session.run(result, feed_dict={placeholder: x})

  return _hash_fn 
Developer: google-research, Project: language, Lines: 12, Source: create_data_splits.py
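
A hypothetical usage sketch (assuming a TF1 graph/session environment): the returned closure deterministically maps any string key to one of 10 splits via the hash bucket mod 10.

hash_fn = make_hash_fn()
print(hash_fn("some-example-key"))  # an integer in [0, 10)
print(hash_fn("some-example-key"))  # the same integer again: the hash is deterministic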

Example 12: padded_where

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def padded_where(condition, length):
  """TPU friendly version of tf.where(cond) with fixed length and padding.

  This is a wrapper around tf.where(cond) that returns the coordinates of the
  True elements of cond (case where x and y are None). This version, however,
  returns a fixed length tensor of coordinates, determined by `length`.  If the
  number of True elements in `condition` is less than `length`, then the
  returned tensor is right-padded with zeros. Otherwise, the returned tensor is
  truncated to `length` size.

  Args:
    condition: tf.Tensor of type boolean; any shape.
    length: Length of (last dimension of) the returned tensor.

  Returns:
    Two tensors:
    - a tensor of type int32, shaped like `condition` except that its last
      dimension has size `length`, containing coordinates (along the last
      dimension of `condition`) where values are True.
    - a mask tensor of type int32 with 1s in valid indices of the first tensor,
      and 0s for padded indices.
  """
  condition_shape = shape(condition)
  n = condition_shape[-1]

  # Build a tensor that counts indices from 0 to length of condition.
  ixs = tf.broadcast_to(tf.range(n, dtype=tf.int32), condition_shape)

  # Build tensor where True condition values get their index value or
  # n (== len(condition)) otherwise.
  ixs = tf.where(condition, ixs, tf.ones_like(condition, dtype=tf.int32) * n)

  # Sort indices (so that indices for False values == n, will be placed last),
  # and get the desired number of entries, truncating by `length`.
  ixs = tf.sort(ixs)[Ellipsis, 0:length]

  # For first tensor, zero-out values == n. For second tensor, put 1s where
  # values are < n, and 0s where values are == 0.
  return tf.mod(ixs, n), (1 - tf.div(ixs, n)) 
Developer: google-research, Project: language, Lines: 41, Source: tensor_utils.py
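
A hedged usage sketch (assuming the shape helper used above and a TF1 session): True positions come back left-aligned, zero-padded up to length, with the mask marking which entries are real.

cond = tf.constant([False, True, False, True, True])
ixs, mask = padded_where(cond, length=4)
with tf.Session() as sess:
    print(sess.run([ixs, mask]))  # [1 3 4 0] and [1 1 1 0]: the trailing 0 is padding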

Example 13: preprocess_example

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def preprocess_example(self, example, mode, hparams):
    if self.has_inputs:
      # Stack encoded score components depthwise as inputs.
      inputs = []
      for name, _ in self.score_encoders():
        inputs.append(tf.expand_dims(example[name], axis=1))
        del example[name]
      example['inputs'] = tf.stack(inputs, axis=2)

    if self.random_crop_in_train and mode == tf.estimator.ModeKeys.TRAIN:
      # Take a random crop of the training example.
      assert not self.has_inputs
      max_offset = tf.maximum(
          tf.shape(example['targets'])[0] - hparams.max_target_seq_length, 0)
      offset = tf.cond(
          max_offset > 0,
          lambda: tf.random_uniform([], maxval=max_offset, dtype=tf.int32),
          lambda: 0
      )
      example['targets'] = (
          example['targets'][offset:offset + hparams.max_target_seq_length])
      return example

    elif self.split_in_eval and mode == tf.estimator.ModeKeys.EVAL:
      # Split the example into non-overlapping segments.
      assert not self.has_inputs
      length = tf.shape(example['targets'])[0]
      extra_length = tf.mod(length, hparams.max_target_seq_length)
      examples = {
          'targets': tf.reshape(
              example['targets'][:length - extra_length],
              [-1, hparams.max_target_seq_length, 1, 1])
      }
      extra_example = {
          'targets': tf.reshape(
              example['targets'][-extra_length:], [1, -1, 1, 1])
      }
      dataset = tf.data.Dataset.from_tensor_slices(examples)
      extra_dataset = tf.data.Dataset.from_tensor_slices(extra_example)
      return dataset.concatenate(extra_dataset)

    else:
      # If not cropping or splitting, do standard preprocessing.
      return super(Score2PerfProblem, self).preprocess_example(
          example, mode, hparams) 
Developer: magenta, Project: magenta, Lines: 47, Source: score2perf.py
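
The eval branch's tf.mod is plain remainder bookkeeping; a small Python sketch (illustrative) of the split arithmetic:

length, max_len = 1030, 256
extra_length = length % max_len                # analogue of tf.mod(length, hparams.max_target_seq_length)
num_full_segments = (length - extra_length) // max_len
print(num_full_segments, extra_length)         # 4 full segments plus one remainder segment of 6 steps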

Example 14: compute_progress

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def compute_progress(current_image_id, stable_stage_num_images,
                     transition_stage_num_images, num_blocks):
  """Computes the training progress.

  The training alternates between stable phase and transition phase.
  The `progress` indicates the training progress, i.e. the training is at
  - a stable phase p if progress = p
  - a transition stage between p and p + 1 if progress = p + fraction
  where p = 0,1,2.,...

  Note the max value of progress is `num_blocks` - 1.

  In terms of LOD (of the original implementation):
  progress = `num_blocks` - 1 - LOD

  Args:
    current_image_id: A scalar integer `Tensor` of the current image id, counted
        from 0.
    stable_stage_num_images: An integer representing the number of images in
        each stable stage.
    transition_stage_num_images: An integer representing the number of images in
        each transition stage.
    num_blocks: Number of network blocks.

  Returns:
    A scalar float `Tensor` of the training progress.
  """
  # Note when current_image_id >= min_total_num_images - 1 (which means we
  # are already at the highest resolution), we want to keep progress constant.
  # Therefore, cap current_image_id here.
  capped_current_image_id = tf.minimum(
      current_image_id,
      min_total_num_images(stable_stage_num_images, transition_stage_num_images,
                           num_blocks) - 1)

  stage_num_images = stable_stage_num_images + transition_stage_num_images
  progress_integer = tf.floordiv(capped_current_image_id, stage_num_images)
  progress_fraction = tf.maximum(
      0.0,
      tf.to_float(
          tf.mod(capped_current_image_id, stage_num_images) -
          stable_stage_num_images) / tf.to_float(transition_stage_num_images))
  return tf.to_float(progress_integer) + progress_fraction 
Developer: magenta, Project: magenta, Lines: 45, Source: networks.py
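
A plain-Python sketch of the progress formula (illustrative; it ignores the min_total_num_images cap applied above): the integer part counts completed stages and the fractional part measures how far the current transition has advanced.

stable, transition = 1000, 1000
stage = stable + transition
image_id = 3500                               # 1500 images into the second stage
progress = image_id // stage + max(0.0, ((image_id % stage) - stable) / transition)
print(progress)                               # 1.5: halfway through the transition between stable phases 1 and 2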

Example 15: export_model

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import mod [as alias]
def export_model(estimator, export_dir, vocabulary, sequence_length,
                 batch_size=1, checkpoint_path=None):
  """Export a model in TF SavedModel format to be used for inference on CPUs.

  Args:
    estimator: Estimator object, estimator created with the appropriate
      model_fn.
    export_dir: str, a directory in which to create timestamped subdirectories
      containing exported SavedModels.
    vocabulary: sentencepiece vocab, vocabulary instance to use for encoding.
    sequence_length: an integer or a dict from feature-key to integer
      the (packed) sequence length, e.g. {"inputs": 512, "targets": 128}
    batch_size: int, number of sequences per batch. Should match estimator.
    checkpoint_path: str, path to checkpoint. If None (default), use the most
      recent in the model directory.

  Returns:
    The string path to the exported directory.
  """

  def serving_input_fn():
    """Constructs input portion of Graph in serving.

    Input is a batch of strings.

    Returns:
      a ServingInputReceiver
    """
    inputs = tf.placeholder(
        dtype=tf.string,
        shape=[None],
        name="inputs")

    padded_inputs = tf.pad(inputs, [(0, tf.mod(-tf.size(inputs), batch_size))])

    dataset = tf.data.Dataset.from_tensor_slices(padded_inputs)
    dataset = dataset.map(lambda x: {"inputs": x})
    dataset = transformer_dataset.encode_all_features(dataset, vocabulary)
    dataset = transformer_dataset.pack_or_pad(
        dataset=dataset,
        length=sequence_length,
        pack=False,
        feature_keys=["inputs"]
    )

    dataset = dataset.batch(batch_size)

    features = tf.data.experimental.get_single_element(dataset)
    return tf.estimator.export.ServingInputReceiver(
        features=features, receiver_tensors=inputs)

  return estimator.export_saved_model(
      export_dir, serving_input_fn, checkpoint_path=checkpoint_path) 
Developer: tensorflow, Project: mesh, Lines: 55, Source: utils.py
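
The padding expression tf.mod(-tf.size(inputs), batch_size) is the usual round-up-to-a-multiple trick; a plain-Python note (illustrative) on the arithmetic:

batch_size = 8
for n in (5, 8, 11):
    pad = (-n) % batch_size                       # analogue of tf.mod(-tf.size(inputs), batch_size)
    print(n, pad, (n + pad) % batch_size == 0)    # 5->3, 8->0, 11->5; the padded size is always a multiple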


Note: The tensorflow.compat.v1.mod examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.