

Python v1.map_fn Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.map_fn method in Python. If you are wondering how to use v1.map_fn, how to call it, or what working examples look like, the curated code examples below should help. You can also browse further usage examples for the containing module, tensorflow.compat.v1.


The following presents 15 code examples of the v1.map_fn method, sorted by popularity by default.
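Before the project examples, here is a minimal, self-contained sketch of what tf.compat.v1.map_fn does: it unstacks a tensor along its first dimension and applies a function to each slice. The input values and the squaring function are illustrative placeholders only, not taken from any of the projects listed below.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # graph mode, matching the v1-style examples below

# A batch of three elements; map_fn unstacks along axis 0 and applies fn to each slice.
xs = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
squared = tf.map_fn(fn=lambda x: x * x, elems=xs, dtype=tf.float32)

with tf.Session() as sess:
  print(sess.run(squared))  # [[1. 4.] [9. 16.] [25. 36.]]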

Example 1: fprop

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def fprop(self, x):
    if x.name in self._logits_dict:
      return self._logits_dict[x.name]

    x = tf.map_fn(tf.image.per_image_standardization, x)
    self._additional_features['inputs'] = x

    if self._scope is None:
      scope = tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE)
    else:
      scope = tf.variable_scope(self._scope, reuse=tf.AUTO_REUSE)

    with scope:
      logits = self._model_fn(
          self._additional_features,
          None,
          'attack',
          params=self._params,
          config=self._config)
    self._logits_dict[x.name] = logits

    return {model.Model.O_LOGITS: tf.reshape(logits, [-1, logits.shape[-1]])} 
Developer: tensorflow, Project: tensor2tensor, Lines: 24, Source: adv_attack_utils.py

Example 2: _decode_masks

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def _decode_masks(self, parsed_tensors):
    """Decode a set of PNG masks to the tf.float32 tensors."""
    def _decode_png_mask(png_bytes):
      mask = tf.squeeze(
          tf.io.decode_png(png_bytes, channels=1, dtype=tf.uint8), axis=-1)
      mask = tf.cast(mask, dtype=tf.float32)
      mask.set_shape([None, None])
      return mask

    height = parsed_tensors['image/height']
    width = parsed_tensors['image/width']
    masks = parsed_tensors['image/object/mask']
    return tf.cond(
        tf.greater(tf.size(masks), 0),
        lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
        lambda: tf.zeros([0, height, width], dtype=tf.float32)) 
Developer: JunweiLiang, Project: Object_Detection_Tracking, Lines: 18, Source: tf_example_decoder.py

Example 3: randomize

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def randomize(images, init_shape, expand_shape=None, crop_shape=None,
              vertical_flip=False):
  """Returns a function that randomly translates and flips images."""
  def random_image(image):
    """Randmly translates and flips images."""
    image = tf.reshape(image, init_shape)
    current_shape = init_shape
    if expand_shape is not None and expand_shape != current_shape:
      if expand_shape[-1] != current_shape[-1]:
        raise ValueError('Number of channels is not specified correctly.')
      image = tf.image.resize_image_with_crop_or_pad(
          image, expand_shape[0], expand_shape[1])
      current_shape = expand_shape
    if crop_shape is not None and crop_shape != current_shape:
      image = tf.random_crop(image, crop_shape)
    if vertical_flip:
      image = tf.image.random_flip_left_right(image)
    return image
  return tf.map_fn(random_image, images) 
Developer: deepmind, Project: interval-bound-propagation, Lines: 21, Source: utils.py

Example 4: maybe_ignore_batch

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def maybe_ignore_batch(spec_or_tensors, ignore_batch = False):
  """Optionally strips the batch dimension and returns new spec.

  Args:
    spec_or_tensors: A dict, (named)tuple, list or a hierarchy thereof filled by
      TensorSpecs(subclasses) or Tensors.
    ignore_batch: If True, the batch dimension of spec_or_tensors is ignored
      for shape comparison.

  Returns:
    spec_or_tensors: If ignore_batch=True, a spec structure with the batch
      dimension stripped; otherwise spec_or_tensors is returned unchanged.
  """
  if ignore_batch:
    def map_fn(spec):
      if isinstance(spec, np.ndarray):
        spec = tf.convert_to_tensor(spec)
      if isinstance(spec, tf.Tensor):
        return ExtendedTensorSpec.from_tensor(spec[0])
      else:
        return ExtendedTensorSpec.from_spec(spec, shape=spec.shape[1:])
    return nest.map_structure(
        map_fn,
        spec_or_tensors)
  return spec_or_tensors 
Developer: google-research, Project: tensor2robot, Lines: 27, Source: tensorspec_utils.py

Example 5: batch_word_to_char_ids

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def batch_word_to_char_ids(words, word_length):
  """Batched version of word_to_char_ids.

  This is a deterministic function that should be computed during preprocessing.
  We pin this op to the CPU anyways to be safe, since it is slower on GPUs.

  Args:
    words: <string> [...]
    word_length: Number of bytes to include per word.

  Returns:
    char_ids: <int32> [..., word_length]
  """
  with tf.device("/cpu:0"):
    flat_words = tf.reshape(words, [-1])
    flat_char_ids = tf.map_fn(
        fn=partial(word_to_char_ids, word_length=word_length),
        elems=flat_words,
        dtype=tf.int32,
        back_prop=False)

  char_ids = tf.reshape(flat_char_ids,
                        tensor_utils.shape(words) + [word_length])
  return char_ids 
Developer: google-research, Project: language, Lines: 26, Source: char_utils.py

Example 6: get_text_summary

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def get_text_summary(question, context, start_predictions, end_predictions):
  """Get a text summary of the question and the predicted answer."""
  question_text = tf.reduce_join(question, -1, separator=" ")

  def _get_prediction_text(args, window=5):
    """Get the prediction text for a single row in the batch."""
    current_context, start, end = args
    prediction_context_start = tf.maximum(start - window, 0)
    prediction_context_end = tf.minimum(end + 1 + window,
                                        tf.shape(current_context)[0])
    before = current_context[prediction_context_start:start]
    prediction = current_context[start:end + 1]
    after = current_context[end + 1:prediction_context_end]
    concat = tf.concat([before, ["**"], prediction, ["**"], after], 0)
    return tf.reduce_join(concat, separator=" ")

  prediction_text = tf.map_fn(
      fn=_get_prediction_text,
      elems=[context, start_predictions, end_predictions],
      dtype=tf.string,
      back_prop=False)

  return tf.summary.text("predictions",
                         tf.stack([question_text, prediction_text], -1)) 
Developer: google-research, Project: language, Lines: 26, Source: nq_short_pipeline_model.py

Example 7: _pmf_to_cdf

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def _pmf_to_cdf(self, pmf, tail_mass, pmf_length, max_length):
    """Helper function for computing the CDF from the PMF."""

    # Prevent tensors from bouncing back and forth between host and GPU.
    with tf.device("/cpu:0"):
      def loop_body(args):
        prob, length, tail = args
        prob = tf.concat([prob[:length], tail], axis=0)
        cdf = range_coding_ops.pmf_to_quantized_cdf(
            prob, precision=self.range_coder_precision)
        return tf.pad(
            cdf, [[0, max_length - length]], mode="CONSTANT", constant_values=0)

      return tf.map_fn(
          loop_body, (pmf, pmf_length, tail_mass),
          dtype=tf.int32, back_prop=False, name="pmf_to_cdf") 
Developer: tensorflow, Project: compression, Lines: 18, Source: entropy_models.py

Example 8: tensorspec_from_tensors

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def tensorspec_from_tensors(tensors):
  """Converts a collection of tensors to a collection of TensorSpec.

  A collection can only be a dict, namedtuple or a hierarchy thereof containing
  tensors or placeholders.

  Args:
    tensors: A dict, (named)tuple, list or a hierarchy thereof filled by
      tensors.

  Returns:
    Equivalent structure of tensors with Tensors replaced with TensorSpec.
  """
  assert_valid_spec_structure(tensors)

  # Every tensor needs to have a unique name. This is a requirement for the
  # spec structure. We use the closure to pass the integer into the map_fn.
  # Note we cannot simply use unique_index = 0 since integers cannot be changed
  # without changing the reference.
  unique_index = [0]

  def map_fn(tensor):
    unique_name = '{}/{}'.format(tensor.op.name, unique_index[0])
    unique_index[0] += 1
    return ExtendedTensorSpec.from_tensor(tensor, unique_name)

  return nest.map_structure(map_fn, tensors) 
Developer: google-research, Project: tensor2robot, Lines: 29, Source: tensorspec_utils.py

Example 9: _preprocess_fn

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def _preprocess_fn(
      self, features,
      labels, mode
  ):
    """The preprocessing function which will be executed prior to the model_fn.

    Note, _preprocess_fn is invoked for a batch of features and labels.
    If _preprocess_fn can only operate on a batch size of one, please use
    the following pattern.

    def _fn(features_single_batch, labels_single_batch):
      # The actual implementation

    return tf.map_fn(
      _fn, # The single batch implementation
      (features, labels), # Our nested structure, the first dimension unpacked
      dtype=(self.get_out_feature_specification(),
             self.get_out_labels_specification()),
      back_prop=False,
      parallel_iterations=self._parallel_iterations)

    Args:
      features: The input features extracted from a single example in our
        in_feature_specification format.
      labels: (Optional None) The input labels extracted from a single example
        in our in_label_specification format.
      mode: (ModeKeys) Specifies if this is training, evaluation or prediction.

    Returns:
      features_preprocessed: The preprocessed features, potentially adding
        additional tensors derived from the input features.
      labels_preprocessed: (Optional) The preprocessed labels, potentially
        adding additional tensors derived from the input features and labels.
    """ 
Developer: google-research, Project: tensor2robot, Lines: 36, Source: abstract_preprocessor.py
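As a concrete illustration of the map_fn pattern described in the docstring above, the following hypothetical sketch applies a single-example function across a batch of features and labels. All names and dtypes here are assumptions for illustration only and are not part of tensor2robot.

import tensorflow.compat.v1 as tf

def _fn(features_single, labels_single):
  # Per-example preprocessing, e.g. standardize one image and pass labels through.
  return tf.image.per_image_standardization(features_single), labels_single

def preprocess_batch(features, labels):
  # Unpack the batch dimension and apply _fn element-wise; dtype mirrors the
  # structure returned by _fn (assumed float32 images and int64 labels).
  return tf.map_fn(
      lambda args: _fn(*args),
      (features, labels),
      dtype=(tf.float32, tf.int64),
      back_prop=False)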

Example 10: inception_logits

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def inception_logits(images = inception_images, num_splits = 1):
    images = tf.transpose(images, [0, 2, 3, 1])
    size = 299
    images = tf.compat.v1.image.resize_bilinear(images, [size, size])
    generated_images_list = array_ops.split(images, num_or_size_splits = num_splits)
    logits = tf.map_fn(
        fn = tfgan.eval.classifier_fn_from_tfhub(INCEPTION_TFHUB, INCEPTION_OUTPUT, True),
        elems = array_ops.stack(generated_images_list),
        parallel_iterations = 8,
        back_prop = False,
        swap_memory = True,
        name = 'RunClassifier')
    logits = array_ops.concat(array_ops.unstack(logits), 0)
    return logits 
Developer: tsc2017, Project: Inception-Score, Lines: 16, Source: inception_score.py

Example 11: inception_logits

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def inception_logits(images):
    images = tf.transpose(images, [0, 2, 3, 1])
    images = tf.compat.v1.image.resize_bilinear(images, [inception_size, inception_size])
    generated_images_list = array_ops.split(images, num_or_size_splits = 1)
    logits = tf.map_fn(
        fn = tfgan.eval.classifier_fn_from_tfhub(INCEPTION_TFHUB, INCEPTION_OUTPUT, True),
        elems = array_ops.stack(generated_images_list),
        parallel_iterations = 1,
        back_prop = False,
        swap_memory = True,
        name = 'RunClassifier')
    logits = array_ops.concat(array_ops.unstack(logits), 0)
    return logits 
Developer: tsc2017, Project: Inception-Score, Lines: 15, Source: inception_score_tpu.py

Example 12: batch_image_preprocess

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def batch_image_preprocess(raw_images,
                           image_size: Union[int, Tuple[int, int]],
                           batch_size: int = None):
  """Preprocess batched images for inference.

  Args:
    raw_images: a list of images; each image can be a tensor or a numpy array.
    image_size: single integer of image size for square image or tuple of two
      integers, in the format of (image_height, image_width).
    batch_size: if None, use map_fn to deal with dynamic batch size.

  Returns:
    (image, scale): a tuple of processed images and scales.
  """
  if not batch_size:
    # map_fn is a little bit slower due to some extra overhead.
    map_fn = functools.partial(image_preprocess, image_size=image_size)
    images, scales = tf.map_fn(
        map_fn, raw_images, dtype=(tf.float32, tf.float32), back_prop=False)
    return (images, scales)

  # If batch size is known, use a simple loop.
  scales, images = [], []
  for i in range(batch_size):
    image, scale = image_preprocess(raw_images[i], image_size)
    scales.append(scale)
    images.append(image)
  images = tf.stack(images)
  scales = tf.stack(scales)
  return (images, scales) 
Developer: PINTO0309, Project: PINTO_model_zoo, Lines: 32, Source: inference.py

Example 13: select_slate_optimal

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def select_slate_optimal(slate_size, s_no_click, s, q):
  """Selects the slate using exhaustive search.

  This algorithm corresponds to the method "OS" in
  Ie et al. https://arxiv.org/abs/1905.12767.

  Args:
    slate_size: int, the size of the recommendation slate.
    s_no_click: float tensor, the score for not clicking any document.
    s: [num_of_documents] tensor, the scores for clicking documents.
    q: [num_of_documents] tensor, the predicted q values for documents.

  Returns:
    [slate_size] tensor, the selected slate.
  """

  num_candidates = s.shape.as_list()[0]

  # Obtain all possible slates given current docs in the candidate set.
  mesh_args = [list(range(num_candidates))] * slate_size
  slates = tf.stack(tf.meshgrid(*mesh_args), axis=-1)
  slates = tf.reshape(slates, shape=(-1, slate_size))

  # Filter slates that include duplicates to ensure each document is picked
  # at most once.
  unique_mask = tf.map_fn(
      lambda x: tf.equal(tf.size(input=x), tf.size(input=tf.unique(x)[0])),
      slates,
      dtype=tf.bool)
  slates = tf.boolean_mask(tensor=slates, mask=unique_mask)

  slate_q_values = tf.gather(s * q, slates)
  slate_scores = tf.gather(s, slates)
  slate_normalizer = tf.reduce_sum(
      input_tensor=slate_scores, axis=1) + s_no_click

  slate_q_values = slate_q_values / tf.expand_dims(slate_normalizer, 1)
  slate_sum_q_values = tf.reduce_sum(input_tensor=slate_q_values, axis=1)
  max_q_slate_index = tf.argmax(input=slate_sum_q_values)
  return tf.gather(slates, max_q_slate_index, axis=0) 
Developer: google-research, Project: recsim, Lines: 42, Source: slate_decomp_q_agent.py

Example 14: decompress

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def decompress(self, strings, **kwargs):
    """Decompress values from their compressed string representations.

    Arguments:
      strings: A string `Tensor` vector containing the compressed data.
      **kwargs: Model-specific keyword arguments.

    Returns:
      The decompressed `Tensor`.
    """
    with tf.name_scope(self._name_scope()):
      strings = tf.convert_to_tensor(strings, dtype=tf.string)

      indexes = self._prepare_indexes(**kwargs)
      ndim = self.input_spec.ndim
      broadcast_indexes = (indexes.shape.ndims != ndim)
      if broadcast_indexes:
        # We can't currently broadcast over anything else but the batch axis.
        assert indexes.shape.ndims == ndim - 1
        args = (strings,)
      else:
        args = (strings, indexes)

      def loop_body(args):
        symbols = range_coding_ops.unbounded_index_range_decode(
            args[0], indexes if broadcast_indexes else args[1],
            self._quantized_cdf, self._cdf_length, self._offset,
            precision=self.range_coder_precision, overflow_width=4,
            debug_level=0)
        return symbols

      symbols = tf.map_fn(
          loop_body, args, dtype=tf.int32, back_prop=False, name="decompress")

      outputs = self._dequantize(symbols, "dequantize")
      assert outputs.dtype == self.dtype

      if not tf.executing_eagerly():
        outputs.set_shape(self.input_spec.shape)

      return outputs 
Developer: tensorflow, Project: compression, Lines: 43, Source: entropy_models.py

Example 15: tf_apply_to_image_or_images

# Required import: from tensorflow.compat import v1 as tf
# Or: from tensorflow.compat.v1 import map_fn
def tf_apply_to_image_or_images(fn, image_or_images, **map_kw):
  """Applies a function to a single image or each image in a batch of them.

  Args:
    fn: the function to apply, receives an image, returns an image.
    image_or_images: Either a single image, or a batch of images.
    **map_kw: Arguments passed through to tf.map_fn if called.

  Returns:
    The result of applying the function to the image or batch of images.

  Raises:
    ValueError: if the input is not of rank 3 or 4.
  """
  static_rank = len(image_or_images.get_shape().as_list())
  if static_rank == 3:  # A single image: HWC
    return fn(image_or_images)
  elif static_rank == 4:  # A batch of images: BHWC
    return tf.map_fn(fn, image_or_images, **map_kw)
  elif static_rank > 4:  # A batch of images: ...HWC
    input_shape = tf.shape(image_or_images)
    h, w, c = image_or_images.get_shape().as_list()[-3:]
    image_or_images = tf.reshape(image_or_images, [-1, h, w, c])
    image_or_images = tf.map_fn(fn, image_or_images, **map_kw)
    return tf.reshape(image_or_images, input_shape)
  else:
    raise ValueError("Unsupported image rank: %d" % static_rank) 
Developer: google-research, Project: s4l, Lines: 29, Source: utils.py


Note: The tensorflow.compat.v1.map_fn examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to each project's license. Do not repost without permission.