

Python v1.fill Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.fill method in Python. If you are unsure how to use v1.fill, or want to see how it is used in real code, the curated examples below should help. You can also explore further usage examples from the tensorflow.compat.v1 module.


Below are 15 code examples of the v1.fill method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
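
Before the examples, here is a minimal sketch of what tf.fill itself does (a hedged illustration, assuming TF 1.x graph-mode behavior; the alias `tf` is tensorflow.compat.v1):

# Minimal sketch of tf.fill: create a tensor of shape `dims` whose elements
# all equal `value`. Assumes TF 1.x graph/session semantics.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.fill([2, 3], 7)    # int32 tensor [[7, 7, 7], [7, 7, 7]]
y = tf.fill([4], 0.5)     # float32 tensor [0.5, 0.5, 0.5, 0.5]
with tf.Session() as sess:
    print(sess.run(x))
    print(sess.run(y))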

Example 1: reset

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def reset(self, entries_to_reset):
    """Reset the entries in the memory.

    Args:
      entries_to_reset: a 1D tensor of indices of the memory entries to reset.

    Returns:
      the reset op.
    """
    num_updates = tf.size(entries_to_reset)
    update_vals = tf.scatter_update(
        self.mem_vals, entries_to_reset,
        tf.tile(tf.expand_dims(
            tf.fill([self.memory_size, self.val_depth], 0.0), 0),
                [num_updates, 1, 1]))
    update_logits = tf.scatter_update(
        self.mean_logits, entries_to_reset,
        tf.tile(tf.expand_dims(
            tf.fill([self.memory_size], 0.0), 0),
                [num_updates, 1]))
    reset_op = tf.group([update_vals, update_logits])
    return reset_op 
Developer: tensorflow | Project: tensor2tensor | Lines: 23 | Source: transformer_memory.py
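
For context, a hedged standalone sketch of the same reset pattern, with hypothetical sizes (memory_size=8, val_depth=3) in place of the class attributes; tf.scatter_update requires a TF 1.x ref variable:

# Zero out selected rows of a variable via tf.scatter_update + tf.fill.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

mem_vals = tf.get_variable('mem_vals', initializer=tf.ones([4, 8, 3]))
entries_to_reset = tf.constant([0, 2])
num_updates = tf.size(entries_to_reset)
zeros = tf.tile(tf.expand_dims(tf.fill([8, 3], 0.0), 0), [num_updates, 1, 1])
reset_op = tf.scatter_update(mem_vals, entries_to_reset, zeros)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(reset_op)   # rows 0 and 2 of mem_vals are now all zeros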

Example 2: get_multi_dataset

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def get_multi_dataset(datasets, pmf=None):
  """Returns a Dataset that samples records from one or more Datasets.

  Args:
    datasets: A list of one or more Dataset objects to sample from.
    pmf: A tensor of shape [len(datasets)], the probabilities to sample each
      dataset with. This tensor is often constructed with the global_step. If
      this is None, we sample from the datasets uniformly at random.

  Returns:
    A Dataset object containing records from multiple datasets. Because this
    dataset iterates through other datasets, it is stateful; you must
    therefore call make_initializable_iterator instead of
    make_one_shot_iterator.
  """
  pmf = tf.fill([len(datasets)], 1.0 / len(datasets)) if pmf is None else pmf
  # Note: `get_next` is intentionally not called; each sampler is a function
  # that draws the next record when invoked by `categorical_case`.
  samplers = [d.repeat().make_one_shot_iterator().get_next for d in datasets]
  sample = lambda _: categorical_case(pmf, samplers)
  return tf.data.Dataset.from_tensors([]).repeat().map(sample) 
Developer: tensorflow | Project: tensor2tensor | Lines: 21 | Source: multi_problem_v2.py
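
A hedged usage sketch (it assumes get_multi_dataset and its categorical_case helper are imported from the tensor2tensor module above, and TF 1.x dataset iterators):

# Hypothetical usage: mix two toy datasets, sampling the first 80% of the time.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

ds_a = tf.data.Dataset.from_tensor_slices(tf.fill([100], 0))
ds_b = tf.data.Dataset.from_tensor_slices(tf.fill([100], 1))
mixed = get_multi_dataset([ds_a, ds_b], pmf=tf.constant([0.8, 0.2]))

# The mixed dataset is stateful, so an initializable iterator is required.
iterator = mixed.make_initializable_iterator()
next_record = iterator.get_next()
with tf.Session() as sess:
    sess.run(iterator.initializer)
    print([sess.run(next_record) for _ in range(10)])  # mostly 0s, some 1s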

Example 3: word_to_char_ids

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def word_to_char_ids(word, word_length):
  """Convert a string to a padded vector of character ids.

  If the word is shorter than `word_length - 2` bytes (two positions are
  reserved for the begin- and end-of-word markers), padding is added. If it
  is longer, the extra bytes are ignored.

  Args:
    word: <string> []
    word_length: Number of bytes to include per word.

  Returns:
    char_ids: <int32> [word_length]
  """
  char_ids = tf.to_int32(tf.decode_raw(word, tf.uint8))[:word_length - 2]
  padding = tf.fill([word_length - tf.shape(char_ids)[0] - 2], PAD_CHAR)
  char_ids = tf.concat([[BOW_CHAR], char_ids, [EOW_CHAR], padding], 0)
  char_ids.set_shape([word_length])
  return char_ids 
Developer: google-research | Project: language | Lines: 21 | Source: char_utils.py
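
A hedged worked example (the concrete values of BOW_CHAR, EOW_CHAR, and PAD_CHAR are defined in char_utils.py; the trace below only names them symbolically):

# Hypothetical trace of word_to_char_ids('cat', word_length=7). The UTF-8
# bytes of 'cat' are [99, 97, 116]; two slots are reserved for the begin-
# and end-of-word markers, and the remainder is padding:
#   [BOW_CHAR, 99, 97, 116, EOW_CHAR, PAD_CHAR, PAD_CHAR]
char_ids = word_to_char_ids(tf.constant('cat'), word_length=7)
with tf.Session() as sess:
    print(sess.run(char_ids))   # shape (7,)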

Example 4: _test_fill

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def _test_fill(dims, value_data, value_dtype):
    """ Use the fill op to create a tensor of value_data with constant dims."""

    value_data = np.array(value_data, dtype=value_dtype)
    # TF 1.13 TFLite convert method does not accept empty shapes
    if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'):
        with tf.Graph().as_default():
            value = array_ops.placeholder(dtype=value_dtype, name="value", shape=[])
            out = tf.fill(dims, value)
            compare_tflite_with_tvm([value_data], ["value"], [value], [out])

    with tf.Graph().as_default():
        input1 = array_ops.placeholder(dtype=value_dtype, name="input1", shape=dims)
        # Fill op gets converted to static tensor during conversion
        out = tf.fill(dims, value_data)
        out1 = tf.add(out, input1)
        input1_data = np.random.uniform(0, 5, size=dims).astype(value_dtype)
        compare_tflite_with_tvm([input1_data], ["input1"], [input1], [out1]) 
Developer: apache | Project: incubator-tvm | Lines: 20 | Source: test_forward.py

Example 5: _test_preprocessing_eval

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def _test_preprocessing_eval(self, image_height, image_width, output_height,
                               output_width):
    image = tf.fill((image_height, image_width, 3),
                    tf.constant(128, dtype=tf.uint8))
    params = benchmark_cnn.make_params()
    new_image = preprocessing.eval_image(image, output_height, output_width, 0,
                                         'bilinear', params.summary_verbosity)
    with self.test_session() as sess:
      new_image_value = sess.run(new_image)
    self.assertAllEqual(new_image_value,
                        np.full((output_height, output_width, 3), 128,
                                dtype=np.uint8)) 
Developer: tensorflow | Project: benchmarks | Lines: 14 | Source: benchmark_cnn_test.py

Example 6: _create_topk_unique

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def _create_topk_unique(inputs, k):
  """Creates the top k values in sorted order with indices.

  Args:
    inputs: A tensor with rank of 2. [batch_size, original_size].
    k: An integer, number of top elements to select.

  Returns:
    topk_r2: A tensor, the k largest elements. [batch_size, k].
    topk_indices_r2: A tensor, indices of the top k values. [batch_size, k].
  """
  height = inputs.shape[0]
  width = inputs.shape[1]
  neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32)
  ones = tf.ones([height, width], dtype=tf.float32)
  neg_inf_r2 = ones * neg_inf_r0
  inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs)

  # Select the current largest value k times and keep them in topk_r2. After
  # each selection, the chosen entries in `tmp` are overwritten with -inf so
  # they cannot be selected again.
  tmp = inputs
  topk_r2 = tf.zeros([height, k], dtype=tf.float32)
  for i in range(k):
    kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True)
    k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0),
                     [height, 1])
    topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2)
    ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width]))
    tmp = tf.where(ge_r2, neg_inf_r2, inputs)

  log2_ceiling = int(math.ceil(math.log(float(int(width)), 2)))
  next_power_of_two = 1 << log2_ceiling
  count_mask = next_power_of_two - 1
  mask_r0 = tf.constant(count_mask)
  mask_r2 = tf.fill([height, k], mask_r0)
  topk_r2_s32 = tf.bitcast(topk_r2, tf.int32)
  topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2)
  return topk_r2, topk_indices_r2 
Developer: tensorflow | Project: tensor2tensor | Lines: 41 | Source: beam_search.py
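
The final mask-and-bitcast step assumes a companion routine in beam_search.py has already packed each value's column index into the low mantissa bits of the float32 scores. A minimal sketch of how such an index is packed and then recovered, under that assumption:

# Pack a small integer index into the low bits of a float32's bit pattern,
# then recover it with a mask, mirroring the bitcast/bitwise_and step above.
# The inline packing here is for illustration only.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

width = 6
log2_ceiling = 3                       # ceil(log2(6))
count_mask = (1 << log2_ceiling) - 1   # 0b111: covers indices 0..5

score = tf.constant(3.5, dtype=tf.float32)
bits = tf.bitcast(score, tf.int32)
bits = tf.bitwise.bitwise_or(bits, 5)              # pack index 5
index = tf.bitwise.bitwise_and(bits, count_mask)   # recover index

with tf.Session() as sess:
    print(sess.run(index))   # 5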

Example 7: add_special_tokens

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def add_special_tokens(segment_tokens, cls_token, sep_token):
  """Adds special tokens to segment tokens.

  Appends a [SEP] token to each segment.
  Prepends a [CLS] token to the first segment.

  Args:
    segment_tokens (RaggedTensor): a 2-D RaggedTensor of strings. One row for
      each segment. Each row is a list of tokens.
    cls_token (unicode): string for CLS token.
    sep_token (unicode): string for SEP token.

  Returns:
    segment_tokens (RaggedTensor): a 2-D string RaggedTensor with the special
      tokens added.
  """
  num_rows = tf.to_int32(segment_tokens.nrows())

  # One SEP token for every row.
  sep_tokens = tf.fill([num_rows, 1], sep_token)

  # One CLS token in the first row.
  cls_tokens = tf.RaggedTensor.from_row_lengths([cls_token],
                                                row_lengths=tf.one_hot(
                                                    0, num_rows,
                                                    dtype=tf.int64))

  segment_tokens = tf.concat([cls_tokens, segment_tokens, sep_tokens], axis=1)
  return segment_tokens 
Developer: google-research | Project: language | Lines: 30 | Source: preprocess.py
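
A hedged usage sketch (it assumes a TF version with RaggedTensor support; the result is padded to a dense tensor only for display):

# Hypothetical usage of add_special_tokens on two segments.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

segments = tf.ragged.constant([['hello', 'world'], ['foo']])
out = add_special_tokens(segments, cls_token='[CLS]', sep_token='[SEP]')
# Expected rows (CLS on the first row only, SEP on every row):
#   ['[CLS]', 'hello', 'world', '[SEP]']
#   ['foo', '[SEP]']
with tf.Session() as sess:
    print(sess.run(out.to_tensor(default_value='')))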

Example 8: apply_masking

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def apply_masking(token_ids, target_token_ids, mask_indices, mask_token_id,
                  vocab_size):
  """Applies BERT masking.

  Args:
    token_ids (Tensor): 1-D Tensor of token IDs (ints)
    target_token_ids (Tensor): 1-D Tensor of token IDs (ints)
    mask_indices (Tensor): 1-D Tensor of indices (ints)
    mask_token_id (int): ID of [MASK] token.
    vocab_size (int): total size of vocabulary.

  Returns:
    token_ids_masked (Tensor): 1-D Tensor of token IDs, after target positions
      have been replaced with [MASK], a random token, or left alone.
  """
  num_to_mask = tf.size(mask_indices)

  mask_token_ids = tf.fill([num_to_mask], tf.cast(mask_token_id, tf.int64))
  random_token_ids = tf.random.uniform([num_to_mask],
                                       minval=0,
                                       maxval=vocab_size,
                                       dtype=tf.int64)

  # Uniform [0, 1) floats.
  randomness = tf.random.uniform([num_to_mask])

  # With probability 0.8, replace the target token with [MASK].
  mask_values = tf.where(randomness < 0.8, mask_token_ids, target_token_ids)

  # With probability 0.1 (randomness > 0.9), replace with a random token;
  # the remaining 10% of targets keep their original token.
  mask_values = tf.where(randomness > 0.9, random_token_ids, mask_values)

  # Mask out token_ids at mask_indices.
  token_ids_masked = tf.tensor_scatter_update(token_ids, mask_indices[:, None],
                                              mask_values)

  return token_ids_masked 
Developer: google-research | Project: language | Lines: 40 | Source: preprocess.py
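
A hedged usage sketch (the token IDs, the [MASK] id 103, and the vocab size 30522 below are BERT-like placeholders, not values taken from this module):

# Hypothetical usage of apply_masking: mask positions 1 and 3 of a
# 5-token sequence.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

token_ids = tf.constant([11, 12, 13, 14, 15], dtype=tf.int64)
mask_indices = tf.constant([1, 3])
target_token_ids = tf.gather(token_ids, mask_indices)

token_ids_masked = apply_masking(token_ids, target_token_ids, mask_indices,
                                 mask_token_id=103, vocab_size=30522)
with tf.Session() as sess:
    print(sess.run(token_ids_masked))
    # e.g. [11, 103, 13, 103, 15]; each target becomes [MASK] w.p. 0.8,
    # a random token w.p. 0.1, or stays unchanged w.p. 0.1.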

Example 9: createColorfulTestImage

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def createColorfulTestImage(self):
    ch255 = tf.fill([1, 100, 200, 1], tf.constant(255, dtype=tf.uint8))
    ch128 = tf.fill([1, 100, 200, 1], tf.constant(128, dtype=tf.uint8))
    ch0 = tf.fill([1, 100, 200, 1], tf.constant(0, dtype=tf.uint8))
    imr = tf.concat([ch255, ch0, ch0], 3)      # red: (255, 0, 0)
    img = tf.concat([ch255, ch255, ch0], 3)    # yellow: (255, 255, 0)
    imb = tf.concat([ch255, ch0, ch255], 3)    # magenta: (255, 0, 255)
    imw = tf.concat([ch128, ch128, ch128], 3)  # gray: (128, 128, 128)
    imu = tf.concat([imr, img], 2)
    imd = tf.concat([imb, imw], 2)
    im = tf.concat([imu, imd], 1)
    return im 
Developer: tensorflow | Project: models | Lines: 14 | Source: preprocessor_test.py

Example 10: testRandomPixelValueScale

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def testRandomPixelValueScale(self):

    def graph_fn():
      preprocessing_options = []
      preprocessing_options.append((preprocessor.normalize_image, {
          'original_minval': 0,
          'original_maxval': 255,
          'target_minval': 0,
          'target_maxval': 1
      }))
      preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
      images = self.createTestImages()
      tensor_dict = {fields.InputDataFields.image: images}
      tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
      images_min = tf.cast(images, dtype=tf.float32) * 0.9 / 255.0
      images_max = tf.cast(images, dtype=tf.float32) * 1.1 / 255.0
      images = tensor_dict[fields.InputDataFields.image]
      values_greater = tf.greater_equal(images, images_min)
      values_less = tf.less_equal(images, images_max)
      values_true = tf.fill([1, 4, 4, 3], True)
      return [values_greater, values_less, values_true]

    (values_greater_, values_less_,
     values_true_) = self.execute_cpu(graph_fn, [])
    self.assertAllClose(values_greater_, values_true_)
    self.assertAllClose(values_less_, values_true_) 
Developer: tensorflow | Project: models | Lines: 28 | Source: preprocessor_test.py

Example 11: _test_fill

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def _test_fill(in_shape):
    """ Use the fill op to create a tensor of ones with non-constant shape."""

    with tf.Graph().as_default():
        tf.ones(shape=in_shape, dtype='float32')
        compare_tf_with_tvm(in_shape, [], 'ones:0', opt_level=1) 
Developer: apache | Project: incubator-tvm | Lines: 8 | Source: test_forward.py

Example 12: _test_fill_from_tensor

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def _test_fill_from_tensor(in_shape):
    """ Use the fill op to create a tensor of ones with non-constant shape.
        Some extra ops need to be added here to prevent the graph from
        being fully constant and folded away."""

    data = np.random.uniform(size=in_shape).astype('float32')

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(
            shape=[in_shape[0], in_shape[1], None, None], dtype=data.dtype)

        x = tf.ones(shape=2 * tf.shape(in_data), dtype=data.dtype)
        y = tf.math.add(in_data, tf.reduce_mean(x), name='out1')
        compare_tf_with_tvm(data, 'Placeholder:0', 'out1:0') 
Developer: apache | Project: incubator-tvm | Lines: 16 | Source: test_forward.py

Example 13: _test_fill_symbolic_inputs

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def _test_fill_symbolic_inputs(in_shape_data, in_value_data, dtype):
    with tf.Graph().as_default():
        in_shape = tf.placeholder(shape=[in_shape_data.shape[0]], dtype=in_shape_data.dtype)
        in_value = tf.placeholder(shape=(), dtype=dtype)
        out = tf.fill(in_shape, in_value)
        for mode in ['debug', 'vm']:
            compare_tf_with_tvm([in_shape_data, in_value_data],
                                [in_shape.name, in_value.name],
                                out.name, mode=mode)
Developer: apache | Project: incubator-tvm | Lines: 9 | Source: test_forward.py

Example 14: encode

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def encode(self, sequence, sequence_length):
    """Hierarchically encodes the input sequences, returning a single embedding.

    Each sequence should be padded per-segment. For example, a sequence with
    three segments [1, 2, 3], [4, 5], [6, 7, 8, 9] and a `max_seq_len` of 12
    should be input as `sequence = [1, 2, 3, 0, 4, 5, 0, 0, 6, 7, 8, 9]` with
    `sequence_length = [3, 2, 4]`.

    Args:
      sequence: A batch of (padded) sequences, sized
        `[batch_size, max_seq_len, input_depth]`.
      sequence_length: A batch of sequence lengths. May be sized
        `[batch_size, level_lengths[0]]` or `[batch_size]`. If the latter,
        each length must either equal `max_seq_len` or 0. In this case, the
        segment lengths are assumed to be constant and the total length will be
        evenly divided amongst the segments.

    Returns:
      embedding: A batch of embeddings, sized `[batch_size, N]`.
    """
    batch_size = int(sequence.shape[0])
    sequence_length = lstm_utils.maybe_split_sequence_lengths(
        sequence_length, np.prod(self._level_lengths[1:]),
        self._total_length)

    for level, (num_splits, h_encoder) in enumerate(
        self._hierarchical_encoders):
      split_seqs = tf.split(sequence, num_splits, axis=1)
      # In the first level, we use the input `sequence_length`. After that,
      # we use the full embedding sequences.
      if level:
        sequence_length = tf.fill(
            [batch_size, num_splits], split_seqs[0].shape[1])
      split_lengths = tf.unstack(sequence_length, axis=1)
      embeddings = [
          h_encoder.encode(s, l) for s, l in zip(split_seqs, split_lengths)]
      sequence = tf.stack(embeddings, axis=1)

    with tf.control_dependencies([tf.assert_equal(tf.shape(sequence)[1], 1)]):
      return sequence[:, 0]


# DECODERS 
Developer: magenta | Project: magenta | Lines: 45 | Source: lstm_models.py
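
A hedged sketch of the per-segment padding convention from the docstring (the encoder instance in the commented-out call is hypothetical):

# Building the docstring's example: segments [1, 2, 3], [4, 5], [6, 7, 8, 9]
# padded per-segment to length 4, with a dummy input depth of 1.
import numpy as np

sequence = np.array([1, 2, 3, 0, 4, 5, 0, 0, 6, 7, 8, 9],
                    dtype=np.float32).reshape(1, 12, 1)  # [batch, time, depth]
sequence_length = np.array([[3, 2, 4]], dtype=np.int32)  # per-segment lengths
# embedding = hierarchical_encoder.encode(sequence, sequence_length)  # hypothetical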

Example 15: _hierarchical_decode

# Required module: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import fill [as alias]
def _hierarchical_decode(self, z, base_decode_fn):
    """Depth first decoding from `z`, passing final embeddings to base fn."""
    batch_size = z.shape[0]
    # Subtract 1 for the core decoder level.
    num_levels = len(self._level_lengths) - 1

    hparams = self.hparams
    batch_size = hparams.batch_size

    def recursive_decode(initial_input, path=None):
      """Recursive hierarchical decode function."""
      path = path or []
      level = len(path)

      if level == num_levels:
        with tf.variable_scope('core_decoder', reuse=tf.AUTO_REUSE):
          return base_decode_fn(initial_input, path)

      scope = tf.VariableScope(
          tf.AUTO_REUSE, 'decoder/hierarchical_level_%d' % level)
      num_steps = self._level_lengths[level]
      with tf.variable_scope(scope):
        state = lstm_utils.initial_cell_state_from_embedding(
            self._hier_cells[level], initial_input, name='initial_state')
      if level not in self._disable_autoregression:
        # The initial input should be the same size as the tensors returned by
        # next level.
        if self._hierarchical_encoder:
          input_size = self._hierarchical_encoder.level(0).output_depth
        elif level == num_levels - 1:
          input_size = sum(tf.nest.flatten(self._core_decoder.state_size))
        else:
          input_size = sum(
              tf.nest.flatten(self._hier_cells[level + 1].state_size))
        next_input = tf.zeros([batch_size, input_size])
      lower_level_embeddings = []
      for i in range(num_steps):
        if level in self._disable_autoregression:
          next_input = tf.zeros([batch_size, 1])
        else:
          next_input = tf.concat([next_input, initial_input], axis=1)
        with tf.variable_scope(scope):
          output, state = self._hier_cells[level](next_input, state, scope)
        next_input = recursive_decode(output, path + [i])
        lower_level_embeddings.append(next_input)
      if self._hierarchical_encoder:
        # Return the encoding of the outputs using the appropriate level of the
        # hierarchical encoder.
        enc_level = num_levels - level
        return self._hierarchical_encoder.level(enc_level).encode(
            sequence=tf.stack(lower_level_embeddings, axis=1),
            sequence_length=tf.fill([batch_size], num_steps))
      else:
        # Return the final state.
        return tf.concat(tf.nest.flatten(state), axis=-1)

    return recursive_decode(z) 
Developer: magenta | Project: magenta | Lines: 59 | Source: lstm_models.py


Note: The tensorflow.compat.v1.fill method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.