

Python tensorflow.strided_slice Method Code Examples

This article collects typical usage examples of the tensorflow.strided_slice method in Python. If you are wondering what tensorflow.strided_slice does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also browse further usage examples from the tensorflow package.


Below are 15 code examples of the tensorflow.strided_slice method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
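Before the individual examples, here is a minimal sketch of what tf.strided_slice(input_, begin, end, strides) computes. It is written for TensorFlow 1.x graph mode to match the examples below, and the tensor values are purely illustrative:

import tensorflow as tf

# Illustrative input: a 3x4 matrix.
t = tf.constant([[1, 2, 3, 4],
                 [5, 6, 7, 8],
                 [9, 10, 11, 12]])

# Keep rows 0 and 1 (end is exclusive) and every second column from column 0.
s = tf.strided_slice(t, begin=[0, 0], end=[2, 4], strides=[1, 2])

with tf.Session() as sess:
    print(sess.run(s))  # [[1 3]
                        #  [5 7]]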

Example 1: read

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def read(self, filename_queue):
        data, label = read_tfrecord(
            filename_queue,
            {'nodes': [-1, self._grapher.num_node_channels],
             'neighborhood': [self._num_nodes, self._neighborhood_size]})

        nodes = data['nodes']

        # Convert the neighborhood to a feature map.
        def _map_features(node):
            i = tf.maximum(node, 0)
            positive = tf.strided_slice(nodes, [i], [i+1], [1])
            negative = tf.zeros([1, self._grapher.num_node_channels])

            # Check the original (possibly negative) index so that padding
            # entries (-1) map to zeros instead of node 0's features.
            return tf.where(node < 0, negative, positive)

        data = tf.reshape(data['neighborhood'], [-1])
        data = tf.cast(data, tf.int32)
        data = tf.map_fn(_map_features, data, dtype=tf.float32)
        shape = [self._num_nodes, self._neighborhood_size,
                 self._grapher.num_node_channels]
        data = tf.reshape(data, shape)

        return Record(data, shape, label) 
Author: rusty1s, Project: graph-based-image-classification, Lines: 26, Source: patchy.py

Example 2: node_sequence

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def node_sequence(sequence, width, stride):
    """Normalizes a given sequence to have a fixed width by striding over the
    sequence. The returned sequence is padded with -1 if its length is lower
    than the requested width.

    Args:
        sequence: A 1d tensor.
        width: The length of the returned sequence.
        stride: The distance between two selected nodes.

    Returns:
        A 1d tensor.
    """

    with tf.name_scope('node_sequence', values=[sequence, width, stride]):
        # Stride the sequence based on the given stride size.
        sequence = tf.strided_slice(sequence, [0], [width*stride], [stride])

        # Pad right with -1 if the sequence length is lower than width.
        padding = tf.ones([width - tf.shape(sequence)[0]], dtype=tf.int32)
        padding = tf.negative(padding)
        sequence = tf.concat([sequence, padding], 0)

    return sequence 
Author: rusty1s, Project: graph-based-image-classification, Lines: 26, Source: node_sequence.py
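A hypothetical call to node_sequence (the ids below are made up, and the node_sequence function above is assumed to be in scope) showing both the striding and the -1 padding:

import tensorflow as tf

# 5 node ids, normalized to width 4 with stride 2: the strided slice keeps the
# ids at positions 0, 2 and 4, and the result is padded with -1 up to width 4.
sequence = tf.constant([10, 11, 12, 13, 14], dtype=tf.int32)
result = node_sequence(sequence, width=4, stride=2)

with tf.Session() as sess:
    print(sess.run(result))  # [10 12 14 -1]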

Example 3: embedding_layer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def embedding_layer(self):
        with tf.name_scope("word_embeddings"):
            self.encoder_embeddings = tf.Variable(
                initial_value=np.array(self.encoder_embeddings_matrix, dtype=np.float32),
                dtype=tf.float32, trainable=False)
            self.enc_embed_input = tf.nn.embedding_lookup(self.encoder_embeddings, self.input_data)
            # self.enc_embed_input = tf.nn.dropout(self.enc_embed_input, keep_prob=self.keep_prob)

            with tf.name_scope("decoder_inputs"):
                self.decoder_embeddings = tf.Variable(
                    initial_value=np.array(self.decoder_embeddings_matrix, dtype=np.float32),
                    dtype=tf.float32, trainable=False)
                
                keep = tf.where(
                    tf.random_uniform([self.batch_size, self.decoder_num_tokens]) < self.word_dropout_keep_prob,
                    tf.fill([self.batch_size, self.decoder_num_tokens], True),
                    tf.fill([self.batch_size, self.decoder_num_tokens], False))
                ending = tf.cast(keep, dtype=tf.int32) * self.target_data
                ending = tf.strided_slice(ending, [0, 0], [self.batch_size, -1], [1, 1],
                                          name='slice_input')  # An end index of -1 drops the last token of each sequence
                self.dec_input = tf.concat([tf.fill([self.batch_size, 1], self.decoder_word_index['GO']), ending], 1,
                                           name='dec_input')
                self.dec_embed_input = tf.nn.embedding_lookup(self.decoder_embeddings, self.dec_input)
                # self.dec_embed_input = tf.nn.dropout(self.dec_embed_input, keep_prob=self.keep_prob) 
Author: HareeshBahuleyan, Project: tf-var-attention, Lines: 26, Source: ved_varAttn.py
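The slice-and-concat pattern above builds the decoder input by dropping the last target token and prepending the GO id; a hypothetical numeric illustration (the token ids and the GO id of 1 are made up):

import tensorflow as tf

# Two hypothetical target sequences of length 4, with GO id 1.
target = tf.constant([[4, 5, 6, 2],
                      [7, 8, 9, 2]])
ending = tf.strided_slice(target, [0, 0], [2, -1], [1, 1])  # drop last column
dec_input = tf.concat([tf.fill([2, 1], 1), ending], 1)      # prepend GO

with tf.Session() as sess:
    print(sess.run(dec_input))  # [[1 4 5 6]
                                #  [1 7 8 9]]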

Example 4: embedding_layer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def embedding_layer(self):
        with tf.name_scope("word_embeddings"):
            self.encoder_embeddings = tf.Variable(
                initial_value=np.array(self.encoder_embeddings_matrix, dtype=np.float32),
                dtype=tf.float32, trainable=False)
            self.enc_embed_input = tf.nn.embedding_lookup(self.encoder_embeddings, self.input_data)
            # self.enc_embed_input = tf.nn.dropout(self.enc_embed_input, keep_prob=self.keep_prob)

            with tf.name_scope("decoder_inputs"):
                self.decoder_embeddings = tf.Variable(
                    initial_value=np.array(self.decoder_embeddings_matrix, dtype=np.float32),
                    dtype=tf.float32, trainable=False)
                ending = tf.strided_slice(self.target_data, [0, 0], [self.batch_size, -1], [1, 1],
                                          name='slice_input')  # An end index of -1 drops the last token of each sequence
                self.dec_input = tf.concat([tf.fill([self.batch_size, 1], self.decoder_word_index['GO']), ending], 1,
                                           name='dec_input')
                self.dec_embed_input = tf.nn.embedding_lookup(self.decoder_embeddings, self.dec_input)
                # self.dec_embed_input = tf.nn.dropout(self.dec_embed_input, keep_prob=self.keep_prob) 
Author: HareeshBahuleyan, Project: tf-var-attention, Lines: 20, Source: ded_detAttn.py

Example 5: embedding_layer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def embedding_layer(self):
        with tf.name_scope("word_embeddings"):
            self.encoder_embeddings = tf.Variable(
                initial_value=np.array(self.encoder_embeddings_matrix, dtype=np.float32),
                dtype=tf.float32, trainable=False)
            self.enc_embed_input = tf.nn.embedding_lookup(self.encoder_embeddings, self.input_data)
            # self.enc_embed_input = tf.nn.dropout(self.enc_embed_input, keep_prob=self.keep_prob)

            with tf.name_scope("decoder_inputs"):
                self.decoder_embeddings = tf.Variable(
                    initial_value=np.array(self.decoder_embeddings_matrix, dtype=np.float32),
                    dtype=tf.float32, trainable=False)
                keep = tf.where(
                    tf.random_uniform([self.batch_size, self.decoder_num_tokens]) < self.word_dropout_keep_prob,
                    tf.fill([self.batch_size, self.decoder_num_tokens], True),
                    tf.fill([self.batch_size, self.decoder_num_tokens], False))
                ending = tf.cast(keep, dtype=tf.int32) * self.target_data
                ending = tf.strided_slice(ending, [0, 0], [self.batch_size, -1], [1, 1],
                                          name='slice_input')  # An end index of -1 drops the last token of each sequence
                self.dec_input = tf.concat([tf.fill([self.batch_size, 1], self.decoder_word_index['GO']), ending], 1,
                                           name='dec_input')
                self.dec_embed_input = tf.nn.embedding_lookup(self.decoder_embeddings, self.dec_input)
                # self.dec_embed_input = tf.nn.dropout(self.dec_embed_input, keep_prob=self.keep_prob) 
Author: HareeshBahuleyan, Project: tf-var-attention, Lines: 25, Source: ved_detAttn.py

Example 6: compute_voxel_group

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def compute_voxel_group(tensor, group_id):
  """Extracts voxel group group_id (1-indexed) from (3, 4, or 5-dim) tensor."""
  assert group_id >= 1 and group_id <= 8
  group_id -= 1
  begin = [0, group_id // 4, group_id // 2 % 2, group_id % 2, 0]  # integer division keeps the indices ints under Python 3
  stride = [1, 2, 2, 2, 1]

  dim = len(tensor.shape)
  if dim == 3:
    begin = begin[1:4]
    stride = stride[1:4]
  elif dim == 4:
    begin = begin[:-1]
    stride = stride[:-1]

  return tf.strided_slice(tensor, begin, tensor.shape, stride) 
Author: angeladai, Project: ScanComplete, Lines: 18, Source: util.py
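A hypothetical sanity check (the 4x4x4 volume is an arbitrary choice, and compute_voxel_group above is assumed to be in scope): the eight voxel groups each pick one parity combination along the three axes, so together they partition the volume.

import numpy as np
import tensorflow as tf

# For a 4x4x4 volume, each of the eight groups has shape 2x2x2, and the eight
# groups together contain all 64 voxels exactly once.
volume = tf.constant(np.arange(64).reshape(4, 4, 4), dtype=tf.float32)
groups = [compute_voxel_group(volume, gid) for gid in range(1, 9)]

with tf.Session() as sess:
    outs = sess.run(groups)
    print([g.shape for g in outs])    # eight (2, 2, 2) blocks
    print(sum(g.size for g in outs))  # 64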

Example 7: parser

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def parser(self, value):
    """Parse a Cifar10 record from value.

    Output images are in [height, width, depth] layout.
    """
    # Dimensions of the images in the CIFAR-10 dataset.
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    label_bytes = 1
    image_bytes = HEIGHT * WIDTH * DEPTH
    # Every record consists of a label followed by the image, with a
    # fixed number of bytes for each.
    record_bytes = label_bytes + image_bytes

    # Convert from a string to a vector of uint8 that is record_bytes long.
    record_as_bytes = tf.decode_raw(value, tf.uint8)

    # The first bytes represent the label, which we convert from
    # uint8->int32.
    label = tf.cast(
        tf.strided_slice(record_as_bytes, [0], [label_bytes]), tf.int32)

    label.set_shape([1])

    # The remaining bytes after the label represent the image, which
    # we reshape from [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(
        tf.strided_slice(record_as_bytes, [label_bytes], [record_bytes]),
        [3, 32, 32])
    # Convert from [depth, height, width] to [height, width, depth].
    # This puts data in a compatible layout with TF image preprocessing APIs.
    image = tf.transpose(depth_major, [1, 2, 0])

    # Do custom preprocessing here.
    image = self.preprocess(image)

    return image, label 
Author: ringringyi, Project: DOTA_models, Lines: 39, Source: cifar10.py
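For concreteness, with the standard CIFAR-10 binary format (which the HEIGHT, WIDTH and DEPTH constants above are assumed to match), each record is 1 + 32 * 32 * 3 = 3073 bytes:

# Standard CIFAR-10 layout (see http://www.cs.toronto.edu/~kriz/cifar.html).
HEIGHT, WIDTH, DEPTH = 32, 32, 3
label_bytes = 1
image_bytes = HEIGHT * WIDTH * DEPTH      # 3072 image bytes per record
record_bytes = label_bytes + image_bytes  # 3073 bytes per record in total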

Example 8: AddCrossEntropy

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def AddCrossEntropy(batch_size, n):
  """Adds a cross entropy cost function."""
  cross_entropies = []
  def _Pass():
    return tf.constant(0, dtype=tf.float32, shape=[1])

  for beam_id in range(batch_size):
    beam_gold_slot = tf.reshape(
        tf.strided_slice(n['gold_slot'], [beam_id], [beam_id + 1]), [1])
    def _ComputeCrossEntropy():
      """Adds ops to compute cross entropy of the gold path in a beam."""
      # Requires a cast so that UnsortedSegmentSum, in the gradient,
      # is happy with the type of its input 'segment_ids', which
      # must be int32.
      idx = tf.cast(
          tf.reshape(
              tf.where(tf.equal(n['beam_ids'], beam_id)), [-1]), tf.int32)
      beam_scores = tf.reshape(tf.gather(n['all_path_scores'], idx), [1, -1])
      num = tf.shape(idx)
      return tf.nn.softmax_cross_entropy_with_logits(
          labels=tf.expand_dims(
              tf.sparse_to_dense(beam_gold_slot, num, [1.], 0.), 0),
          logits=beam_scores)
    # The conditional here is needed to deal with the last few batches of the
    # corpus which can contain -1 in beam_gold_slot for empty batch slots.
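    # Note: `cf` is an alias from the original source file (presumably
    # tensorflow.python.ops.control_flow_ops); tf.cond behaves equivalently.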
    cross_entropies.append(cf.cond(
        beam_gold_slot[0] >= 0, _ComputeCrossEntropy, _Pass))
  return {'cross_entropy': tf.div(tf.add_n(cross_entropies), batch_size)} 
Author: ringringyi, Project: DOTA_models, Lines: 30, Source: structured_graph_builder.py

Example 9: get_horizen_minAreaRectangle

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def get_horizen_minAreaRectangle(boxs, with_label=True):

    rpn_proposals_boxes_convert = tf.py_func(forward_convert,
                                             inp=[boxs, with_label],
                                             Tout=tf.float32)
    if with_label:
        rpn_proposals_boxes_convert = tf.reshape(rpn_proposals_boxes_convert, [-1, 9])

        boxes_shape = tf.shape(rpn_proposals_boxes_convert)
        x_list = tf.strided_slice(rpn_proposals_boxes_convert, begin=[0, 0], end=[boxes_shape[0], boxes_shape[1] - 1],
                                  strides=[1, 2])
        y_list = tf.strided_slice(rpn_proposals_boxes_convert, begin=[0, 1], end=[boxes_shape[0], boxes_shape[1] - 1],
                                  strides=[1, 2])

        label = tf.unstack(rpn_proposals_boxes_convert, axis=1)[-1]

        y_max = tf.reduce_max(y_list, axis=1)
        y_min = tf.reduce_min(y_list, axis=1)
        x_max = tf.reduce_max(x_list, axis=1)
        x_min = tf.reduce_min(x_list, axis=1)
        return tf.transpose(tf.stack([x_min, y_min, x_max, y_max, label], axis=0))
    else:
        rpn_proposals_boxes_convert = tf.reshape(rpn_proposals_boxes_convert, [-1, 8])

        boxes_shape = tf.shape(rpn_proposals_boxes_convert)
        x_list = tf.strided_slice(rpn_proposals_boxes_convert, begin=[0, 0], end=[boxes_shape[0], boxes_shape[1]],
                                  strides=[1, 2])
        y_list = tf.strided_slice(rpn_proposals_boxes_convert, begin=[0, 1], end=[boxes_shape[0], boxes_shape[1]],
                                  strides=[1, 2])

        y_max = tf.reduce_max(y_list, axis=1)
        y_min = tf.reduce_min(y_list, axis=1)
        x_max = tf.reduce_max(x_list, axis=1)
        x_min = tf.reduce_min(x_list, axis=1)

    return tf.transpose(tf.stack([x_min, y_min, x_max, y_max], axis=0)) 
Author: DetectionTeamUCAS, Project: R2CNN_Faster-RCNN_Tensorflow, Lines: 38, Source: boxes_utils.py
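The stride of 2 over the last axis is what separates the x- and y-coordinates; a small hypothetical illustration with one made-up quadrilateral:

import tensorflow as tf

# One box stored as [x1, y1, x2, y2, x3, y3, x4, y4]; a stride of 2 along the
# last axis splits the corners into x- and y-coordinates.
quads = tf.constant([[0., 5., 10., 5., 10., 15., 0., 15.]])
shape = tf.shape(quads)
xs = tf.strided_slice(quads, begin=[0, 0], end=[shape[0], shape[1]], strides=[1, 2])
ys = tf.strided_slice(quads, begin=[0, 1], end=[shape[0], shape[1]], strides=[1, 2])

with tf.Session() as sess:
    print(sess.run([xs, ys]))  # [[0. 10. 10. 0.]] and [[5. 5. 15. 15.]]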

Example 10: local_flatten

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
from functools import reduce  # reduce is not a builtin in Python 3

def local_flatten(x, kernel_size, name=None):
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    assert isinstance(kernel_size, tuple)
    x = [[tf.strided_slice(x, (0, i, j), tf.shape(x)[:-1], (1,) + kernel_size)
          for j in range(kernel_size[1])] for i in range(kernel_size[0])]
    return tf.concat(reduce(lambda x, y: x + y, x), axis=-1, name=name) 
Author: taehoonlee, Project: tensornets, Lines: 9, Source: ops.py

Example 11: ptb_input_producer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def ptb_input_producer(raw_data, batch_size, num_steps, shuffle=False,
                       randomize=False):
  """
  Args:
    raw_data: np tensor of size [num_words].
    batch_size: self-explained.
    num_steps: number of BPTT steps.
  """

  num_batches_per_epoch = ((np.size(raw_data) // batch_size) - 1) // num_steps
  raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)

  data_len = tf.size(raw_data)
  batch_len = data_len // batch_size
  data = tf.reshape(raw_data[0 : batch_size * batch_len],
                    [batch_size, batch_len])

  epoch_size = (batch_len - 1) // num_steps
  with tf.device("/cpu:0"):
    epoch_size = tf.identity(epoch_size, name="epoch_size")
    
    if randomize:
      i = tf.random_uniform([1], minval=0, maxval=batch_len - num_steps,
                            dtype=tf.int32)
      i = tf.reduce_sum(i)
      x = tf.strided_slice(
        data, [0, i], [batch_size, i + num_steps])
      y = tf.strided_slice(
        data, [0, i + 1], [batch_size, i + num_steps + 1])
    else:
      i = tf.train.range_input_producer(epoch_size, shuffle=shuffle).dequeue()
      x = tf.strided_slice(
        data, [0, i * num_steps], [batch_size, (i + 1) * num_steps])
      y = tf.strided_slice(
        data, [0, i * num_steps + 1], [batch_size, (i + 1) * num_steps + 1])

    x.set_shape([batch_size, num_steps])
    y.set_shape([batch_size, num_steps])

  return x, y, num_batches_per_epoch 
Author: muhanzhang, Project: D-VAE, Lines: 42, Source: data_utils.py

Example 12: read

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def read(self, filename_queue):
        """Reads and parses examples from CIFAR-10 data files."""

        # Read a record, getting filenames from the filename_queue. No header
        # or footer in the CIFAR-10 format, so we leave header_bytes and
        # footer_bytes at their default of 0.
        reader = tf.FixedLengthRecordReader(record_bytes=RECORD_BYTES)
        _, value = reader.read(filename_queue)

        # Convert from a string to a vector of uint8 that is RECORD_BYTES long.
        record_bytes = tf.decode_raw(value, tf.uint8)

        with tf.name_scope('read_label', values=[record_bytes]):
            # The first bytes represent the label, which we convert from uint8
            # to int64.
            label = tf.strided_slice(record_bytes, [0], [LABEL_BYTES], [1])
            label = tf.cast(label, tf.int64)

        with tf.name_scope('read_image', values=[record_bytes]):
            # The remaining bytes after the label represent the image, which we
            # reshape from [depth * height * width] to [depth, height, width].
            image = tf.strided_slice(
                record_bytes, [LABEL_BYTES], [RECORD_BYTES], [1])
            image = tf.reshape(image, [DEPTH, HEIGHT, WIDTH])

            # Convert from [depth, height, width] to [height, width, depth].
            image = tf.transpose(image, [1, 2, 0])

            # Convert from uint8 to float32.
            image = tf.cast(image, tf.float32)

        return Record(image, [HEIGHT, WIDTH, DEPTH], label) 
Author: rusty1s, Project: graph-based-image-classification, Lines: 34, Source: cifar_10.py

Example 13: test_node_sequence

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def test_node_sequence(self):
        neighborhood = tf.constant([
            [1, 0, 3, -1],
            [2, 1, 0, -1],
        ])

        nodes = tf.constant([
            [0.5, 0.5, 0.5],
            [1.5, 1.5, 1.5],
            [2.5, 2.5, 2.5],
            [3.5, 3.5, 3.5],
        ])

        expected = [
            [[1.5, 1.5, 1.5], [0.5, 0.5, 0.5], [3.5, 3.5, 3.5], [0, 0, 0]],
            [[2.5, 2.5, 2.5], [1.5, 1.5, 1.5], [0.5, 0.5, 0.5], [0, 0, 0]],
        ]

        def _map_features(node):
            i = tf.maximum(node, 0)
            positive = tf.strided_slice(nodes, [i], [i+1], [1])
            negative = tf.zeros([1, 3])

            return tf.where(node < 0, negative, positive)

        with self.test_session() as sess:
            data = tf.reshape(neighborhood, [-1])
            data = tf.map_fn(_map_features, data, dtype=tf.float32)
            data = tf.reshape(data, [2, 4, 3])

            self.assertAllEqual(data.eval(), expected) 
Author: rusty1s, Project: graph-based-image-classification, Lines: 33, Source: patchy_test.py

Example 14: get_horizen_minAreaRectangle

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def get_horizen_minAreaRectangle(boxes, with_label=True):

    if with_label:
        boxes = tf.reshape(boxes, [-1, 9])

        boxes_shape = tf.shape(boxes)
        x_list = tf.strided_slice(boxes, begin=[0, 0], end=[boxes_shape[0], boxes_shape[1] - 1],
                                  strides=[1, 2])
        y_list = tf.strided_slice(boxes, begin=[0, 1], end=[boxes_shape[0], boxes_shape[1] - 1],
                                  strides=[1, 2])

        label = tf.unstack(boxes, axis=1)[-1]

        y_max = tf.reduce_max(y_list, axis=1)
        y_min = tf.reduce_min(y_list, axis=1)
        x_max = tf.reduce_max(x_list, axis=1)
        x_min = tf.reduce_min(x_list, axis=1)
        return tf.transpose(tf.stack([x_min, y_min, x_max, y_max, label], axis=0))
    else:
        boxes = tf.reshape(boxes, [-1, 8])

        boxes_shape = tf.shape(boxes)
        x_list = tf.strided_slice(boxes, begin=[0, 0], end=[boxes_shape[0], boxes_shape[1]],
                                  strides=[1, 2])
        y_list = tf.strided_slice(boxes, begin=[0, 1], end=[boxes_shape[0], boxes_shape[1]],
                                  strides=[1, 2])

        y_max = tf.reduce_max(y_list, axis=1)
        y_min = tf.reduce_min(y_list, axis=1)
        x_max = tf.reduce_max(x_list, axis=1)
        x_min = tf.reduce_min(x_list, axis=1)

    return tf.transpose(tf.stack([x_min, y_min, x_max, y_max], axis=0)) 
Author: Thinklab-SJTU, Project: R3Det_Tensorflow, Lines: 35, Source: coordinate_convert.py

Example 15: ptb_producer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import strided_slice [as alias]
def ptb_producer(raw_data, batch_size, num_steps, name=None):
    """Iterate on the raw PTB data.
    This chunks up raw_data into batches of examples and returns Tensors that
    are drawn from these batches.
    Args:
      raw_data: one of the raw data outputs from ptb_raw_data.
      batch_size: int, the batch size.
      num_steps: int, the number of unrolls.
      name: the name of this operation (optional).
    Returns:
      A pair of Tensors, each shaped [batch_size, num_steps]. The second element
      of the tuple is the same data time-shifted to the right by one.
    Raises:
      tf.errors.InvalidArgumentError: if batch_size or num_steps are too high.
    """
    with tf.name_scope(name, "PTBProducer", [raw_data, batch_size, num_steps]):
        raw_data = tf.convert_to_tensor(
            raw_data, name="raw_data", dtype=tf.int32)

        data_len = tf.size(raw_data)
        batch_len = data_len // batch_size
        data = tf.reshape(raw_data[0: batch_size * batch_len],
                          [batch_size, batch_len])

        epoch_size = (batch_len - 1) // num_steps
        assertion = tf.assert_positive(
            epoch_size,
            message="epoch_size == 0, decrease batch_size or num_steps")
        with tf.control_dependencies([assertion]):
            epoch_size = tf.identity(epoch_size, name="epoch_size")

        i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
        x = tf.strided_slice(data, [0, i * num_steps],
                             [batch_size, (i + 1) * num_steps])
        x.set_shape([batch_size, num_steps])
        y = tf.strided_slice(data, [0, i * num_steps + 1],
                             [batch_size, (i + 1) * num_steps + 1])
        y.set_shape([batch_size, num_steps])
        return x, y 
Author: qinyao-he, Project: bit-rnn, Lines: 41, Source: reader.py
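A hypothetical run (the token values and sizes are made up, and ptb_producer above is assumed to be in scope) that makes the chunking concrete: with 11 tokens, batch_size=2 and num_steps=2, batch_len is 5, epoch_size is 2, and each (x, y) pair is a [2, 2] window with y holding the next-token targets.

import numpy as np
import tensorflow as tf

raw = np.arange(11)  # tokens 0..10
x, y = ptb_producer(raw, batch_size=2, num_steps=2)

with tf.Session() as sess:
    # range_input_producer needs queue runners in TF 1.x.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    print(sess.run([x, y]))  # first window: [[0 1], [5 6]] and [[1 2], [6 7]]
    coord.request_stop()
    coord.join(threads)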


Note: The tensorflow.strided_slice examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and the copyright of the source code remains with the original authors. Please consult each project's License before distributing or reusing the code; do not reproduce this article without permission.