

Python array_ops.pad Method Code Examples

This article collects typical usage examples of the array_ops.pad method from the Python module tensorflow.python.ops.array_ops. If you are wondering what array_ops.pad does and how to call it in practice, the hand-picked code examples below may help. You can also explore further usage examples from the containing module, tensorflow.python.ops.array_ops.


The following presents 15 code examples of the array_ops.pad method, sorted by popularity by default.
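
Before diving into the examples, here is a minimal sketch (not taken from any of the projects below; the tensor values are made up for illustration) of a direct call to array_ops.pad. array_ops.pad is the internal op behind the public tf.pad, so the two are interchangeable in these snippets.

import tensorflow as tf
from tensorflow.python.ops import array_ops

x = tf.constant([[1, 2], [3, 4]])
# paddings is an Nx2 tensor: [amount before, amount after] for each dimension.
paddings = tf.constant([[1, 1], [2, 0]])
y = array_ops.pad(x, paddings, mode="CONSTANT")  # zero-pads by default
# y has shape [4, 4]: one zero row above and below, two zero columns on the left.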

Example 1: _SliceGrad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def _SliceGrad(op, grad):
  """Gradient for Slice op."""
  # Create an Nx2 padding where the first column represents how many
  # zeros are to be prepended for each dimension, and the second
  # column indicates how many zeros are appended.
  #
  # The number of zeros to append is the shape of the input
  # elementwise-subtracted by both the begin vector and sizes vector.
  #
  # Some more reshaping is needed to assemble this tensor with the
  # right dimensions.
  input_vec = op.inputs[0]
  begin_vec = op.inputs[1]
  input_rank = array_ops.rank(input_vec)
  slice_size = array_ops.shape(op.outputs[0])

  shape = array_ops.stack([input_rank, 1])
  before_pad = array_ops.reshape(begin_vec, shape)
  after_pad = array_ops.reshape(
      array_ops.shape(input_vec) - slice_size - begin_vec, shape)
  paddings = array_ops.concat([before_pad, after_pad], 1)
  return array_ops.pad(grad, paddings), None, None 
Author: ryfeus, Project: lambda-packs, Lines: 24, Source file: array_grad.py
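
To make the paddings construction above concrete, here is a hand-worked sketch with hypothetical values (not part of the original gradient code): for tf.slice(input, begin=[1, 2], size=[2, 3]) on an input of shape [4, 6], each dimension gets begin zeros in front and shape - size - begin zeros behind.

input_shape = [4, 6]
begin = [1, 2]
size = [2, 3]
before_pad = begin                                                       # [1, 2]
after_pad = [s - sz - b for s, sz, b in zip(input_shape, size, begin)]  # [1, 1]
paddings = list(zip(before_pad, after_pad))                              # [(1, 1), (2, 1)]
# Padding the incoming [2, 3] gradient by these amounts restores shape [4, 6],
# placing the gradient back at the position the forward slice was taken from.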

Example 2: _process_matrix

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def _process_matrix(self, matrix, min_rank, event_ndims):
    """Helper to __init__ which gets matrix in batch-ready form."""
    # Pad the matrix so that matmul works in the case of a matrix and vector
    # input.  Keep track if the matrix was padded, to distinguish between a
    # rank 3 tensor and a padded rank 2 tensor.
    # TODO(srvasude): Remove side-effects from functions. Its currently unbroken
    # but error-prone since the function call order may change in the future.
    self._rank_two_event_ndims_one = math_ops.logical_and(
        math_ops.equal(array_ops.rank(matrix), min_rank),
        math_ops.equal(event_ndims, 1))
    left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
    pad = array_ops.concat(
        [array_ops.ones(
            [left], dtype=dtypes.int32), array_ops.shape(matrix)],
        0)
    return array_ops.reshape(matrix, pad) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 18, Source file: bijector.py

Example 3: _SliceGrad (a variant of Example 1 written against the pre-1.0 TensorFlow API, where array_ops.pack had not yet been renamed to stack and array_ops.concat still took the axis as its first argument)

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def _SliceGrad(op, grad):
  """Gradient for Slice op."""
  # Create an Nx2 padding where the first column represents how many
  # zeros are to be prepended for each dimension, and the second
  # column indicates how many zeros are appended.
  #
  # The number of zeros to append is the shape of the input
  # elementwise-subtracted by both the begin vector and sizes vector.
  #
  # Some more reshaping is needed to assemble this tensor with the
  # right dimensions.
  input_vec = op.inputs[0]
  begin_vec = op.inputs[1]
  input_rank = array_ops.rank(input_vec)
  slice_size = array_ops.shape(op.outputs[0])

  shape = array_ops.pack([input_rank, 1])
  before_pad = array_ops.reshape(begin_vec, shape)
  after_pad = array_ops.reshape(
      array_ops.shape(input_vec) - slice_size - begin_vec, shape)
  paddings = array_ops.concat(1, [before_pad, after_pad])
  return array_ops.pad(grad, paddings), None, None 
Author: tobegit3hub, Project: deep_image_model, Lines: 24, Source file: array_grad.py

Example 4: _test_pad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def _test_pad(data, mode="CONSTANT", quantized=False):
    """ One iteration of PAD """

    assert len(data) == 2

    # Test with tensor and constant
    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype='float32', name='in')]

        if quantized:
            # fake_quant will keep the tensors in float32 until the conversion in the session
            input_range = {'inq_0': (-100, 100)}
            inq_data = [tf.quantization.fake_quant_with_min_max_args(in_data[0],
                                                                     min=-100,
                                                                     max=100,
                                                                     name="inq_0")]
            out = array_ops.pad(inq_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode)
            compare_tflite_with_tvm([data[0]], ['inq_0:0'], inq_data, [out], quantized=True,
                                    input_range=input_range)
        else:
            out = array_ops.pad(in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode)
            compare_tflite_with_tvm([data[0]], ['in:0'], in_data, [out]) 
Author: apache, Project: incubator-tvm, Lines: 24, Source file: test_forward.py
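
The mode argument exercised above also accepts "REFLECT" and "SYMMETRIC" in addition to the default "CONSTANT" zero padding. A minimal sketch with made-up data:

import tensorflow as tf
from tensorflow.python.ops import array_ops

x = tf.constant([[1, 2, 3]])
reflected = array_ops.pad(x, [[0, 0], [2, 2]], mode="REFLECT")
# reflected == [[3, 2, 1, 2, 3, 2, 1]]  (REFLECT mirrors without repeating the edge value)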

Example 5: resize_audio_with_crop_or_pad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def resize_audio_with_crop_or_pad(image, target_height, target_width,
                                  dynamic_shape=False):
  image = tf.convert_to_tensor(image, name='audio')
  original_height, _ = _ImageDimensions(image, dynamic_shape=dynamic_shape)

  if target_height <= 0:
    raise ValueError('target_height must be > 0.')

  if dynamic_shape:
    max_ = math_ops.maximum
    min_ = math_ops.minimum
  else:
    max_ = max
    min_ = min

  height_diff = target_height - original_height
  offset_crop_height = max_(-height_diff // 2, 0)
  offset_pad_height = max_(height_diff // 2, 0)

  # Maybe crop if needed.
  cropped = crop_to_1d_bounding_box(image, offset_crop_height,
                                 min_(target_height, original_height),
                                 dynamic_shape=dynamic_shape)

  # Maybe pad if needed.
  resized = pad_to_1d_bounding_box(cropped, offset_pad_height, 
                                target_height, 
                                dynamic_shape=dynamic_shape)

  if resized.get_shape().ndims is None:
    raise ValueError('resized contains no shape.')
  if not resized.get_shape()[0].is_compatible_with(target_height):
    raise ValueError('resized height is not correct.')
  return resized


# In[5]: 
Author: HyperGAN, Project: HyperGAN, Lines: 39, Source file: resize_audio_patch.py

Example 6: temporal_padding

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def temporal_padding(x, padding=(1, 1)):
  """Pads the middle dimension of a 3D tensor.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 2 integers, how many zeros to
          add at the start and end of dim 1.

  Returns:
      A padded 3D tensor.
  """
  assert len(padding) == 2
  pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
  return array_ops.pad(x, pattern) 
Author: ryfeus, Project: lambda-packs, Lines: 16, Source file: backend.py
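
A small usage sketch (the shapes are assumed, not taken from the Keras backend): temporal_padding only touches the time axis (dim 1) of a [batch, time, features] tensor.

import tensorflow as tf

x = tf.zeros([8, 10, 16])                 # batch=8, 10 time steps, 16 features
y = temporal_padding(x, padding=(2, 3))
# y.shape == [8, 15, 16]: 2 zero steps prepended, 3 appended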

Example 7: spatial_2d_padding

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
  """Pads the 2nd and 3rd dimensions of a 4D tensor.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 2 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 4D tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.
  """
  assert len(padding) == 2
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  if data_format == 'channels_first':
    pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
  else:
    pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
  return array_ops.pad(x, pattern) 
Author: ryfeus, Project: lambda-packs, Lines: 30, Source file: backend.py
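
A usage sketch with assumed shapes: in channels_last layout, the 2nd and 3rd dimensions (height and width) grow by the per-side padding amounts.

import tensorflow as tf

imgs = tf.zeros([4, 32, 32, 3])           # channels_last: batch, height, width, channels
y = spatial_2d_padding(imgs, padding=((1, 1), (2, 2)), data_format='channels_last')
# y.shape == [4, 34, 36, 3]: height grows by 1+1, width by 2+2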

Example 8: spatial_3d_padding

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
  """Pads 5D tensor with zeros along the depth, height, width dimensions.

  Pads these dimensions with respectively
  "padding[0]", "padding[1]" and "padding[2]" zeros left and right.

  For 'channels_last' data_format,
  the 2nd, 3rd and 4th dimension will be padded.
  For 'channels_first' data_format,
  the 3rd, 4th and 5th dimension will be padded.

  Arguments:
      x: Tensor or variable.
      padding: Tuple of 3 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 5D tensor.

  Raises:
      ValueError: if `data_format` is neither
          `channels_last` or `channels_first`.

  """
  assert len(padding) == 3
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  assert len(padding[2]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  if data_format == 'channels_first':
    pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
               [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
  else:
    pattern = [[0, 0], [padding[0][0], padding[0][1]],
               [padding[1][0], padding[1][1]], [padding[2][0],
                                                padding[2][1]], [0, 0]]
  return array_ops.pad(x, pattern) 
Author: ryfeus, Project: lambda-packs, Lines: 43, Source file: backend.py
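
The 3D variant behaves the same way, with one tuple per spatial axis; a sketch with assumed shapes in channels_first layout:

import tensorflow as tf

vol = tf.zeros([2, 4, 8, 8, 8])           # batch, channels, depth, height, width
y = spatial_3d_padding(vol, padding=((1, 1), (1, 1), (1, 1)), data_format='channels_first')
# y.shape == [2, 4, 10, 10, 10]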

Example 9: _strict_1d_cumsum

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def _strict_1d_cumsum(tensor, len_tensor):
  """Cumsum of a 1D tensor with defined shape by padding and convolving."""
  # Assumes tensor shape is fully defined.
  with ops.name_scope('strict_1d_cumsum', values=[tensor]):
    if len_tensor == 0:
      return constant_op.constant([])
    len_pad = len_tensor - 1
    x = array_ops.pad(tensor, [[len_pad, 0]])
    h = array_ops.ones_like(x)
    return _strict_conv1d(x, h)[:len_tensor]


# TODO(langmore) Remove once a faster cumsum (accumulate_sum) Op is available.
# See:  https://github.com/tensorflow/tensorflow/issues/813 
Author: ryfeus, Project: lambda-packs, Lines: 16, Source file: histogram_ops.py
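
The same pad-and-convolve trick can be checked with NumPy. This sketch is not the TF helper above (it uses a length-n ones kernel with a 'valid' convolution instead of _strict_conv1d), but the windows it sums are the same: prepend n-1 zeros, then each window of n consecutive values yields one cumulative sum.

import numpy as np

t = np.array([1., 2., 3., 4.])
n = len(t)
x = np.pad(t, (n - 1, 0))                       # prepend n-1 zeros: [0, 0, 0, 1, 2, 3, 4]
h = np.ones(n)
cumsum = np.convolve(x, h, mode='valid')[:n]    # [1., 3., 6., 10.]
assert np.allclose(cumsum, np.cumsum(t))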

Example 10: _forward

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def _forward(self, x):
    # Pad the last dim with a zeros vector. We need this because it lets us
    # infer the scale in the inverse function.
    y = array_ops.expand_dims(x, dim=-1) if self._static_event_ndims == 0 else x
    ndims = (y.get_shape().ndims if y.get_shape().ndims is not None
             else array_ops.rank(y))
    y = array_ops.pad(y,
                      paddings=array_ops.concat(
                          (array_ops.zeros(
                              (ndims - 1, 2), dtype=dtypes.int32), [[0, 1]]),
                          0))

    # Set shape hints.
    if x.get_shape().ndims is not None:
      shape = x.get_shape().as_list()
      if self._static_event_ndims == 0:
        shape += [2]
      elif shape[-1] is not None:
        shape[-1] += 1
      shape = tensor_shape.TensorShape(shape)
      y.get_shape().assert_is_compatible_with(shape)
      y.set_shape(shape)

    # Since we only support event_ndims in [0, 1] and we do padding, we always
    # reduce over the last dimension, i.e., dim=-1 (which is the default).
    return nn_ops.softmax(y) 
Author: ryfeus, Project: lambda-packs, Lines: 28, Source file: softmax_centered_impl.py

Example 11: testFuseResizePadAndConv

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def testFuseResizePadAndConv(self):
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      resize_op = image_ops.resize_bilinear(
          input_op, [12, 4], align_corners=False)
      pad_op = array_ops.pad(resize_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                             mode="REFLECT")
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])

    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("Conv2D", node.op)
      self.assertNotEqual("MirrorPad", node.op)
      self.assertNotEqual("ResizeBilinear", node.op) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 32, Source file: optimize_for_inference_test.py

Example 12: testFusePadAndConv

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def testFusePadAndConv(self):
    with self.test_session() as sess:
      inputs = [1, 4, 2, 5, 3, 6, -1, -4, -2, -5, -3, -6]
      input_op = constant_op.constant(
          np.array(inputs), shape=[1, 2, 3, 2], dtype=dtypes.float32)
      pad_op = array_ops.pad(input_op, [[0, 0], [1, 1], [2, 2], [0, 0]],
                             mode="REFLECT")
      weights = [1, 2, 3, 4, 0.1, 0.2, 0.3, 0.4]
      weights_op = constant_op.constant(
          np.array(weights), shape=[1, 2, 2, 2], dtype=dtypes.float32)
      nn_ops.conv2d(
          pad_op, weights_op, [1, 1, 1, 1], padding="VALID", name="output")
      original_graph_def = sess.graph_def
      original_result = sess.run(["output:0"])
    optimized_graph_def = optimize_for_inference_lib.fuse_resize_and_conv(
        original_graph_def, ["output"])

    with self.test_session() as sess:
      _ = importer.import_graph_def(
          optimized_graph_def, input_map={}, name="optimized")
      optimized_result = sess.run(["optimized/output:0"])

    self.assertAllClose(original_result, optimized_result)

    for node in optimized_graph_def.node:
      self.assertNotEqual("Conv2D", node.op)
      self.assertNotEqual("MirrorPad", node.op) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 29, Source file: optimize_for_inference_test.py

Example 13: _infer_batch_ndims

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def _infer_batch_ndims(self):
    """Return batch_ndims."""
    if self._is_only_identity_multiplier:
      return 0
    # The real batch dims is one less when we pad in the case of event_ndims =
    # 1, and the rank of the underlying scale being 2. This allows us to have
    # non-negative sample dims.
    return (self._scale.rank() - 2 -
            array_ops.where(self._rank_two_event_ndims_one, 1, 0)) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 11, Source file: bijector.py

Example 14: _forward_log_det_jacobian

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def _forward_log_det_jacobian(self, x):
    if self._is_only_identity_multiplier:
      # TODO(jvdillon): We don't pad in this case and instead let the fldj be
      # applied via broadcast.
      d = math_ops.cast(array_ops.shape(x)[-1], dtype=self._scale.dtype)
      return math_ops.log(math_ops.abs(self._scale)) * array_ops.where(
          math_ops.equal(self.shaper.event_ndims, 0), 1., d)
    fldj = self._scale.sqrt_log_abs_det()
    # We need to squeeze off the padded dimension.
    start = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
    return array_ops.reshape(fldj, array_ops.shape(fldj)[start:]) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 13, Source file: bijector.py

Example 15: test

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import pad [as alias]
def test(self):
    pad_lt = ops.pad(self.original_lt,
                     {'x': (1, 1),
                      'channel': ([], ['alpha'])})

    golden_op = array_ops.pad(self.original_lt.tensor, [[1, 1], [0, 1], [0, 0],
                                                        [0, 0]])
    golden_axes = [('x', self.x_size + 2),
                   ('channel', ['red', 'green', 'blue', 'alpha']), self.a2,
                   self.a3]
    golden_lt = core.LabeledTensor(golden_op, golden_axes)
    self.assertLabeledTensorsEqual(pad_lt, golden_lt) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 14, Source file: ops_test.py


Note: The tensorflow.python.ops.array_ops.pad examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. For redistribution and use, please follow the license of the corresponding project; do not republish without permission.