Python array_ops.transpose Method Code Examples

This article collects typical usage examples of the array_ops.transpose method from tensorflow.python.ops in Python. If you are wondering what array_ops.transpose does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the containing module, tensorflow.python.ops.array_ops.


The following shows 15 code examples of the array_ops.transpose method, sorted by popularity by default.
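
Before the examples, here is a minimal usage sketch. It assumes TensorFlow 1.x, where tensorflow.python.ops.array_ops is the internal module behind the public tf.transpose; the tensor values and shapes are made up for illustration.

# Minimal sketch, assuming TensorFlow 1.x is installed.
from tensorflow.python.ops import array_ops
import tensorflow as tf

x = tf.constant([[1, 2, 3], [4, 5, 6]])     # shape [2, 3]
y = array_ops.transpose(x)                  # perm defaults to reversing dims -> shape [3, 2]
z = array_ops.transpose(x, perm=[1, 0])     # the same permutation, spelled out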

Example 1: _transpose_batch_time

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def _transpose_batch_time(x):
    """Transpose the batch and time dimensions of a Tensor.
    Retains as much of the static shape information as possible.
    Args:
        x: A tensor of rank 2 or higher.
    Returns:
        x transposed along the first two dimensions.
    Raises:
        ValueError: if `x` is rank 1 or lower.
    """
    x_static_shape = x.get_shape()
    if x_static_shape.ndims is not None and x_static_shape.ndims < 2:
        raise ValueError(
            "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
            (x, x_static_shape))
    x_rank = array_ops.rank(x)
    x_t = array_ops.transpose(
        x, array_ops.concat(
            ([1, 0], math_ops.range(2, x_rank)), axis=0))
    x_t.set_shape(
        tensor_shape.TensorShape([
            x_static_shape[1].value, x_static_shape[0].value
        ]).concatenate(x_static_shape[2:]))
    return x_t 
Author: hirofumi0810, Project: tensorflow_end2end_speech_recognition, Lines: 26, Source: dynamic_decoder.py
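
To see what the permutation does, here is the same batch/time swap written with plain NumPy (a minimal sketch; the shapes are made up for illustration):

import numpy as np

x = np.zeros((4, 7, 3))                    # [batch=4, time=7, features=3]
perm = [1, 0] + list(range(2, x.ndim))     # swap the first two axes, keep the rest
print(np.transpose(x, perm).shape)         # (7, 4, 3)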

Example 2: _TileGrad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def _TileGrad(op, grad):
  """Sum reduces grad along the tiled dimensions."""
  assert isinstance(grad, ops.Tensor)
  input_shape = array_ops.shape(op.inputs[0])
  # We interleave multiples and input_shape to get split_shape,
  # reshape grad to split_shape, and reduce along all even
  # dimensions (the tiled dimensions) to get the result
  # with shape input_shape.  For example
  #   input_shape = [20, 30, 40]
  #   multiples = [2, 3, 4]
  #   split_shape = [2, 20, 3, 30, 4, 40]
  #   axes = [0, 2, 4]
  split_shape = array_ops.reshape(
      array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
  axes = math_ops.range(0, array_ops.size(split_shape), 2)
  input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
  # Fix shape inference
  input_grad.set_shape(op.inputs[0].get_shape())
  return [input_grad, None] 
Author: ryfeus, Project: lambda-packs, Lines: 21, Source: array_grad.py
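
The stack-then-transpose trick interleaves `multiples` and `input_shape`. A NumPy sketch with the values from the comment above (illustration only):

import numpy as np

input_shape = np.array([20, 30, 40])
multiples = np.array([2, 3, 4])
split_shape = np.transpose(np.stack([multiples, input_shape])).reshape([-1])
print(split_shape)                          # interleaved: [2 20 3 30 4 40]
print(np.arange(0, split_shape.size, 2))    # reduction axes: [0 2 4]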

Example 3: transpose_image

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def transpose_image(image):
  """Transpose an image by swapping the first and second dimension.

  See also `transpose()`.

  Args:
    image: 3-D tensor of shape `[height, width, channels]`

  Returns:
    A 3-D tensor of shape `[width, height, channels]`

  Raises:
    ValueError: if the shape of `image` not supported.
  """
  image = ops.convert_to_tensor(image, name='image')
  image = control_flow_ops.with_dependencies(
      _Check3DImage(image, require_static=False), image)
  return array_ops.transpose(image, [1, 0, 2], name='transpose_image') 
Author: ryfeus, Project: lambda-packs, Lines: 20, Source: image_ops_impl.py
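
A NumPy stand-in showing that the [1, 0, 2] permutation swaps height and width while leaving the channel axis alone (shapes are illustrative):

import numpy as np

image = np.zeros((480, 640, 3))                 # [height, width, channels]
print(np.transpose(image, [1, 0, 2]).shape)     # (640, 480, 3)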

Example 4: _sample_n

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def _sample_n(self, n, seed=None):
    n_draws = math_ops.cast(self.total_count, dtype=dtypes.int32)
    if self.total_count.get_shape().ndims is not None:
      if self.total_count.get_shape().ndims != 0:
        raise NotImplementedError(
            "Sample only supported for scalar number of draws.")
    elif self.validate_args:
      is_scalar = check_ops.assert_rank(
          n_draws, 0,
          message="Sample only supported for scalar number of draws.")
      n_draws = control_flow_ops.with_dependencies([is_scalar], n_draws)
    k = self.event_shape_tensor()[0]
    # Flatten batch dims so logits has shape [B, k],
    # where B = reduce_prod(self.batch_shape_tensor()).
    draws = random_ops.multinomial(
        logits=array_ops.reshape(self.logits, [-1, k]),
        num_samples=n * n_draws,
        seed=seed)
    draws = array_ops.reshape(draws, shape=[-1, n, n_draws])
    x = math_ops.reduce_sum(array_ops.one_hot(draws, depth=k),
                            axis=-2)  # shape: [B, n, k]
    x = array_ops.transpose(x, perm=[1, 0, 2])
    final_shape = array_ops.concat([[n], self.batch_shape_tensor(), [k]], 0)
    return array_ops.reshape(x, final_shape) 
Author: ryfeus, Project: lambda-packs, Lines: 26, Source: multinomial.py
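
The transpose here only reorders axes: perm=[1, 0, 2] moves the sample axis in front of the flattened batch axis. A NumPy sketch with made-up sizes:

import numpy as np

B, n, k = 6, 5, 3                           # flattened batch, samples, classes
x = np.zeros((B, n, k))
print(np.transpose(x, [1, 0, 2]).shape)     # (5, 6, 3), i.e. [n, B, k]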

Example 5: call

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def call(self, inputs):
    pool_shape = (1,) + self.pool_size + (1,)
    strides = (1,) + self.strides + (1,)

    if self.data_format == 'channels_first':
      # TF does not support `channels_first` with 3D pooling operations,
      # so we must handle this case manually.
      # TODO(fchollet): remove this when TF pooling is feature-complete.
      inputs = array_ops.transpose(inputs, (0, 2, 3, 4, 1))

    outputs = self.pool_function(
        inputs,
        ksize=pool_shape,
        strides=strides,
        padding=self.padding.upper())

    if self.data_format == 'channels_first':
      outputs = array_ops.transpose(outputs, (0, 4, 1, 2, 3))
    return outputs 
Author: ryfeus, Project: lambda-packs, Lines: 21, Source: pooling.py
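
The two permutations form a round trip between channels_first and channels_last layouts for 5-D inputs; a NumPy sketch with assumed sizes:

import numpy as np

x = np.zeros((2, 16, 8, 8, 8))                  # [batch, channels, depth, height, width]
to_last = np.transpose(x, (0, 2, 3, 4, 1))      # -> [batch, depth, height, width, channels]
back = np.transpose(to_last, (0, 4, 1, 2, 3))   # -> back to channels_first
print(to_last.shape, back.shape)                # (2, 8, 8, 8, 16) (2, 16, 8, 8, 8)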

Example 6: NHWCToNCHW

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def NHWCToNCHW(input_tensor):
  """Converts the input from the NHWC format to NCHW.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor dim -> new axis order
  new_axes = {
      4: [0, 3, 1, 2],
      5: [0, 4, 1, 2, 3]
  }
  if isinstance(input_tensor, ops.Tensor):
    ndims = input_tensor.shape.ndims
    return array_ops.transpose(input_tensor, new_axes[ndims])
  else:
    ndims = len(input_tensor)
    return [input_tensor[a] for a in new_axes[ndims]] 
Author: ryfeus, Project: lambda-packs, Lines: 22, Source: test_util.py
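
The same axis map also works on a plain shape list, which is what the non-Tensor branch does; a small sketch with assumed values:

nhwc_shape = [32, 224, 224, 3]                  # [batch, height, width, channels]
new_axes = [0, 3, 1, 2]
print([nhwc_shape[a] for a in new_axes])        # [32, 3, 224, 224]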

Example 7: NCHWToNHWC

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def NCHWToNHWC(input_tensor):
  """Converts the input from the NCHW format to NHWC.

  Args:
    input_tensor: a 4- or 5-D tensor, or an array representing shape

  Returns:
    converted tensor or shape array
  """
  # tensor dim -> new axis order
  new_axes = {
      4: [0, 2, 3, 1],
      5: [0, 2, 3, 4, 1]
  }
  if isinstance(input_tensor, ops.Tensor):
    ndims = input_tensor.shape.ndims
    return array_ops.transpose(input_tensor, new_axes[ndims])
  else:
    ndims = len(input_tensor)
    return [input_tensor[a] for a in new_axes[ndims]] 
Author: ryfeus, Project: lambda-packs, Lines: 22, Source: test_util.py

Example 8: tensors_to_item

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def tensors_to_item(self, keys_to_tensors):
    """Maps the given dictionary of tensors to a contatenated list of bboxes.

    Args:
      keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

    Returns:
      [num_boxes, 4] tensor of bounding box coordinates,
        i.e. 1 bounding box per row, in order [y_min, x_min, y_max, x_max].
    """
    sides = []
    for key in self._full_keys:
      side = array_ops.expand_dims(keys_to_tensors[key].values, 0)
      sides.append(side)

    bounding_box = array_ops.concat(sides, 0)
    return array_ops.transpose(bounding_box) 
Author: ryfeus, Project: lambda-packs, Lines: 19, Source: tfexample_decoder.py
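
Concatenating the four per-side vectors along axis 0 and then transposing yields one [y_min, x_min, y_max, x_max] row per box; a NumPy sketch with two hypothetical boxes:

import numpy as np

ymin, xmin = np.array([0.1, 0.2]), np.array([0.3, 0.4])
ymax, xmax = np.array([0.5, 0.6]), np.array([0.7, 0.8])
sides = [s[np.newaxis, :] for s in (ymin, xmin, ymax, xmax)]    # each [1, num_boxes]
boxes = np.transpose(np.concatenate(sides, 0))                  # [num_boxes, 4]
print(boxes)    # [[0.1 0.3 0.5 0.7]
                #  [0.2 0.4 0.6 0.8]]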

Example 9: _compute_euclidean_distance

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def _compute_euclidean_distance(cls, inputs, clusters):
    """Computes Euclidean distance between each input and each cluster center.

    Args:
      inputs: list of input Tensors.
      clusters: cluster Tensor.

    Returns:
      list of Tensors, where each element corresponds to each element in inputs.
      The value is the distance of each row to all the cluster centers.
    """
    output = []
    for inp in inputs:
      with ops.colocate_with(inp):
        # Computes Euclidean distance. Note the first and third terms are
        # broadcast additions.
        squared_distance = (math_ops.reduce_sum(
            math_ops.square(inp), 1, keep_dims=True) - 2 * math_ops.matmul(
                inp, clusters, transpose_b=True) + array_ops.transpose(
                    math_ops.reduce_sum(
                        math_ops.square(clusters), 1, keep_dims=True)))
        output.append(squared_distance)

    return output 
Author: ryfeus, Project: lambda-packs, Lines: 26, Source: clustering_ops.py
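
The expression relies on the expansion ||a - b||^2 = ||a||^2 - 2 a·b + ||b||^2, where the transposed row sums of the squared cluster centers broadcast across input rows. A NumPy check with random data (illustration only):

import numpy as np

inp = np.random.rand(5, 3)                          # 5 inputs, 3 dims
clusters = np.random.rand(4, 3)                     # 4 cluster centers
dist = (np.sum(inp**2, 1, keepdims=True)            # [5, 1], broadcast over columns
        - 2 * inp @ clusters.T                      # [5, 4]
        + np.sum(clusters**2, 1, keepdims=True).T)  # [1, 4], broadcast over rows
ref = np.sum((inp[:, None, :] - clusters[None, :, :])**2, axis=2)
print(np.allclose(dist, ref))                       # True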

Example 10: _define_full_covariance_probs

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def _define_full_covariance_probs(self, shard_id, shard):
    """Defines the full covariance probabilties per example in a class.

    Updates a matrix with dimension num_examples X num_classes.

    Args:
      shard_id: id of the current shard.
      shard: current data shard, 1 X num_examples X dimensions.
    """
    diff = shard - self._means
    cholesky = linalg_ops.cholesky(self._covs + self._min_var)
    log_det_covs = 2.0 * math_ops.reduce_sum(
        math_ops.log(array_ops.matrix_diag_part(cholesky)), 1)
    x_mu_cov = math_ops.square(
        linalg_ops.matrix_triangular_solve(
            cholesky, array_ops.transpose(
                diff, perm=[0, 2, 1]), lower=True))
    diag_m = array_ops.transpose(math_ops.reduce_sum(x_mu_cov, 1))
    self._probs[shard_id] = -0.5 * (diag_m + math_ops.to_float(self._dimensions)
                                    * math_ops.log(2 * np.pi) + log_det_covs) 
Author: ryfeus, Project: lambda-packs, Lines: 22, Source: gmm_ops.py
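
The transposes move the per-example axis so a batched triangular solve can be applied: the Mahalanobis term diag(diff Σ⁻¹ diffᵀ) is obtained by solving against the Cholesky factor and summing squares. A single-class NumPy sketch with random data (illustration only):

import numpy as np

n, d = 5, 3
cov = np.diag([2.0, 1.0, 0.5])                  # a toy covariance matrix
diff = np.random.rand(n, d)                     # examples minus the class mean
chol = np.linalg.cholesky(cov)
z = np.linalg.solve(chol, diff.T)               # solve chol @ z = diff.T (chol is lower triangular)
maha = np.sum(z**2, axis=0)                     # one squared distance per example
ref = np.sum(diff @ np.linalg.inv(cov) * diff, axis=1)
print(np.allclose(maha, ref))                   # True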

Example 11: _define_partial_maximization_operation

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def _define_partial_maximization_operation(self, shard_id, shard):
    """Computes the partial statistics of the means and covariances.

    Args:
      shard_id: current shard id.
      shard: current data shard, 1 X num_examples X dimensions.
    """
    # Soft assignment of each data point to each of the two clusters.
    self._points_in_k[shard_id] = math_ops.reduce_sum(
        self._w[shard_id], 0, keep_dims=True)
    # Partial means.
    w_mul_x = array_ops.expand_dims(
        math_ops.matmul(
            self._w[shard_id], array_ops.squeeze(shard, [0]), transpose_a=True),
        1)
    self._w_mul_x.append(w_mul_x)
    # Partial covariances.
    x = array_ops.concat([shard for _ in range(self._num_classes)], 0)
    x_trans = array_ops.transpose(x, perm=[0, 2, 1])
    x_mul_w = array_ops.concat([
        array_ops.expand_dims(x_trans[k, :, :] * self._w[shard_id][:, k], 0)
        for k in range(self._num_classes)
    ], 0)
    self._w_mul_x2.append(math_ops.matmul(x_mul_w, x)) 
Author: ryfeus, Project: lambda-packs, Lines: 26, Source: gmm_ops.py

Example 12: separable_lstm

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def separable_lstm(images, num_filters_out, nhidden=None, scope=None):
  """Run bidirectional LSTMs first horizontally then vertically.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    nhidden: hidden layer depth
    scope: optional scope name

  Returns:
    (num_images, height, width, num_filters_out) tensor
  """
  with variable_scope.variable_scope(scope, "SeparableLstm", [images]):
    if nhidden is None:
      nhidden = num_filters_out
    hidden = horizontal_lstm(images, nhidden)
    with variable_scope.variable_scope("vertical"):
      transposed = array_ops.transpose(hidden, [0, 2, 1, 3])
      output_transposed = horizontal_lstm(transposed, num_filters_out)
    output = array_ops.transpose(output_transposed, [0, 2, 1, 3])
    return output 
Author: ryfeus, Project: lambda-packs, Lines: 23, Source: lstm2d.py
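
perm=[0, 2, 1, 3] swaps the two spatial axes so the second LSTM pass scans the other direction, and applying the same permutation again restores the original layout; a NumPy sketch with assumed sizes:

import numpy as np

hidden = np.zeros((2, 32, 64, 16))                      # [num_images, height, width, depth]
transposed = np.transpose(hidden, [0, 2, 1, 3])         # [num_images, width, height, depth]
print(transposed.shape)                                 # (2, 64, 32, 16)
print(np.transpose(transposed, [0, 2, 1, 3]).shape)     # (2, 32, 64, 16) -- restored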

Example 13: reduce_to_sequence

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def reduce_to_sequence(images, num_filters_out, scope=None):
  """Reduce an image to a sequence by scanning an LSTM vertically.

  Args:
    images: (num_images, height, width, depth) tensor
    num_filters_out: output layer depth
    scope: optional scope name

  Returns:
    A (width, num_images, num_filters_out) sequence.
  """
  with variable_scope.variable_scope(scope, "ReduceToSequence", [images]):
    batch_size, height, width, depth = _shape(images)
    transposed = array_ops.transpose(images, [1, 0, 2, 3])
    reshaped = array_ops.reshape(transposed,
                                 [height, batch_size * width, depth])
    reduced = lstm1d.sequence_to_final(reshaped, num_filters_out)
    output = array_ops.reshape(reduced, [batch_size, width, num_filters_out])
    return output 
Author: ryfeus, Project: lambda-packs, Lines: 21, Source: lstm2d.py

Example 14: impose_axis_order

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def impose_axis_order(labeled_tensor, axis_order=None, name=None):
  """Impose desired axis order on a labeled tensor.

  Args:
    labeled_tensor: The input tensor.
    axis_order: Optional desired axis order, as a list of names. If not
      provided, defaults to the current axis_order_scope (if set).
    name: Optional op name.

  Returns:
    Labeled tensor with possibly transposed axes.

  Raises:
    AxisOrderError: If no axis_order is provided or axis_order does not contain
      all axes on the input tensor.
  """
  with ops.name_scope(name, 'lt_impose_axis_order', [labeled_tensor]) as scope:
    labeled_tensor = convert_to_labeled_tensor(labeled_tensor)

    if axis_order is None:
      axis_order = _get_valid_axis_order()

    relevant_axis_order = [a for a in axis_order if a in labeled_tensor.axes]

    return transpose(labeled_tensor, relevant_axis_order, name=scope) 
Author: ryfeus, Project: lambda-packs, Lines: 27, Source: core.py

Example 15: setUp

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import transpose [as alias]
def setUp(self):
    self._tmp_dir = tempfile.mktemp()

    self.v = variables.Variable(10.0, name="v")
    self.delta = constant_op.constant(1.0, name="delta")
    self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")

    self.ph = array_ops.placeholder(dtypes.float32, name="ph")
    self.xph = array_ops.transpose(self.ph, name="xph")
    self.m = constant_op.constant(
        [[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
    self.y = math_ops.matmul(self.m, self.xph, name="y")

    self.sess = session.Session()

    # Initialize variable.
    self.sess.run(self.v.initializer) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 19, Source: local_cli_wrapper_test.py


Note: The tensorflow.python.ops.array_ops.transpose examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.