

Python array_ops.diag_part Function Code Examples

This article collects typical usage examples of the tensorflow.python.ops.array_ops.diag_part function in Python. If you are wondering how diag_part is used in practice, how to call it, or what real examples look like, the hand-picked code examples below may help.


A total of 15 code examples of the diag_part function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
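
As a quick orientation (not one of the collected examples): array_ops.diag_part takes a tensor of rank 2k whose shape has the form [D1, ..., Dk, D1, ..., Dk] and returns its rank-k diagonal. The sketch below uses the public TF 2.x name tf.linalg.tensor_diag_part for the same operation; treat it as an illustration rather than part of the examples.

```python
import numpy as np
import tensorflow as tf

# Rank-2 case: the main diagonal comes back as a rank-1 tensor.
x = tf.constant([[1, 2, 3],
                 [4, 5, 6],
                 [7, 8, 9]])
print(tf.linalg.tensor_diag_part(x).numpy())  # [1 5 9]

# Rank-4 case: shape (2, 3, 2, 3) pairs dimension 0 with 2 and 1 with 3,
# so the diagonal has shape (2, 3).
y = np.arange(36, dtype=np.float32).reshape(2, 3, 2, 3)
print(tf.linalg.tensor_diag_part(tf.constant(y)).shape)  # (2, 3)
```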

Example 1: testOddRank

  def testOddRank(self):
    w = np.random.rand(2)
    x = np.random.rand(2, 2, 2)
    self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
    self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
    with self.assertRaises(ValueError):
      array_ops.diag_part(0.0)
Author: JonathanRaiman, Project: tensorflow, Lines: 7, Source: diag_op_test.py

Example 2: __call__

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # Check the shape
    if len(shape) < 3 or len(shape) > 5:
      raise ValueError("The tensor to initialize must be at least "
                       "three-dimensional and at most five-dimensional")

    if shape[-2] > shape[-1]:
      raise ValueError("In_filters cannot be greater than out_filters.")

    # Generate a random matrix
    a = random_ops.random_normal([shape[-1], shape[-1]],
                                 dtype=dtype, seed=self.seed)
    # Compute the qr factorization
    q, r = linalg_ops.qr(a, full_matrices=False)
    # Make Q uniform
    d = array_ops.diag_part(r)
    q *= math_ops.sign(d)
    q = q[:shape[-2], :]
    q *= math_ops.sqrt(math_ops.cast(self.gain, dtype=dtype))
    if len(shape) == 3:
      weight = array_ops.scatter_nd([[(shape[0]-1)//2]],
                                    array_ops.expand_dims(q, 0), shape)
    elif len(shape) == 4:
      weight = array_ops.scatter_nd([[(shape[0]-1)//2, (shape[1]-1)//2]],
                                    array_ops.expand_dims(q, 0), shape)
    else:
      weight = array_ops.scatter_nd([[(shape[0]-1)//2, (shape[1]-1)//2,
                                      (shape[2]-1)//2]],
                                    array_ops.expand_dims(q, 0), shape)
    return weight
Author: moses-sun, Project: tensorflow, Lines: 32, Source: init_ops.py
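
The initializer above builds a "delta-orthogonal" kernel: an orthogonal slice is scattered into the spatial center of the filter and every other tap is zero. A small NumPy sketch (hypothetical, not part of init_ops.py) shows why that makes the convolution a norm-preserving pointwise map when in_filters <= out_filters:

```python
import numpy as np

rng = np.random.default_rng(0)
width, c_in, c_out = 3, 4, 6

a = rng.standard_normal((c_out, c_out))
q, r = np.linalg.qr(a)
q *= np.sign(np.diag(r))      # same sign fix as in the example above
q = q[:c_in, :]               # keep c_in orthonormal rows

kernel = np.zeros((width, c_in, c_out))
kernel[(width - 1) // 2] = q  # scatter Q into the spatial center only

# With a single non-zero tap, the convolution reduces to x[t] @ Q at every
# position, so per-position norms are preserved.
x = rng.standard_normal((10, c_in))
y = x @ kernel[(width - 1) // 2]
print(np.allclose(np.linalg.norm(x, axis=1), np.linalg.norm(y, axis=1)))  # True
```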

Example 3: _diagPartOp

  def _diagPartOp(self, tensor, dtype, expected_ans, use_gpu):
    with self.cached_session(use_gpu=use_gpu):
      tensor = ops.convert_to_tensor(tensor.astype(dtype))
      tf_ans_inv = array_ops.diag_part(tensor)
      inv_out = self.evaluate(tf_ans_inv)
    self.assertAllClose(inv_out, expected_ans)
    self.assertShapeEqual(expected_ans, tf_ans_inv)
Author: JonathanRaiman, Project: tensorflow, Lines: 7, Source: diag_op_test.py

Example 4: __call__

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # Check the shape
    if len(shape) < 2:
      raise ValueError("The tensor to initialize must be "
                       "at least two-dimensional")
    # Flatten the input shape with the last dimension remaining
    # its original shape so it works for conv2d
    num_rows = 1
    for dim in shape[:-1]:
      num_rows *= dim
    num_cols = shape[-1]
    flat_shape = (num_rows, num_cols)

    # Generate a random matrix
    a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
    # Compute the qr factorization
    q, r = linalg_ops.qr(a, full_matrices=False)
    # Make Q uniform
    square_len = math_ops.minimum(num_rows, num_cols)
    d = array_ops.diag_part(r[:square_len, :square_len])
    ph = d / math_ops.abs(d)
    q *= ph
    # Pad zeros to Q (if rows smaller than cols)
    if num_rows < num_cols:
      padding = array_ops.zeros([num_rows, num_cols - num_rows], dtype=dtype)
      q = array_ops.concat([q, padding], 1)
    return self.gain * array_ops.reshape(q, shape)
Author: finardi, Project: tensorflow, Lines: 29, Source: init_ops.py
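
A quick sanity check on this recipe (illustrative only; it uses tf.keras.initializers.Orthogonal, which follows a comparable QR-plus-sign-correction construction, rather than the internal class shown above): for a tall shape the columns of the result should be orthonormal.

```python
import tensorflow as tf

init = tf.keras.initializers.Orthogonal(gain=1.0, seed=0)
w = init(shape=(8, 4), dtype=tf.float32)   # num_rows=8 >= num_cols=4
gram = tf.matmul(w, w, transpose_a=True)   # should be close to I_4
print(tf.reduce_max(tf.abs(gram - tf.eye(4))).numpy() < 1e-5)  # True
```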

Example 5: __call__

  def __call__(self, shape, dtype=dtypes.float32):
    """Returns a tensor object initialized as specified by the initializer.

    Args:
      shape: Shape of the tensor.
      dtype: Optional dtype of the tensor. Only floating point types are
       supported.

    Raises:
      ValueError: If the dtype is not floating point or the input shape is not
       valid.
    """
    dtype = _assert_float_dtype(dtype)
    # Check the shape
    if len(shape) < 2:
      raise ValueError("The tensor to initialize must be "
                       "at least two-dimensional")
    # Flatten the input shape with the last dimension remaining
    # its original shape so it works for conv2d
    num_rows = 1
    for dim in shape[:-1]:
      num_rows *= dim
    num_cols = shape[-1]
    flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows))

    # Generate a random matrix
    a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
    # Compute the qr factorization
    q, r = gen_linalg_ops.qr(a, full_matrices=False)
    # Make Q uniform
    d = array_ops.diag_part(r)
    q *= math_ops.sign(d)
    if num_rows < num_cols:
      q = array_ops.matrix_transpose(q)
    return self.gain * array_ops.reshape(q, shape)
Author: Wajih-O, Project: tensorflow, Lines: 35, Source: init_ops_v2.py
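
The main difference from Example 4 is the "wide" case: instead of padding with zeros, this version draws a tall matrix of shape (max, min) and transposes it when num_rows < num_cols, so the rows of the result are orthonormal. A hedged check using the public Keras initializer (assumed here to use the same kind of construction):

```python
import tensorflow as tf

init = tf.keras.initializers.Orthogonal(seed=0)
w = init(shape=(4, 8), dtype=tf.float32)   # num_rows=4 < num_cols=8
gram = tf.matmul(w, w, transpose_b=True)   # W @ W^T should be close to I_4
print(tf.reduce_max(tf.abs(gram - tf.eye(4))).numpy() < 1e-5)  # True
```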

Example 6: trace

def trace(x, name=None):
  """ Compute the trace of a tensor `x`.

  `trace(x)` returns the sum of the entries along the main diagonal of `x`.

  For example:

  ```python
  # 'x' is [[1, 1],
  #         [1, 1]]
  tf.trace(x) ==> 2

  # 'x' is [[1,2,3],
  #         [4,5,6],
  #         [7,8,9]]
  tf.trace(x) ==> 15
  ```

  Args:
    x: 2-D tensor.
    name: A name for the operation (optional).

  Returns:
    The trace of input tensor.
  """
  with ops.op_scope([x], name, "Trace") as name:
    x = ops.convert_to_tensor(x, name="x")
    if len(x.get_shape()) != 2:
      raise ValueError("Expected a tensor with rank 2, rank %d tensor received"
                       % len(x.get_shape()))
    return reduce_sum(array_ops.diag_part(x), name=name)
Author: Adamor1, Project: tensorflow, Lines: 31, Source: math_ops.py
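
This helper is essentially what is now exposed as tf.linalg.trace; either way the trace is just the sum of the diagonal returned by diag_part. A short illustrative check (not from math_ops.py):

```python
import numpy as np
import tensorflow as tf

x = tf.constant([[1., 2., 3.],
                 [4., 5., 6.],
                 [7., 8., 9.]])
via_diag = tf.reduce_sum(tf.linalg.tensor_diag_part(x))
print(via_diag.numpy())            # 15.0
print(tf.linalg.trace(x).numpy())  # 15.0
print(np.trace(x.numpy()))         # 15.0
```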

Example 7: _diagOp

  def _diagOp(self, diag, dtype, expected_ans, use_gpu):
    with self.cached_session(use_gpu=use_gpu):
      tf_ans = array_ops.diag(ops.convert_to_tensor(diag.astype(dtype)))
      out = self.evaluate(tf_ans)
      tf_ans_inv = array_ops.diag_part(expected_ans)
      inv_out = self.evaluate(tf_ans_inv)
    self.assertAllClose(out, expected_ans)
    self.assertAllClose(inv_out, diag)
    self.assertShapeEqual(expected_ans, tf_ans)
    self.assertShapeEqual(diag, tf_ans_inv)
Author: JonathanRaiman, Project: tensorflow, Lines: 10, Source: diag_op_test.py

Example 8: diagOp

  def diagOp(self, diag, dtype, expected_ans, use_gpu=False):
    with self.test_session(use_gpu=use_gpu):
      tf_ans = array_ops.diag(ops.convert_to_tensor(diag.astype(dtype)))
      out = tf_ans.eval()
      tf_ans_inv = array_ops.diag_part(expected_ans)
      inv_out = tf_ans_inv.eval()
    self.assertAllClose(out, expected_ans)
    self.assertAllClose(inv_out, diag)
    self.assertShapeEqual(expected_ans, tf_ans)
    self.assertShapeEqual(diag, tf_ans_inv)
Author: AlbertXiebnu, Project: tensorflow, Lines: 10, Source: diag_op_test.py

Example 9: testRankFourFloatTensorUnknownShape

  def testRankFourFloatTensorUnknownShape(self):
    x = np.random.rand(3, 3)
    i = np.arange(3)
    expected_ans = x[i, i]
    for shape in None, (None, 3), (3, None):
      with self.cached_session(use_gpu=False):
        t = ops.convert_to_tensor(x.astype(np.float32))
        t.set_shape(shape)
        tf_ans = array_ops.diag_part(t)
        out = self.evaluate(tf_ans)
      self.assertAllClose(out, expected_ans)
      self.assertShapeEqual(expected_ans, tf_ans)
Author: JonathanRaiman, Project: tensorflow, Lines: 12, Source: diag_op_test.py

Example 10: _initialize_variables

  def _initialize_variables(self, data, initial_means=None):
    """Initializes variables.

    Args:
      data: a list of Tensors with data, each row is a new example.
      initial_means: a Tensor with a matrix of means.
    """
    first_shard = data[0]
    # Initialize means: num_classes X 1 X dimensions.
    if initial_means is not None:
      means = array_ops.expand_dims(initial_means, 1)
    else:
      # Sample data randomly
      means = array_ops.expand_dims(
          _init_clusters_random(data, self._num_classes, self._random_seed), 1)

    # Initialize covariances.
    if self._covariance_type == FULL_COVARIANCE:
      cov = _covariance(first_shard, False) + self._min_var
      # A matrix per class, num_classes X dimensions X dimensions
      covs = array_ops.tile(
          array_ops.expand_dims(cov, 0), [self._num_classes, 1, 1])
    elif self._covariance_type == DIAG_COVARIANCE:
      cov = _covariance(first_shard, True) + self._min_var
      # A diagonal per row, num_classes X dimensions.
      covs = array_ops.tile(
          array_ops.expand_dims(array_ops.diag_part(cov), 0),
          [self._num_classes, 1])

    with ops.colocate_with(self._cluster_centers_initialized):
      initialized = control_flow_ops.with_dependencies(
          [means, covs],
          array_ops.identity(self._cluster_centers_initialized))
    self._init_ops = []
    with ops.colocate_with(self._means):
      init_means = state_ops.assign(self._means, means, validate_shape=False)
      init_means = control_flow_ops.with_dependencies(
          [init_means],
          state_ops.assign(self._cluster_centers_initialized, True))
      self._init_ops.append(control_flow_ops.cond(initialized,
                                                  control_flow_ops.no_op,
                                                  lambda: init_means).op)
    with ops.colocate_with(self._covs):
      init_covs = state_ops.assign(self._covs, covs, validate_shape=False)
      init_covs = control_flow_ops.with_dependencies(
          [init_covs],
          state_ops.assign(self._cluster_centers_initialized, True))
      self._init_ops.append(control_flow_ops.cond(initialized,
                                                  control_flow_ops.no_op,
                                                  lambda: init_covs).op)
Author: AndreasGocht, Project: tensorflow, Lines: 50, Source: gmm_ops.py
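
In the DIAG_COVARIANCE branch, diag_part keeps only the per-dimension variances of the full covariance matrix before they are tiled per class. A hypothetical NumPy sketch of that relationship (not part of gmm_ops.py):

```python
import numpy as np

rng = np.random.default_rng(0)
data = rng.standard_normal((500, 3))     # 500 examples, 3 dimensions

full_cov = np.cov(data, rowvar=False)    # 3 x 3 full covariance
diag_cov = np.diag(full_cov)             # what diag_part extracts: shape (3,)

num_classes = 4
covs = np.tile(diag_cov[None, :], (num_classes, 1))  # num_classes x dimensions
print(covs.shape)                        # (4, 3)
```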

Example 11: _define_maximization_operation

  def _define_maximization_operation(self, num_batches):
    """Maximization operations."""
    # TODO(xavigonzalvo): some of these operations could be moved to C++.
    # Compute the effective number of data points assigned to component k.
    with ops.control_dependencies(self._w):
      points_in_k = array_ops.squeeze(
          math_ops.add_n(self._points_in_k), axis=[0])
      # Update alpha.
      if 'w' in self._params:
        final_points_in_k = points_in_k / num_batches
        num_examples = math_ops.cast(math_ops.reduce_sum(final_points_in_k),
                                     dtypes.float32)
        self._alpha_op = self._alpha.assign(final_points_in_k /
                                            (num_examples + MEPS))
      else:
        self._alpha_op = control_flow_ops.no_op()
      self._train_ops = [self._alpha_op]

      # Update means.
      points_in_k_expanded = array_ops.reshape(points_in_k,
                                               [self._num_classes, 1, 1])
      if 'm' in self._params:
        self._means_op = self._means.assign(
            math_ops.div(
                math_ops.add_n(self._w_mul_x), points_in_k_expanded + MEPS))
      else:
        self._means_op = control_flow_ops.no_op()
      # means are (num_classes x 1 x dims)

      # Update covariances.
      with ops.control_dependencies([self._means_op]):
        b = math_ops.add_n(self._w_mul_x2) / (points_in_k_expanded + MEPS)
        new_covs = []
        for k in range(self._num_classes):
          mean = self._means.value()[k, :, :]
          square_mean = math_ops.matmul(mean, mean, transpose_a=True)
          new_cov = b[k, :, :] - square_mean + self._min_var
          if self._covariance_type == FULL_COVARIANCE:
            new_covs.append(array_ops.expand_dims(new_cov, 0))
          elif self._covariance_type == DIAG_COVARIANCE:
            new_covs.append(
                array_ops.expand_dims(array_ops.diag_part(new_cov), 0))
        new_covs = array_ops.concat(new_covs, 0)
        if 'c' in self._params:
          # Train operations don't need to take care of the means
          # because covariances already depend on it.
          with ops.control_dependencies([self._means_op, new_covs]):
            self._train_ops.append(
                state_ops.assign(
                    self._covs, new_covs, validate_shape=False))
Author: Albert-Z-Guo, Project: tensorflow, Lines: 50, Source: gmm_ops.py

Example 12: testDiagPartGrad

  def testDiagPartGrad(self):
    np.random.seed(0)
    shapes = ((3, 3), (3, 3, 3, 3))
    dtypes = (dtypes_lib.float32, dtypes_lib.float64)
    with self.test_session(use_gpu=False):
      errors = []
      for shape in shapes:
        for dtype in dtypes:
          x1 = constant_op.constant(np.random.rand(*shape), dtype=dtype)
          y = array_ops.diag_part(x1)
          error = gradient_checker.compute_gradient_error(
              x1, x1.get_shape().as_list(), y, y.get_shape().as_list())
          tf_logging.info("error = %f", error)
          self.assertLess(error, 1e-4)
Author: AlbertXiebnu, Project: tensorflow, Lines: 14, Source: diag_op_test.py
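
A TF 2.x flavoured illustration of the same gradient (not the test above): only the diagonal entries of x contribute to reduce_sum(diag_part(x)), so the analytic gradient is exactly the identity pattern.

```python
import tensorflow as tf

x = tf.Variable(tf.random.normal((3, 3)))
with tf.GradientTape() as tape:
  y = tf.reduce_sum(tf.linalg.tensor_diag_part(x))
grad = tape.gradient(y, x)
print(tf.reduce_all(grad == tf.eye(3)).numpy())  # True
```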

Example 13: _orthogonal_matrix

  def _orthogonal_matrix(self, n):
    """Construct an n x n orthogonal matrix.

    Args:
      n: dimension.
    Returns:
      an n x n orthogonal matrix.
    """
    a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)
    if self.seed:
      self.seed += 1
    q, r = linalg_ops.qr(a)
    d = array_ops.diag_part(r)
    # make q uniform
    q *= math_ops.sign(d)
    return q
Author: moses-sun, Project: tensorflow, Lines: 16, Source: init_ops.py
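
The "make q uniform" step multiplies each column of Q by the sign of the corresponding diagonal entry of R. That fixes the sign ambiguity of the QR factorization (R's diagonal becomes positive), which is what makes Q uniformly (Haar) distributed when the input has i.i.d. Gaussian entries. A hypothetical NumPy sketch of that step:

```python
import numpy as np

rng = np.random.default_rng(1)
a = rng.standard_normal((5, 5))
q, r = np.linalg.qr(a)
s = np.sign(np.diag(r))
q_fixed = q * s                  # scale column j by sign(r[j, j])
r_fixed = s[:, None] * r         # scale row j the same way

print(np.all(np.diag(r_fixed) > 0))                  # True: R diagonal positive
print(np.allclose(q_fixed @ r_fixed, a))             # True: still a QR of a
print(np.allclose(q_fixed.T @ q_fixed, np.eye(5)))   # True: Q still orthogonal
```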

Example 14: _create_variables

  def _create_variables(self, data, initial_means=None):
    """Initializes GMM algorithm.

    Args:
      data: a list of Tensors with data, each row is a new example.
      initial_means: a Tensor with a matrix of means.
    """
    first_shard = data[0]
    # Initialize means: num_classes X 1 X dimensions.
    if initial_means is not None:
      self._means = variables.Variable(
          array_ops.expand_dims(initial_means, 1),
          name=self.CLUSTERS_VARIABLE,
          validate_shape=False,
          dtype=dtypes.float32)
    else:
      # Sample data randomly
      self._means = variables.Variable(
          array_ops.expand_dims(
              _init_clusters_random(data, self._num_classes, self._random_seed),
              1),
          name=self.CLUSTERS_VARIABLE,
          validate_shape=False)

    # Initialize covariances.
    if self._covariance_type == FULL_COVARIANCE:
      cov = _covariance(first_shard, False) + self._min_var
      # A matrix per class, num_classes X dimensions X dimensions
      covs = array_ops.tile(
          array_ops.expand_dims(cov, 0), [self._num_classes, 1, 1])
    elif self._covariance_type == DIAG_COVARIANCE:
      cov = _covariance(first_shard, True) + self._min_var
      # A diagonal per row, num_classes X dimensions.
      covs = array_ops.tile(
          array_ops.expand_dims(array_ops.diag_part(cov), 0),
          [self._num_classes, 1])
    self._covs = variables.Variable(
        covs, name=self.CLUSTERS_COVS_VARIABLE, validate_shape=False)
    # Mixture weights, representing the probability that a randomly
    # selected unobservable data (in EM terms) was generated by component k.
    self._alpha = variables.Variable(
        array_ops.tile([1.0 / self._num_classes], [self._num_classes]),
        name=self.CLUSTERS_WEIGHT,
        validate_shape=False)
Author: LugarkPirog, Project: tensorflow, Lines: 44, Source: gmm_ops.py

Example 15: _DiagGrad

def _DiagGrad(_, grad):
  return array_ops.diag_part(grad)
Author: 0ruben, Project: tensorflow, Lines: 2, Source: array_grad.py
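
This function serves as the gradient of the Diag op: the gradient is simply the diagonal part of the incoming gradient (the DiagPart op uses the symmetric rule). A hedged TF 2.x illustration of that pairing:

```python
import tensorflow as tf

# For y = sum(diag(v) * w), dy/dv is the diagonal of w, because only the
# diagonal entries of diag(v) depend on v.
v = tf.Variable([1.0, 2.0, 3.0])
w = tf.random.normal((3, 3))
with tf.GradientTape() as tape:
  y = tf.reduce_sum(tf.linalg.tensor_diag(v) * w)
grad = tape.gradient(y, v)
print(tf.reduce_all(grad == tf.linalg.tensor_diag_part(w)).numpy())  # True
```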


Note: The tensorflow.python.ops.array_ops.diag_part examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets are drawn from open-source projects contributed by their respective developers, and the copyright of the source code belongs to the original authors; please consult each project's license before distributing or using it. Do not reproduce without permission.