本文整理汇总了Python中tensorflow.python.ops.array_ops.diag_part方法的典型用法代码示例。如果您正苦于以下问题:Python array_ops.diag_part方法的具体用法?Python array_ops.diag_part怎么用?Python array_ops.diag_part使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow.python.ops.array_ops的用法示例。
在下文中一共展示了array_ops.diag_part方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _DiagGrad
# 需要导入模块: from tensorflow.python.ops import array_ops [as 别名]
# 或者: from tensorflow.python.ops.array_ops import diag_part [as 别名]
def _DiagGrad(_, grad):
  """Gradient for Diag: extract the diagonal of the incoming gradient."""
  diag_grad = array_ops.diag_part(grad)
  return diag_grad
示例2: __call__
# 需要导入模块: from tensorflow.python.ops import array_ops [as 别名]
# 或者: from tensorflow.python.ops.array_ops import diag_part [as 别名]
def __call__(self, shape, dtype=None, partition_info=None):
  """Return an orthogonal initial value of the requested `shape`.

  The leading dimensions are flattened into rows (so the result also works
  for conv2d kernels), a random normal matrix is QR-factorized, and Q is
  made uniformly distributed by sign-correcting with R's diagonal.

  Args:
    shape: Shape of the tensor to initialize; must have rank >= 2.
    dtype: Optional dtype; defaults to `self.dtype`.
    partition_info: Unused, kept for initializer-interface compatibility.

  Returns:
    A tensor of `shape` scaled by `self.gain`.

  Raises:
    ValueError: If `shape` has fewer than two dimensions.
  """
  dtype = self.dtype if dtype is None else dtype
  if len(shape) < 2:
    raise ValueError("The tensor to initialize must be "
                     "at least two-dimensional")
  # Collapse every dimension except the last into the row count so the
  # same code path handles dense and convolutional kernels.
  n_cols = shape[-1]
  n_rows = 1
  for d in shape[:-1]:
    n_rows *= d
  # QR-factorize a random Gaussian matrix of the flattened shape.
  gaussian = random_ops.random_normal((n_rows, n_cols),
                                      dtype=dtype, seed=self.seed)
  q, r = linalg_ops.qr(gaussian, full_matrices=False)
  # Multiply Q columns by the signs of R's diagonal so Q is drawn
  # uniformly (Haar measure) rather than with a sign bias.
  k = math_ops.minimum(n_rows, n_cols)
  r_diag = array_ops.diag_part(r[:k, :k])
  q *= r_diag / math_ops.abs(r_diag)
  # When there are fewer rows than columns, right-pad Q with zeros.
  if n_rows < n_cols:
    zero_pad = array_ops.zeros([n_rows, n_cols - n_rows], dtype=dtype)
    q = array_ops.concat([q, zero_pad], 1)
  return self.gain * array_ops.reshape(q, shape)
示例3: _batch_log_det
# 需要导入模块: from tensorflow.python.ops import array_ops [as 别名]
# 或者: from tensorflow.python.ops.array_ops import diag_part [as 别名]
def _batch_log_det(self):
  """Log determinant of every batch member."""
  # matrix_diag_part (not diag_part) is required here: diag_part would be
  # wrong for batched matrices, and offers no speedup in the non-batch case.
  chol_diag = array_ops.matrix_diag_part(self._chol)
  log_abs_diag = math_ops.log(math_ops.abs(chol_diag))
  # det(A) = det(L)^2 for A = L L^T, hence twice the sum of log|diag(L)|.
  det = 2.0 * math_ops.reduce_sum(log_abs_diag, reduction_indices=[-1])
  det.set_shape(self.get_shape()[:-2])
  return det
示例4: _batch_log_det
# 需要导入模块: from tensorflow.python.ops import array_ops [as 别名]
# 或者: from tensorflow.python.ops.array_ops import diag_part [as 别名]
def _batch_log_det(self):
  """Log determinant of every batch member."""
  # Use matrix_diag_part rather than diag_part: the latter would give a
  # wrong answer for a batch matrix and is no faster in the non-batch case.
  chol_diag = array_ops.matrix_diag_part(self._chol)
  # det(A) = det(L)^2 when A = L L^T, so sum 2 * log(diag(L)) over the
  # last axis (the Cholesky diagonal is positive, so no abs is needed).
  det = 2.0 * math_ops.reduce_sum(math_ops.log(chol_diag),
                                  reduction_indices=[-1])
  det.set_shape(self.get_shape()[:-2])
  return det
示例5: __call__
# 需要导入模块: from tensorflow.python.ops import array_ops [as 别名]
# 或者: from tensorflow.python.ops.array_ops import diag_part [as 别名]
def __call__(self, shape, dtype=None, partition_info=None):
  """Return an orthogonal initial value of the requested `shape`.

  Flattens all but the last dimension into rows, draws a random normal
  matrix with the larger dimension first, QR-factorizes it, sign-corrects
  Q with R's diagonal, and transposes back when rows < cols.

  Args:
    shape: Shape of the tensor to initialize; must have rank >= 2.
    dtype: Optional dtype; defaults to `self.dtype`.
    partition_info: Unused, kept for initializer-interface compatibility.

  Returns:
    A tensor of `shape` scaled by `self.gain`.

  Raises:
    ValueError: If `shape` has fewer than two dimensions.
  """
  dtype = self.dtype if dtype is None else dtype
  if len(shape) < 2:
    raise ValueError("The tensor to initialize must be "
                     "at least two-dimensional")
  # Collapse the leading dimensions into the row count so the same code
  # path works for conv2d kernels.
  n_cols = shape[-1]
  n_rows = 1
  for d in shape[:-1]:
    n_rows *= d
  # Always sample the taller orientation so QR yields a full orthonormal
  # basis; transpose back afterwards if needed.
  if n_rows < n_cols:
    flat = (n_cols, n_rows)
  else:
    flat = (n_rows, n_cols)
  gaussian = random_ops.random_normal(flat, dtype=dtype, seed=self.seed)
  q, r = linalg_ops.qr(gaussian, full_matrices=False)
  # Sign-correct Q by the signs of R's diagonal so the distribution over
  # orthogonal matrices is uniform (Haar measure).
  r_diag = array_ops.diag_part(r)
  q *= r_diag / math_ops.abs(r_diag)
  if n_rows < n_cols:
    q = array_ops.matrix_transpose(q)
  return self.gain * array_ops.reshape(q, shape)
开发者ID:PacktPublishing,项目名称:Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda,代码行数:29,代码来源:init_ops.py