This article collects typical usage examples of the tensorflow.conj method in Python. If you are wondering what exactly tensorflow.conj does or how to use it in practice, the curated code samples below should help. You can also explore further usage examples from the tensorflow module in which this method lives.
The following presents 15 code examples of tensorflow.conj, sorted by popularity by default.
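Before the examples, here is a minimal sketch of what tf.conj itself does (TF 1.x graph mode assumed): it takes the elementwise complex conjugate of a tensor, and is a no-op for real dtypes.

import tensorflow as tf

# Build a small complex tensor and conjugate it.
z = tf.constant([1 + 2j, 3 - 4j], dtype=tf.complex64)
z_conj = tf.conj(z)                   # elementwise complex conjugate

with tf.Session() as sess:
    print(sess.run(z_conj))           # [1.-2.j 3.+4.j]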
Example 1: _compareGradient
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def _compareGradient(self, x):
    # x[:, 0] is real, x[:, 1] is imag. We combine real and imag into
    # complex numbers, then extract the real and imag parts again and
    # compute the squared sum. This is obviously the same as sum(real
    # * real) + sum(imag * imag); we just want to make sure the
    # gradient function is checked.
    with self.test_session():
        inx = tf.convert_to_tensor(x)
        real, imag = tf.split(1, 2, inx)
        real, imag = tf.reshape(real, [-1]), tf.reshape(imag, [-1])
        cplx = tf.complex(real, imag)
        cplx = tf.conj(cplx)
        loss = tf.reduce_sum(
            tf.square(tf.real(cplx))) + tf.reduce_sum(
                tf.square(tf.imag(cplx)))
        epsilon = 1e-3
        jacob_t, jacob_n = tf.test.compute_gradient(inx,
                                                    list(x.shape),
                                                    loss,
                                                    [1],
                                                    x_init_value=x,
                                                    delta=epsilon)
        self.assertAllClose(jacob_t, jacob_n, rtol=epsilon, atol=epsilon)
Example 2: _checkGrad
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def _checkGrad(self, func, x, y, use_gpu=False):
    with self.test_session(use_gpu=use_gpu):
        inx = tf.convert_to_tensor(x)
        iny = tf.convert_to_tensor(y)
        # func is a forward or inverse FFT function (batched or unbatched)
        z = func(tf.complex(inx, iny))
        # loss = sum(|z|^2)
        loss = tf.reduce_sum(tf.real(z * tf.conj(z)))
        ((x_jacob_t, x_jacob_n),
         (y_jacob_t, y_jacob_n)) = tf.test.compute_gradient(
             [inx, iny],
             [list(x.shape), list(y.shape)],
             loss,
             [1],
             x_init_value=[x, y],
             delta=1e-2)
        self.assertAllClose(x_jacob_t, x_jacob_n, rtol=1e-2, atol=1e-2)
        self.assertAllClose(y_jacob_t, y_jacob_n, rtol=1e-2, atol=1e-2)
Example 3: compute_log_mel_spectrograms
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def compute_log_mel_spectrograms(stfts, hparams):
    # A power spectrogram could be computed as tf.real(stfts * tf.conj(stfts));
    # the magnitude spectrogram is used here instead.
    # power_spectrograms = tf.real(stfts * tf.conj(stfts))
    magnitude_spectrograms = tf.abs(stfts)

    num_spectrogram_bins = magnitude_spectrograms.shape[-1].value
    linear_to_mel_weight_matrix = signal.linear_to_mel_weight_matrix(
        hparams.num_mel_bins, num_spectrogram_bins, hparams.sample_rate,
        hparams.mel_lower_edge_hz, hparams.mel_upper_edge_hz)
    mel_spectrograms = tf.tensordot(
        magnitude_spectrograms, linear_to_mel_weight_matrix, 1)
    # Note: shape inference for `tf.tensordot` does not currently handle this case.
    mel_spectrograms.set_shape(magnitude_spectrograms.shape[:-1].concatenate(
        linear_to_mel_weight_matrix.shape[-1:]))

    log_offset = 1e-6
    log_mel_spectrograms = tf.log(mel_spectrograms + log_offset)
    return log_mel_spectrograms
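For context, a hypothetical way to call compute_log_mel_spectrograms (TF 1.x; it assumes `signal` in the function above refers to tf.contrib.signal, and the hparams container plus STFT parameters below are illustrative — only the field names come from the code above):

from types import SimpleNamespace
import tensorflow as tf
from tensorflow.contrib import signal

hparams = SimpleNamespace(num_mel_bins=64, sample_rate=16000,
                          mel_lower_edge_hz=125.0, mel_upper_edge_hz=7600.0)
waveform = tf.random_normal([1, 16000])         # one second of dummy audio
stfts = signal.stft(waveform, frame_length=400, frame_step=160, fft_length=512)
log_mel = compute_log_mel_spectrograms(stfts, hparams)   # shape (1, frames, 64)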
Example 4: __init__
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def __init__(self, x_op, y_op, sess, remove_bias=False, name=None):
    # Save parameters
    self.x_op = x_op
    self.y_op = y_op
    self.sess = sess
    self.remove_bias = remove_bias

    # Get dimensions and data types
    shape0 = x_op.get_shape()
    shape1 = y_op.get_shape()
    dtype0 = x_op.dtype
    dtype1 = y_op.dtype
    BaseLinTrans.__init__(self, shape0, shape1, dtype0, dtype1,
                          svd_avail=False, name=name)

    # Create the ops for the gradient. If the linear operator is y = F(x),
    # then z = y'*F(x). Therefore, dz/dx = F'(y).
    self.ytr_op = tf.placeholder(self.dtype1, self.shape1)
    self.z_op = tf.reduce_sum(tf.multiply(tf.conj(self.ytr_op), self.y_op))
    self.zgrad_op = tf.gradients(self.z_op, self.x_op)[0]

    # Compute the output at zero so it can be subtracted as a bias
    if self.remove_bias:
        xzero = np.zeros(self.shape0)
        self.y_bias = self.sess.run(self.y_op, feed_dict={self.x_op: xzero})
    else:
        self.y_bias = 0
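The gradient trick above can be sanity-checked in isolation. Here is a minimal, real-valued sketch (TF 1.x; the names below are illustrative and not part of the class above): for a linear map y = A x, the gradient of z = <y_tr, F(x)> with respect to x is A^T y_tr, i.e. the adjoint operator applied to y_tr.

import numpy as np
import tensorflow as tf

A = np.random.randn(3, 2).astype(np.float32)
x_op = tf.placeholder(tf.float32, [2])
y_op = tf.tensordot(tf.constant(A), x_op, 1)     # F(x) = A x
ytr_op = tf.placeholder(tf.float32, [3])
z_op = tf.reduce_sum(ytr_op * y_op)              # z = <ytr, F(x)>
zgrad_op = tf.gradients(z_op, x_op)[0]           # expected to equal A^T ytr

with tf.Session() as sess:
    ytr = np.random.randn(3).astype(np.float32)
    grad = sess.run(zgrad_op, {x_op: np.zeros(2, np.float32), ytr_op: ytr})
    print(np.allclose(grad, A.T @ ytr))          # True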
Example 5: test_Conj
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def test_Conj(self):
    t = tf.conj(self.random(3, 4, complex=True))
    self.check(t)
Example 6: _matmul_right
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def _matmul_right(self, x, adjoint=False, adjoint_arg=False):
    # Adjointing a diagonal operator only conjugates its diagonal entries.
    diag_mat = tf.conj(self._diag) if adjoint else self._diag
    x = linalg.adjoint(x) if adjoint_arg else x
    return diag_mat * x
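A small numpy cross-check of the identity relied on above, under the assumption that the operator represents multiplication by the diagonal matrix diag(d), so that its adjoint simply conjugates the diagonal entries:

import numpy as np

d = np.array([1 + 2j, 3 - 1j, 0.5 + 0.5j])
x = np.random.randn(4, 3) + 1j * np.random.randn(4, 3)

# Right-multiplying by the adjoint diag(d)^H scales column j by conj(d[j]),
# which is exactly the broadcasted product conj(d) * x.
lhs = np.conj(d) * x
rhs = x @ np.diag(d).conj().T
print(np.allclose(lhs, rhs))    # True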
Example 7: _matmul_sparse
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def _matmul_sparse(self, x, adjoint=False, adjoint_arg=False):
    diag_mat = tf.conj(self._diag) if adjoint else self._diag
    assert not adjoint_arg
    return utils.matmul_diag_sparse(diag_mat, x)
Example 8: _compareConj
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def _compareConj(self, cplx, use_gpu):
    np_ans = np.conj(cplx)
    with self.test_session(use_gpu=use_gpu):
        inx = tf.convert_to_tensor(cplx)
        tf_conj = tf.conj(inx)
        tf_ans = tf_conj.eval()
        self.assertAllEqual(np_ans, tf_ans)
        self.assertShapeEqual(np_ans, tf_conj)
Example 9: testConjReal
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def testConjReal(self):
    # For real and integer dtypes, conj is the identity and returns the input tensor.
    for dtype in tf.int32, tf.int64, tf.float16, tf.float32, tf.float64:
        x = tf.placeholder(dtype)
        y = tf.conj(x)
        self.assertEqual(x, y)
Example 10: testConjString
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def testConjString(self):
    # conj is only defined for numeric tensors; string tensors should raise.
    x = tf.placeholder(tf.string)
    with self.assertRaisesRegexp(TypeError, r"Expected numeric tensor"):
        tf.conj(x)
Example 11: mriAdjointOpWithOS
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def mriAdjointOpWithOS(self, f, coil_sens, sampling_mask):
    with tf.variable_scope('mriAdjointOp'):
        # Padding amounts used to remove the frequency-encoding oversampling
        pad_u = tf.cast(tf.multiply(tf.cast(tf.shape(sampling_mask)[1], tf.float32), 0.25) + 1, tf.int32)
        pad_l = tf.cast(tf.multiply(tf.cast(tf.shape(sampling_mask)[1], tf.float32), 0.25) - 1, tf.int32)

        # Apply the sampling mask and perform the inverse centered Fourier transform
        mask = tf.expand_dims(sampling_mask, axis=1)
        Finv = tf.contrib.icg.ifftc2d(tf.complex(tf.real(f) * mask, tf.imag(f) * mask))

        # Multiply the coil images with the coil sensitivities and sum over channels
        img = tf.reduce_sum(Finv * tf.conj(coil_sens), 1)[:, pad_u:-pad_l, :]
        return img
Example 12: mriAdjointOp
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def mriAdjointOp(self, f, coil_sens, sampling_mask):
    with tf.variable_scope('mriAdjointOp'):
        # Apply the sampling mask and perform the inverse centered Fourier transform
        mask = tf.expand_dims(sampling_mask, axis=1)
        Finv = tf.contrib.icg.ifftc2d(tf.complex(tf.real(f) * mask, tf.imag(f) * mask))

        # Multiply the coil images with the coil sensitivities and sum over channels
        img = tf.reduce_sum(Finv * tf.conj(coil_sens), 1)
        return img
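Note that tf.contrib.icg.ifftc2d is a custom op from the ICG TensorFlow extensions, not stock TensorFlow. Assuming it computes a centered, orthonormal 2D inverse FFT over the last two axes, a rough numpy stand-in could look like this sketch (the exact shift and scaling conventions may differ):

import numpy as np

def ifftc2d_np(kspace):
    # Move the zero frequency to the array corner, run the inverse 2D FFT,
    # then re-center the resulting image; 'ortho' keeps the transform unitary.
    axes = (-2, -1)
    return np.fft.fftshift(
        np.fft.ifft2(np.fft.ifftshift(kspace, axes=axes), axes=axes, norm='ortho'),
        axes=axes)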
Example 13: _ccorr
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def _ccorr(self, a, b):
    # Circular correlation computed in the frequency domain:
    # ccorr(a, b) = ifft(conj(fft(a)) * fft(b))
    a = tf.cast(a, tf.complex64)
    b = tf.cast(b, tf.complex64)
    return tf.real(tf.ifft(tf.conj(tf.fft(a)) * tf.fft(b)))
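A quick numpy cross-check of the identity used above, namely that the circular correlation ccorr(a, b)[k] = sum_i a[i] * b[(i + k) mod n] can be computed as ifft(conj(fft(a)) * fft(b)); the variable names here are purely illustrative:

import numpy as np

n = 8
a = np.random.randn(n)
b = np.random.randn(n)

fft_version = np.real(np.fft.ifft(np.conj(np.fft.fft(a)) * np.fft.fft(b)))
naive = np.array([np.sum(a * np.roll(b, -k)) for k in range(n)])
print(np.allclose(fft_version, naive))    # True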
Example 14: get_correlations
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def get_correlations(Y, inverse_power, taps, delay):
    """Calculates weighted correlations over a window of length `taps`.

    Args:
        Y (tf.Tensor): Complex-valued STFT signal with shape (F, D, T)
        inverse_power (tf.Tensor): Weighting factor with shape (F, T)
        taps (int): Length of the correlation window
        delay (int): Delay for the weighting factor

    Returns:
        tf.Tensor: Correlation matrix of shape (F, taps*D, taps*D)
        tf.Tensor: Correlation vector of shape (F, taps*D)
    """
    dyn_shape = tf.shape(Y)
    F = dyn_shape[0]
    D = dyn_shape[1]
    T = dyn_shape[2]

    # Stack `taps` delayed copies of Y along a new axis (most recent frame first).
    Psi = tf_signal.frame(Y, taps, 1, axis=-1)[..., :T - delay - taps + 1, ::-1]
    Psi_conj_norm = (
        tf.cast(inverse_power[:, None, delay + taps - 1:, None], Psi.dtype)
        * tf.conj(Psi)
    )

    correlation_matrix = tf.einsum('fdtk,fetl->fkdle', Psi_conj_norm, Psi)
    correlation_vector = tf.einsum(
        'fdtk,fet->fked', Psi_conj_norm, Y[..., delay + taps - 1:]
    )

    correlation_matrix = tf.reshape(correlation_matrix, (F, taps * D, taps * D))
    return correlation_matrix, correlation_vector
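A hypothetical usage sketch (TF 1.x; it assumes `tf_signal` in the function above refers to tf.contrib.signal, and feeds random data with the docstring shapes Y: (F, D, T), inverse_power: (F, T)):

import numpy as np
import tensorflow as tf
from tensorflow.contrib import signal as tf_signal   # assumed alias used by get_correlations

F, D, T, taps, delay = 3, 2, 40, 5, 2
Y_np = (np.random.randn(F, D, T) + 1j * np.random.randn(F, D, T)).astype(np.complex64)
power = np.maximum(np.mean(np.abs(Y_np) ** 2, axis=1), 1e-10)     # (F, T)

Y = tf.constant(Y_np)
inverse_power = tf.constant((1.0 / power).astype(np.float32))
R, r = get_correlations(Y, inverse_power, taps, delay)

with tf.Session() as sess:
    R_val, r_val = sess.run([R, r])
print(R_val.shape)    # (3, 10, 10), i.e. (F, taps * D, taps * D)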
Example 15: sph_harm_transform
# Required import: import tensorflow [as alias]
# Or alternatively: from tensorflow import conj [as alias]
def sph_harm_transform(f, mode='DH', harmonics=None):
    """Project a spherical function onto the spherical harmonics basis."""
    assert f.shape[0] == f.shape[1]
    if isinstance(f, tf.Tensor):
        sumfun = tf.reduce_sum
        conjfun = lambda x: tf.conj(x)
        n = f.shape[0].value
    else:
        sumfun = np.sum
        conjfun = np.conj
        n = f.shape[0]
    assert np.log2(n).is_integer()

    if harmonics is None:
        harmonics = sph_harm_all(n)

    a = DHaj(n, mode)
    f = f * np.array(a)[np.newaxis, :]

    real = is_real_sft(harmonics)
    coeffs = []
    for l in range(n // 2):
        row = []
        minl = 0 if real else -l
        for m in range(minl, l + 1):
            # WARNING: results are off by this factor when using the
            # driscoll1994computing formulas
            factor = 2 * np.sqrt(np.pi)
            row.append(sumfun(factor * np.sqrt(2 * np.pi) / n *
                              f * conjfun(harmonics[l][m - minl])))
        coeffs.append(row)

    return coeffs