This article collects typical usage examples of the scipy.linalg.det function in Python. If you are wondering what det does, how to call it, or what it looks like in real code, the curated examples below may help.
The following presents 15 code examples of the det function drawn from real projects, sorted by popularity.
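For readers who just need the basic call, here is a minimal, self-contained sketch first (illustrative values, not taken from the examples below):

import numpy as np
from scipy import linalg

A = np.array([[1.0, 2.0],
              [3.0, 4.0]])
print(linalg.det(A))   # 1*4 - 2*3 = -2.0 (up to floating-point rounding)

# A singular matrix has a determinant of (numerically close to) zero.
B = np.array([[1.0, 2.0],
              [2.0, 4.0]])
print(linalg.det(B))   # 0.0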
Example 1: lnmixgaussian
def lnmixgaussian(x, params):
"""
NAME:
lnmixgaussian
PURPOSE:
returns the log of a mixture of two two-dimensional Gaussians
INPUT:
x - 2D point to evaluate the Gaussian at
params - mean and variances ([mean_array,inverse variance matrix, mean_array, inverse variance, amp1])
OUTPUT:
log N(mean,var)
HISTORY:
2009-10-30 - Written - Bovy (NYU)
"""
return sc.log(
params[4]
/ 2.0
/ sc.pi
* sc.sqrt(linalg.det(params[1]))
* sc.exp(-0.5 * sc.dot(x - params[0], sc.dot(params[1], x - params[0])))
+ (1.0 - params[4])
/ 2.0
/ sc.pi
* sc.sqrt(linalg.det(params[3]))
* sc.exp(-0.5 * sc.dot(x - params[2], sc.dot(params[3], x - params[2])))
)
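As a hedged cross-check of the expression above (the variable names below are illustrative, not from the original module): sqrt(det(Sigma^-1))/(2*pi) is exactly the normalization constant of a 2-D Gaussian, so the same mixture log-density can be reproduced with scipy.stats.multivariate_normal.

import numpy as np
from scipy import linalg
from scipy.stats import multivariate_normal

mean1, mean2 = np.array([0.0, 0.0]), np.array([2.0, 1.0])
cov1 = np.array([[1.0, 0.3], [0.3, 2.0]])
cov2 = np.array([[0.5, 0.0], [0.0, 0.5]])
amp1 = 0.7
# params layout expected by lnmixgaussian: [mean, inverse covariance, mean, inverse covariance, amp1]
params = [mean1, linalg.inv(cov1), mean2, linalg.inv(cov2), amp1]

x = np.array([0.5, -0.2])
reference = np.log(amp1 * multivariate_normal(mean1, cov1).pdf(x)
                   + (1.0 - amp1) * multivariate_normal(mean2, cov2).pdf(x))
# reference should agree with lnmixgaussian(x, params) from the snippet above.
print(reference)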
Example 2: LL
def LL(self,h,X=None,stack=True,REML=False):
"""
Computes the log-likelihood for a given heritability (h). If X is None, the
default X0t is used. If X is provided and stack=True, X0t is concatenated with
the input X; if stack is False, X is used in place of X0t in the LL calculation.
The REML log-likelihood adds extra terms to the standard LL and is computed by setting REML=True.
"""
if X is None: X = self.X0t
elif stack:
self.X0t_stack[:,(self.q)] = matrixMult(self.Kve.T,X)[:,0]
X = self.X0t_stack
n = float(self.N)
q = float(X.shape[1])
beta,sigma,Q,XX_i,XX = self.getMLSoln(h,X)
LL = n*np.log(2*np.pi) + np.log(h*self.Kva + (1.0-h)).sum() + n + n*np.log(1.0/n * Q)
LL = -0.5 * LL
if REML:
LL_REML_part = q*np.log(2.0*np.pi*sigma) + np.log(det(matrixMult(X.T,X))) - np.log(det(XX))
LL = LL + 0.5*LL_REML_part
LL = LL.sum()
return LL,beta,sigma,XX_i
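The REML branch above adds the standard correction 0.5*(q*log(2*pi*sigma) + log|X'X| - log|XX|). Below is a minimal sketch of just that term, mirroring the code; note the assumption that XX is the X' H^-1 X matrix returned by getMLSoln, which is not shown in the snippet.

import numpy as np
from scipy.linalg import det

def reml_adjustment(X, XX, sigma):
    # X: fixed-effect design matrix; XX: X' H^-1 X (assumed, from getMLSoln); sigma: ML variance estimate
    q = X.shape[1]
    return 0.5 * (q * np.log(2.0 * np.pi * sigma)
                  + np.log(det(X.T.dot(X)))
                  - np.log(det(XX)))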
Example 3: __init__
def __init__(self,n, dimz = 2, dimx = 3):
self.n = n
self.W = RS.normal(0,1, size = (dimx,dimz))
self.sigx = 0.000000000001#RS.normal(0,1)
self.dimz = dimz
self.dimx = dimx
data = util.generate_data(n, self.W, self.sigx, dimx, dimz)
self.observed = data[0]
self.latent = data[1]
self.prec = (1/self.sigx)*np.dot(self.W.transpose(), self.W)
self.cov = np.linalg.inv(self.prec)
'''
values for normalisation computation-- messy!
'''
temp1 = (2*np.pi)**(dimz/2.0)*np.sqrt(det(self.cov))
temp2 = det(2*np.pi*self.sigx*np.identity(dimz))
self.pc_norm1 = temp1/temp2
temp3 = np.linalg.inv(np.dot(self.W.transpose(), self.W))
self.wtwinv = temp3
temp3 = np.dot(self.W, temp3)
self.pc_norm2 = np.dot(temp3, self.W.transpose())
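One identity worth noting for the normalisation above: the determinant of a scaled identity is the scale raised to the dimension, so det(2*pi*sigx*I) equals (2*pi*sigx)**dimz. A quick self-contained check with made-up values:

import numpy as np
from scipy.linalg import det

sigx, dimz = 0.25, 2
assert np.isclose(det(2 * np.pi * sigx * np.identity(dimz)),
                  (2 * np.pi * sigx) ** dimz)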
Example 4: find_best_rotation
def find_best_rotation(q1, q2):
"""
This function calculates the best rotation between two srvfs using
Procrustes rigid alignment
:param q1: numpy ndarray of shape (2,M) of M samples
:param q2: numpy ndarray of shape (2,M) of M samples
:rtype: numpy ndarray
:return q2new: optimal rotated q2 to q1
:return R: rotation matrix
"""
eps = finfo(double).eps
n, T = q1.shape
A = q1.dot(q2.T)
U, s, V = svd(A)
if (abs(det(U) * det(V) - 1) < 10 * eps):
S = eye(n)
else:
S = eye(n)
S[:, -1] = -S[:, -1]
R = U.dot(S).dot(V.T)
q2new = R.dot(q2)
return (q2new, R)
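The det(U)*det(V) test above is the usual Procrustes/Kabsch guard against returning a reflection instead of a proper rotation. A minimal self-contained sketch of the same idea (not the fdasrsf implementation):

import numpy as np
from scipy.linalg import svd, det

rng = np.random.default_rng(0)
A = rng.standard_normal((2, 2))      # cross-covariance of two point sets
U, s, Vh = svd(A)
S = np.eye(2)
if det(U) * det(Vh) < 0:             # the raw SVD product would be a reflection
    S[-1, -1] = -1.0                 # flip the last singular direction
R = U @ S @ Vh
assert np.isclose(det(R), 1.0)       # R is now a proper rotation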
Example 5: __init__
def __init__(self,n, dimz = 2, dimx = 3):
self.n = n
self.W = RS.normal(0,1, size = (dimx,dimz))
self.sigx = 0.000000000001#RS.normal(0,1)
self.dimz = dimz
self.dimx = dimx
data = self.generate_data(n)
self.observed = data[0]
#we keep this to test
self.latent = data[1]
self.prec = (1/self.sigx)*np.dot(self.W.transpose(), self.W)
self.cov = np.linalg.inv(self.prec)
'''
values for normalisation computation
'''
temp1 = (2*np.pi)**(dimz/2.0)*np.sqrt(det(self.cov))
temp2 = det(2*np.pi*self.sigx*np.identity(dimz))
self.pc_norm1 = temp1/temp2
temp3 = np.linalg.inv(np.dot(self.W.transpose(), self.W))
self.wtwinv = temp3
temp3 = np.dot(self.W, temp3)
self.pc_norm2 = np.dot(temp3, self.W.transpose())
'''
prior for n factors will be product of n priors~ N(0,I)
'''
#I = np.identity(dimz)
#product_priors = MVN(0, n*I)
Example 6: __init__
def __init__(self, pos, neg, k=5):
self.k = k
pos = pos.T
neg = neg.T
totals = pos.sum(axis=1)
popular = numpy.argsort(totals, axis=0)[::-1, :][1 : 1 + k, :]
popular = numpy.array(popular.T)[0]
self.popular = popular
pos = pos[popular, :].todense()
neg = neg[popular, :].todense()
self.posmu = pos.mean(axis=1)
self.negmu = neg.mean(axis=1)
p = pos - self.posmu
n = neg - self.negmu
self.pcov = p * p.T / p.shape[1]
self.ncov = n * n.T / n.shape[1]
self.pdet = dlinalg.det(self.pcov)
self.ndet = dlinalg.det(self.ncov)
assert self.pdet != 0
assert self.ndet != 0
self.picov = dlinalg.inv(self.pcov)
self.nicov = dlinalg.inv(self.ncov)
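As a hedged sketch of how stored quantities like pdet and picov are typically consumed (the helper below is illustrative, not part of the original class): the Gaussian log-density needs the log-determinant of the covariance, and numpy.linalg.slogdet is usually preferred over log(det(...)) because the raw determinant can under- or overflow in higher dimensions.

import numpy as np

def gaussian_logpdf(x, mu, cov):
    # log N(x; mu, cov) -- illustrative helper, not from the snippet above
    k = len(mu)
    sign, logdet = np.linalg.slogdet(cov)   # numerically safer than np.log(np.linalg.det(cov))
    diff = np.asarray(x - mu).ravel()
    maha = diff @ np.linalg.solve(cov, diff)
    return -0.5 * (k * np.log(2 * np.pi) + logdet + maha)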
Example 7: matrix_normal_density
def matrix_normal_density(X, M, U, V):
"""Sample from a matrix normal distribution"""
norm = - 0.5*np.log(la.det(2*np.pi*U)) - 0.5*np.log(la.det(2*np.pi*V))
XM = X-M
pptn = -0.5*np.trace( np.dot(la.solve(U,XM),la.solve(V,XM.T)) )
pdf = norm + pptn
return pdf
Example 8: from_matrix44
def from_matrix44(self, aff):
"""
Convert a 4x4 matrix describing an affine transform into a
12-sized vector of natural affine parameters: translation,
rotation, log-scale, pre-rotation (to allow for shearing when
combined with non-unitary scales). In case the transform has a
negative determinant, set the `_direct` attribute to False.
"""
vec12 = np.zeros((12,))
vec12[0:3] = aff[:3, 3]
# Use SVD to find orthogonal and diagonal matrices such that
# aff[0:3,0:3] == R*S*Q
R, s, Q = spl.svd(aff[0:3, 0:3])
if spl.det(R) < 0:
R = -R
Q = -Q
r = rotation_mat2vec(R)
if spl.det(Q) < 0:
Q = -Q
self._direct = False
q = rotation_mat2vec(Q)
vec12[3:6] = r
vec12[6:9] = np.log(np.maximum(s, TINY))
vec12[9:12] = q
self._vec12 = vec12
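A small self-contained check of the identity the sign handling relies on: negating both R and Q leaves the product R*diag(s)*Q unchanged, while det(R) changes sign in the 3x3 case, so a negative determinant can always be repaired this way (illustrative data, not from nipy):

import numpy as np
from scipy import linalg as spl

rng = np.random.default_rng(1)
A = rng.standard_normal((3, 3))
R, s, Q = spl.svd(A)                          # spl.svd returns U, s, Vh
prod_before = R @ np.diag(s) @ Q
det_before = spl.det(R)
R, Q = -R, -Q                                 # the two sign flips cancel in the product
assert np.allclose(R @ np.diag(s) @ Q, prod_before)
assert np.isclose(spl.det(R), -det_before)    # determinant flips sign in odd dimension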
Example 9: testQuaternion
def testQuaternion():
dtheta = 30.0
quat = Quaternion()
print quat.q
print quat.toRot()
print det(quat.toRot())
figm = mlab.figure(bgcolor=(1,1,1))
for i in range(100):
print quat.sampleUnif(0.5*np.pi)
k,theta = quat.toAxisAngle()
print theta*180.0/np.pi
plotCosy(figm, quat.toRot())
figm = mlab.figure(bgcolor=(1,1,1))
for i in range(100):
print quat.sample(dtheta)
k,theta = quat.toAxisAngle()
print theta*180.0/np.pi
plotCosy(figm, quat.toRot())
figm1 = mlab.figure(bgcolor=(1,1,0.0))
for i in range(100):
# sample rotation axis
k = np.random.rand(3)-0.5
# sample uniformly from +- 5 degrees
theta = (np.asscalar(np.random.rand(1)) + dtheta - 0.5) *np.pi/180.0 # (np.asscalar(np.random.rand(1))-0.5)*np.pi/(180.0/(2.0*dtheta))
print 'perturbation: {} theta={}'.format(k/norm(k),theta*180.0/np.pi)
dR = RodriguesRotation(k/norm(k),theta)
plotCosy(figm1, dR)
mlab.show()
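A related sanity check that does not depend on the custom Quaternion class: any rotation matrix built from a unit quaternion should have determinant 1. The sketch below uses scipy.spatial.transform.Rotation (as_matrix requires SciPy 1.4 or newer):

import numpy as np
from scipy.linalg import det
from scipy.spatial.transform import Rotation

rng = np.random.default_rng(0)
q = rng.standard_normal(4)
q /= np.linalg.norm(q)                  # unit quaternion in (x, y, z, w) order
Rm = Rotation.from_quat(q).as_matrix()  # 3x3 rotation matrix
assert np.isclose(det(Rm), 1.0)         # proper rotations have determinant +1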
Example 10: _update_precisions
def _update_precisions(self):
"""Update the variational distributions for the precisions"""
if self.cvtype == 'spherical':
self._a = 0.5 * self.n_features * np.sum(self._z, axis=0)
for k in xrange(self.n_components):
# XXX: how to avoid this huge temporary matrix in memory
dif = (self._X - self._means[k])
self._b[k] = 1.
d = np.sum(dif * dif, axis=1)
self._b[k] += 0.5 * np.sum(
self._z.T[k] * (d + self.n_features))
self._bound_prec[k] = (
0.5 * self.n_features * (
digamma(self._a[k]) - np.log(self._b[k])))
self._precs = self._a / self._b
elif self.cvtype == 'diag':
for k in xrange(self.n_components):
self._a[k].fill(1. + 0.5 * np.sum(self._z.T[k], axis=0))
ddif = (self._X - self._means[k]) # see comment above
for d in xrange(self.n_features):
self._b[k, d] = 1.
dd = ddif.T[d] * ddif.T[d]
self._b[k, d] += 0.5 * np.sum(self._z.T[k] * (dd + 1))
self._precs[k] = self._a[k] / self._b[k]
self._bound_prec[k] = 0.5 * np.sum(digamma(self._a[k])
- np.log(self._b[k]))
self._bound_prec[k] -= 0.5 * np.sum(self._precs[k])
elif self.cvtype == 'tied':
self._a = 2 + self._X.shape[0] + self.n_features
self._B = (self._X.shape[0] + 1) * np.identity(self.n_features)
for i in xrange(self._X.shape[0]):
for k in xrange(self.n_components):
dif = self._X[i] - self._means[k]
self._B += self._z[i, k] * np.dot(dif.reshape((-1, 1)),
dif.reshape((1, -1)))
self._B = linalg.pinv(self._B)
self._precs = self._a * self._B
self._detB = linalg.det(self._B)
self._bound_prec = 0.5 * detlog_wishart(
self._a, self._B, self._detB, self.n_features)
self._bound_prec -= 0.5 * self._a * np.trace(self._B)
elif self.cvtype == 'full':
for k in xrange(self.n_components):
T = np.sum(self._z.T[k])
self._a[k] = 2 + T + self.n_features
self._B[k] = (T + 1) * np.identity(self.n_features)
dx = self._X - self._means[k]
self._B[k] += np.dot((self._z[:, k] * dx.T), dx)
self._B[k] = linalg.inv(self._B[k])
self._precs[k] = self._a[k] * self._B[k]
self._detB[k] = linalg.det(self._B[k])
self._bound_prec[k] = 0.5 * detlog_wishart(self._a[k],
self._B[k],
self._detB[k],
self.n_features)
self._bound_prec[k] -= 0.5 * self._a[k] * np.trace(self._B[k])
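The detlog_wishart helper is not shown in the snippet; for reference, here is a minimal sketch of the standard expected log-determinant of a Wishart-distributed precision matrix, which is the identity such variational bounds are usually built from (cf. Bishop, PRML eq. 10.65). This is not claimed to be scikit-learn's exact implementation.

import numpy as np
from scipy import linalg
from scipy.special import digamma

def expected_log_det(nu, W):
    # E[log|Lambda|] for Lambda ~ Wishart(nu degrees of freedom, scale matrix W)
    D = W.shape[0]
    return (digamma(0.5 * (nu - np.arange(D))).sum()
            + D * np.log(2.0)
            + np.log(linalg.det(W)))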
Example 11: KL_normal
def KL_normal(m1, sigma1, m2, sigma2):
"""
Calculates the KL divergence KL(N1 || N2) between two normal distributions
N1 = N(``m1``, ``sigma1``) and N2 = N(``m2``, ``sigma2``)
"""
return 1. / 2. * (math.log(det(sigma2) / det(sigma1)) - len(m1) + trace(mdot(inv(sigma2), sigma1)) + \
mdot((m2 - m1).T, inv(sigma2) , m2- m1))
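A self-contained numeric check of the same formula, rewritten with plain numpy/scipy names (mdot, inv, trace and det come from the surrounding module in the original): in one dimension it reduces to the familiar scalar expression log(s2/s1) + (s1**2 + (m1-m2)**2)/(2*s2**2) - 1/2.

import numpy as np
from scipy.linalg import det, inv

def kl_normal(m1, S1, m2, S2):
    k = len(m1)
    d = np.asarray(m2) - np.asarray(m1)
    return 0.5 * (np.log(det(S2) / det(S1)) - k
                  + np.trace(inv(S2) @ S1)
                  + d @ inv(S2) @ d)

mu1, s1, mu2, s2 = 0.3, 1.2, -0.5, 0.8   # one-dimensional example
kl_matrix = kl_normal(np.array([mu1]), np.array([[s1**2]]),
                      np.array([mu2]), np.array([[s2**2]]))
kl_scalar = np.log(s2 / s1) + (s1**2 + (mu1 - mu2)**2) / (2 * s2**2) - 0.5
assert np.isclose(kl_matrix, kl_scalar)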
Example 12: Matusita_kernel
def Matusita_kernel(cov_1, cov_2):
p = np.shape(cov_1)[0]
det_1 = la.det(cov_1)
det_2 = la.det(cov_2)
det_sum = la.det(cov_1 + cov_2)
return ((2 ** (p/2.0)) * (det_1 ** 0.25) * (det_2 ** 0.25))/(det_sum ** 0.5)
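This is the Matusita affinity between two zero-mean Gaussians with covariances cov_1 and cov_2. A quick self-contained sanity check of the closed form: when the two covariances are equal, 2**(p/2) * |S|**(1/2) / |2S|**(1/2) collapses to 1.

import numpy as np
from scipy import linalg as la

p = 3
cov = np.diag([1.0, 2.0, 0.5])
value = (2 ** (p / 2.0)) * (la.det(cov) ** 0.25) * (la.det(cov) ** 0.25) / (la.det(cov + cov) ** 0.5)
assert np.isclose(value, 1.0)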
Example 13: _ar_model_select
def _ar_model_select(R, m, ne, p_range):
"""model order selection
:Parameters:
R : ndarray
upper triangular mx from QR
m : int
state vector dimension
ne : int
number of block equations of size m used in the estimation
p_range : list
list of model order to select from
"""
# inits
p_max = max(p_range)
p_len = len(p_range)
sbc = N.zeros(p_len)
fpe = N.zeros(p_len)
ldp = N.zeros(p_len)
np = N.zeros(p_len)
np[-1] = m * p_max
# get lower right triangle of R
#
# | R11 R12 |
# R = | |
# | 0 R22 |
#
R22 = R[np[-1]:np[-1] + m, :][:, np[-1]:np[-1] + m]
invR22 = NL.inv(R22)
Mp = N.dot(invR22, invR22.T)
# model selection
ldp[-1] = 2.0 * N.log(NL.det(R22))
for i in reversed(xrange(p_len)):
np[i] = m * p_range[i]
if p_range[i] < p_max:
# downdated part of R
Rp = R[np[i]:np[i] + m, :][:, np[-1]:np[-1] + m]
# Woodbury formula
L = NL.cholesky(N.eye(m) + N.dot(N.dot(Rp, Mp), Rp.T), lower=True)
Np = N.dot(N.dot(NL.inv(L), Rp), Mp)
Mp = Mp - N.dot(Np.T, Np)
ldp[i] = ldp[i + 1] + 2.0 * N.log(NL.det(L))
# selector metrics
sbc[i] = ldp[i] / m - N.log(ne) * (ne - np[i]) / ne
fpe[i] = ldp[i] / m - N.log(ne * (ne - np[i]) / (ne + np[i]))
# return
return sbc, fpe, ldp, np
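The ldp update above accumulates log-determinants through Cholesky factors instead of recomputing determinants directly; the identity in play is log|M| = 2 * sum(log(diag(chol(M)))) for symmetric positive definite M, which avoids over/underflow of the raw determinant. A minimal self-contained check:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
M = A @ A.T + 5 * np.eye(5)            # symmetric positive definite
L = linalg.cholesky(M, lower=True)
assert np.isclose(2.0 * np.log(np.diag(L)).sum(), np.log(linalg.det(M)))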
Example 14: main1
def main1():
N = 4 # number of time slices
n = 1
r = 1
p = 1
m = 1
s = 2
q = 1 # number of design points
solver = IMFSolver(n=n, r=r, p=p, m=m, s=s, N=N)
theta = [1.0, 1.0]
solver.set_Phi([[theta[0]]])
solver.set_diff_Phi_theta([[1.0]], 0)
solver.set_diff_Phi_theta([[0.0]], 1)
solver.set_Psi([[theta[1]]])
solver.set_diff_Psi_theta([[0.0]], 0)
solver.set_diff_Psi_theta([[1.0]], 1)
solver.set_Gamma([[1.0]])
solver.set_diff_Gamma_theta([[0.0]], 0)
solver.set_diff_Gamma_theta([[0.0]], 1)
solver.set_H([[1.0]])
solver.set_diff_H_theta([[0.0]], 0)
solver.set_diff_H_theta([[0.0]], 1)
solver.set_Q([[0.1]])
solver.set_diff_Q_theta([[0.0]], 0)
solver.set_diff_Q_theta([[0.0]], 1)
solver.set_R([[0.3]])
solver.set_diff_R_theta([[0.0]], 0)
solver.set_diff_R_theta([[0.0]], 1)
solver.set_x0([[0.0]])
solver.set_diff_x0_theta([[0.0]], 0)
solver.set_diff_x0_theta([[0.0]], 1)
solver.set_P0([[0.1]])
solver.set_diff_P0_theta([[0.0]], 0)
solver.set_diff_P0_theta([[0.0]], 1)
solver.set_u([[1.0]], 0)
solver.set_u([[1.0]], 1)
solver.set_u([[1.0]], 2)
solver.set_u([[2.0]], 3)
M = solver.get_inf_matrix()
print "M"
print M
print "la.det(M) = ", la.det(M)
print "-np.log(la.det(M)) = ", -np.log(la.det(M))
Example 15: give_me_an_array
def give_me_an_array(A, B, C):
output = np.zeros((2,2))
### START YOUR CODE HERE ###
output[0,0] = linalg.det(A+B.dot(linalg.inv(C)))
output[0,1] = linalg.det(B-C.dot(linalg.inv(A)))
output[1,0] = linalg.det(B-A.dot(linalg.inv(C)))
output[1,1] = linalg.det(A+C.dot(linalg.inv(B)))
#### END YOUR CODE HERE ####
return output
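A worked call with identity inputs makes the expected output easy to verify by hand (this assumes the give_me_an_array function above is in scope, together with the numpy and scipy.linalg imports it relies on): the diagonal entries are det(2*I) = 4 and the off-diagonal entries are det(0) = 0.

import numpy as np

I2 = np.eye(2)
result = give_me_an_array(I2, I2, I2)
# det(I + I @ inv(I)) = det(2I) = 4;  det(I - I @ inv(I)) = det(0) = 0
print(result)   # [[4. 0.]
                #  [0. 4.]]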