本文整理汇总了Python中theano.tensor.nlinalg.det函数的典型用法代码示例。如果您正苦于以下问题:Python det函数的具体用法?Python det怎么用?Python det使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了det函数的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_det_shape
def test_det_shape():
    """The inferred symbolic shape of det(x) must match its runtime shape."""
    seed = utt.fetch_seed()
    sample = np.random.RandomState(seed).randn(5, 5).astype(config.floatX)
    x = tensor.matrix()
    value_fn = theano.function([x], det(x))
    shape_fn = theano.function([x], det(x).shape)
    assert np.all(value_fn(sample).shape == shape_fn(sample))
示例2: logp
def logp(self, X):
    """Log-density at matrix X under the distribution parametrized by
    (self.n, self.p, self.V), bounded to the support n > p - 1."""
    df = self.n
    dim = self.p
    scale = self.V
    det_scale = det(scale)
    det_x = det(X)
    # Unnormalized log-kernel plus normalization constants, all divided by 2.
    kernel = ((df - dim - 1) * log(det_x)
              - trace(matrix_inverse(scale).dot(X))
              - df * dim * log(2) - df * log(det_scale)
              - 2 * multigammaln(df / 2., dim)) / 2
    return bound(kernel, df > (dim - 1))
示例3: logp
def logp(self, X):
    """Log-density at matrix X, with explicit support conditions:
    n > p - 1, X positive definite (all eigenvalues > 0), X symmetric."""
    df = self.n
    dim = self.p
    scale = self.V
    det_scale = det(scale)
    det_x = det(X)
    kernel = ((df - dim - 1) * log(det_x)
              - trace(matrix_inverse(scale).dot(X))
              - df * dim * log(2) - df * log(det_scale)
              - 2 * multigammaln(df / 2., dim)) / 2
    return bound(
        kernel,
        gt(df, (dim - 1)),       # degrees-of-freedom constraint
        all(gt(eigh(X)[0], 0)),  # X must be positive definite
        eq(X, X.T)               # X must be symmetric
    )
示例4: logp
def logp(self, X):
    """Log-density at matrix X, bounded to symmetric positive-definite X
    with n > p - 1 (uses the tt tensor namespace)."""
    df = self.n
    dim = self.p
    scale = self.V
    det_scale = det(scale)
    det_x = det(X)
    kernel = ((df - dim - 1) * tt.log(det_x)
              - trace(matrix_inverse(scale).dot(X))
              - df * dim * tt.log(2) - df * tt.log(det_scale)
              - 2 * multigammaln(df / 2., dim)) / 2
    return bound(kernel,
                 matrix_pos_def(X),   # X positive definite
                 tt.eq(X, X.T),       # X symmetric
                 df > (dim - 1))
示例5: test_det
def test_det():
    """det(x) must agree with numpy.linalg.det on a random 5x5 matrix."""
    seed = utt.fetch_seed()
    sample = np.random.RandomState(seed).randn(5, 5).astype(config.floatX)
    x = tensor.matrix()
    compute_det = theano.function([x], det(x))
    assert np.allclose(np.linalg.det(sample), compute_det(sample))
示例6: likelihood
def likelihood(f, l, R, mu, eps, sigma2, lambda_1=1e-4):
    """L1-regularized Gaussian log-likelihood of the labeled targets f[l==1]
    under a covariance derived from a graph Laplacian.

    The similarity matrix is a linear combination (weights mu) of the slices
    of R; its regularized Laplacian is inverted to obtain the covariance, and
    additive observation noise sigma2 is applied on the labeled sub-block.
    """
    # Similarity matrix: linear combination of the slices in R.
    W = T.tensordot(R, mu, axes=1)
    # Indices of the labeled examples (entries of l equal to 1).
    labeled = T.eq(l, 1).nonzero()
    # Graph Laplacian of W, regularized with eps*I so it is invertible.
    degree = T.diag(W.sum(axis=0))
    laplacian = degree - W
    regularized = laplacian + eps * T.eye(laplacian.shape[0])
    # Covariance (kernel) matrix is the inverse of the regularized Laplacian.
    Sigma = nlinalg.matrix_inverse(regularized)
    # Labeled sub-block of the covariance, plus additive Gaussian noise.
    Sigma_LL = Sigma[labeled][:, labeled][:, 0, :]
    K_L = Sigma_LL + (sigma2 * T.eye(Sigma_LL.shape[0]))
    iK_L = nlinalg.matrix_inverse(K_L)
    dK_L = nlinalg.det(K_L)
    f_L = f[labeled]
    # NOTE(review): `(1 / 2)` is integer division (== 0) under Python 2
    # without `from __future__ import division`; preserved verbatim here.
    quad_term = - (1 / 2) * f_L.dot(iK_L.dot(f_L))
    logdet_term = - (1 / 2) * T.log(dK_L)
    const_term = - (1 / 2) * T.log(2 * np.pi)
    l1_penalty = - lambda_1 * T.sum(abs(mu))
    return quad_term + logdet_term + const_term + l1_penalty
示例7: logp
def logp(self, X):
    """Log-density at matrix X with degrees of freedom self.nu, bounded to
    symmetric positive-definite X with nu > p - 1; conditions are not
    broadcast (broadcast_conditions=False)."""
    df = self.nu
    dim = self.p
    scale = self.V
    det_scale = det(scale)
    det_x = det(X)
    kernel = ((df - dim - 1) * tt.log(det_x)
              - trace(matrix_inverse(scale).dot(X))
              - df * dim * tt.log(2) - df * tt.log(det_scale)
              - 2 * multigammaln(df / 2., dim)) / 2
    return bound(kernel,
                 matrix_pos_def(X),   # X positive definite
                 tt.eq(X, X.T),       # X symmetric
                 df > (dim - 1),
                 broadcast_conditions=False)
示例8: logp
def logp(self, value):
    """Multivariate-normal log-density of `value`, parametrized by the
    mean self.mu and precision matrix self.tau."""
    mean = self.mu
    precision = self.tau
    residual = value - mean
    k = precision.shape[0]
    # log N(value; mu, tau^-1) = (-k*log(2*pi) + log|tau| - r' tau r) / 2
    mahalanobis = dot(residual.T, dot(precision, residual))
    return 1/2. * (-k * log(2*pi) + log(det(precision)) - mahalanobis)
示例9: logp
def logp(self, x):
    """Log-density of a correlation matrix packed into the vector x.

    The full matrix is rebuilt via self.tri_index with a unit diagonal, and
    the density is bounded to entries in [-1, 1] and n > 0.
    """
    shape_param = self.n
    dim = self.p
    # Reconstruct the symmetric matrix and force ones on the diagonal.
    corr = T.fill_diagonal(x[self.tri_index], 1)
    total = self._normalizing_constant(shape_param, dim)
    total += (shape_param - 1.0) * T.log(det(corr))
    return bound(total,
                 T.all(corr <= 1),   # correlation entries bounded above
                 T.all(corr >= -1),  # ... and below
                 shape_param > 0)
示例10: evaluateLogDensity
def evaluateLogDensity(self,X,Y):
    """Joint log-density of a latent path X and observations Y under a
    linear latent dynamics model (matrix self.A, initial mean self.x0)
    with Gaussian noise given by the precisions self.Rinv (emissions,
    diagonal), self.Lambda (transitions) and self.Lambda0 (initial state).
    """
    # Predicted emissions: the rate node evaluated at the sampled latents.
    Ypred = theano.clone(self.rate, replace={self.Xsamp: X})
    resY = Y - Ypred
    # One-step-ahead latent residuals under the dynamics matrix A.
    resX = X[1:] - T.dot(X[:(X.shape[0] - 1)], self.A.T)
    resX0 = X[0] - self.x0
    # Negative quadratic (Mahalanobis) terms for emissions, transitions
    # and the initial state.
    emission_quad = (0.5 * T.dot(resY.T, resY) * T.diag(self.Rinv)).sum()
    dynamics_quad = (0.5 * T.dot(resX.T, resX) * self.Lambda).sum()
    initial_quad = 0.5 * T.dot(T.dot(resX0, self.Lambda0), resX0.T)
    LogDensity = -emission_quad - dynamics_quad - initial_quad
    # Normalization: log-determinants of the precisions and the Gaussian
    # -0.5*D*log(2*pi) constant per time step.
    LogDensity += (0.5 * (T.log(self.Rinv)).sum() * Y.shape[0]
                   + 0.5 * T.log(Tla.det(self.Lambda)) * (Y.shape[0] - 1)
                   + 0.5 * T.log(Tla.det(self.Lambda0))
                   - 0.5 * (self.xDim + self.yDim) * np.log(2 * np.pi) * Y.shape[0])
    return LogDensity
示例11: logp
def logp(self, x):
    """Log-density of a correlation matrix packed into the vector x
    (lower-case `t` tensor namespace variant).

    Bounded to n > 0 and all matrix entries within [-1, 1].
    """
    shape_param = self.n
    dim = self.p
    # Reconstruct the symmetric matrix and force ones on the diagonal.
    corr = t.fill_diagonal(x[self.tri_index], 1)
    total = self._normalizing_constant(shape_param, dim)
    total += (shape_param - 1.0) * log(det(corr))
    return bound(total,
                 shape_param > 0,
                 all(le(corr, 1)),    # entries bounded above by 1
                 all(ge(corr, -1)))   # entries bounded below by -1
示例12: logp_normal
def logp_normal(mu, tau, value):
    """Per-row multivariate-normal log-density of `value` with mean `mu`
    and precision matrix `tau` (sums the quadratic form over axis 1)."""
    dim = tau.shape[0]
    residual = value - mu
    # Row-wise quadratic form (x - mu)' tau (x - mu).
    quad = (residual.dot(tau) * residual).sum(axis=1)
    # -0.5 * (k*log(2*pi) + log(1/|tau|) + quadratic form)
    return -0.5 * (dim * tt.log(2 * np.pi) + tt.log(1/det(tau)) + quad)