This article collects typical usage examples of the Python function theano.tensor.nlinalg.matrix_inverse. If you are wondering what matrix_inverse does, or how to use it in practice, the hand-picked code examples below may help.
Fifteen code examples of matrix_inverse are shown below, sorted by popularity by default.
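As a minimal, hedged sketch of the basic call (the matrix values are purely illustrative and not taken from any example below): matrix_inverse builds a symbolic inverse node, which is only evaluated numerically when the compiled function is called.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor import nlinalg

A = T.matrix('A')                      # symbolic square matrix
A_inv = nlinalg.matrix_inverse(A)      # symbolic inverse node
invert = theano.function([A], A_inv)   # compile the graph

x = np.array([[4., 7.], [2., 6.]], dtype=theano.config.floatX)
print(invert(x))                       # numerical inverse (Theano evaluates it with NumPy/LAPACK)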
Example 1: compute_S
def compute_S(idx, Sp1, zAA, zBB):
    Sm = ifelse(T.eq(idx, nT-2),
                T.dot(zBB[iib[-1]], Tla.matrix_inverse(zAA[iia[-1]])),
                T.dot(zBB[iib[idx]],
                      Tla.matrix_inverse(zAA[iia[T.min([idx+1, nT-2])]]
                                         - T.dot(Sp1, T.transpose(zBB[iib[T.min([idx+1, nT-2])]]))))
                )
    return Sm
Example 2: likelihood
def likelihood(f, l, R, mu, eps, sigma2, lambda_1=1e-4):
    # The similarity matrix W is a linear combination of the slices in R
    W = T.tensordot(R, mu, axes=1)
    # The following indices correspond to labeled and unlabeled examples
    labeled = T.eq(l, 1).nonzero()
    # Calculating the graph Laplacian of W
    D = T.diag(W.sum(axis=0))
    L = D - W
    # The Covariance (or Kernel) matrix is the inverse of the (regularized) Laplacian
    epsI = eps * T.eye(L.shape[0])
    rL = L + epsI
    Sigma = nlinalg.matrix_inverse(rL)
    # The marginal density of labeled examples uses Sigma_LL as covariance (sub-)matrix
    Sigma_LL = Sigma[labeled][:, labeled][:, 0, :]
    # We also consider additive Gaussian noise with variance sigma2
    K_L = Sigma_LL + (sigma2 * T.eye(Sigma_LL.shape[0]))
    # Calculating the inverse and the determinant of K_L
    iK_L = nlinalg.matrix_inverse(K_L)
    dK_L = nlinalg.det(K_L)
    f_L = f[labeled]
    # The (L1-regularized) log-likelihood is given by the summation of the following four terms
    term_A = - (1 / 2) * f_L.dot(iK_L.dot(f_L))
    term_B = - (1 / 2) * T.log(dK_L)
    term_C = - (1 / 2) * T.log(2 * np.pi)
    term_D = - lambda_1 * T.sum(abs(mu))
    return term_A + term_B + term_C + term_D
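For reference, the quantity assembled above is the L1-regularized Gaussian log marginal likelihood of the labelled targets, exactly as the code computes it (the normalizing constant is kept as a single -1/2 log 2π term):

\[
\log p(f_L \mid \mu) = -\tfrac{1}{2} f_L^\top K_L^{-1} f_L - \tfrac{1}{2}\log\lvert K_L\rvert - \tfrac{1}{2}\log 2\pi - \lambda_1 \lVert \mu \rVert_1,
\qquad K_L = \Sigma_{LL} + \sigma^2 I,\quad \Sigma = (L + \epsilon I)^{-1}.
\]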
Example 3: __init__
def __init__(self, GenerativeParams, xDim, yDim, srng=None, nrng=None):
    super(LDS, self).__init__(GenerativeParams, xDim, yDim, srng, nrng)

    # parameters
    if 'A' in GenerativeParams:
        self.A = theano.shared(value=GenerativeParams['A'].astype(theano.config.floatX), name='A', borrow=True)      # dynamics matrix
    else:
        # TBD: MAKE A BETTER WAY OF SAMPLING DEFAULT A
        self.A = theano.shared(value=.5*np.diag(np.ones(xDim).astype(theano.config.floatX)), name='A', borrow=True)  # dynamics matrix

    if 'QChol' in GenerativeParams:
        self.QChol = theano.shared(value=GenerativeParams['QChol'].astype(theano.config.floatX), name='QChol', borrow=True)  # cholesky of innovation cov matrix
    else:
        self.QChol = theano.shared(value=(np.eye(xDim)).astype(theano.config.floatX), name='QChol', borrow=True)             # cholesky of innovation cov matrix

    if 'Q0Chol' in GenerativeParams:
        self.Q0Chol = theano.shared(value=GenerativeParams['Q0Chol'].astype(theano.config.floatX), name='Q0Chol', borrow=True)  # cholesky of starting distribution cov matrix
    else:
        self.Q0Chol = theano.shared(value=(np.eye(xDim)).astype(theano.config.floatX), name='Q0Chol', borrow=True)              # cholesky of starting distribution cov matrix

    if 'RChol' in GenerativeParams:
        self.RChol = theano.shared(value=np.ndarray.flatten(GenerativeParams['RChol'].astype(theano.config.floatX)), name='RChol', borrow=True)  # cholesky of observation noise cov matrix
    else:
        self.RChol = theano.shared(value=np.random.randn(yDim).astype(theano.config.floatX)/10, name='RChol', borrow=True)                       # cholesky of observation noise cov matrix

    if 'x0' in GenerativeParams:
        self.x0 = theano.shared(value=GenerativeParams['x0'].astype(theano.config.floatX), name='x0', borrow=True)   # set to zero for stationary distribution
    else:
        self.x0 = theano.shared(value=np.zeros((xDim,)).astype(theano.config.floatX), name='x0', borrow=True)        # set to zero for stationary distribution

    if 'NN_XtoY_Params' in GenerativeParams:
        self.NN_XtoY = GenerativeParams['NN_XtoY_Params']['network']
    else:
        # Define a neural network that maps the latent state into the output
        gen_nn = lasagne.layers.InputLayer((None, xDim))
        self.NN_XtoY = lasagne.layers.DenseLayer(gen_nn, yDim, nonlinearity=lasagne.nonlinearities.linear, W=lasagne.init.Orthogonal())

    # set to our lovely initial values
    if 'C' in GenerativeParams:
        self.NN_XtoY.W.set_value(GenerativeParams['C'].astype(theano.config.floatX))
    if 'd' in GenerativeParams:
        self.NN_XtoY.b.set_value(GenerativeParams['d'].astype(theano.config.floatX))

    # we assume diagonal covariance (RChol is a vector)
    self.Rinv = 1./(self.RChol**2)  # Tla.matrix_inverse(T.dot(self.RChol, T.transpose(self.RChol)))
    self.Lambda = Tla.matrix_inverse(T.dot(self.QChol, self.QChol.T))
    self.Lambda0 = Tla.matrix_inverse(T.dot(self.Q0Chol, self.Q0Chol.T))

    # Call the neural network output a rate, basically to keep things consistent with the PLDS class
    self.rate = lasagne.layers.get_output(self.NN_XtoY, inputs=self.Xsamp)
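The precision matrices Lambda and Lambda0 above follow a common pattern: parameterize a covariance by its Cholesky factor Q and invert Q·Qᵀ symbolically. A minimal standalone sketch of that pattern (the 3×3 identity input is only an illustration):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor import nlinalg as Tla

QChol = T.matrix('QChol')
Lambda = Tla.matrix_inverse(T.dot(QChol, QChol.T))          # precision = inv(Q Q^T)
precision_fn = theano.function([QChol], Lambda)
print(precision_fn(np.eye(3, dtype=theano.config.floatX)))  # inv(I I^T) = I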
Example 4: compute_D
def compute_D(idx, Dm1, zS, zAA, zBB):
    D = ifelse(T.eq(idx, nT-1),
               T.dot(Tla.matrix_inverse(zAA[iia[-1]]),
                     III + T.dot(T.transpose(zBB[iib[idx-1]]),
                                 T.dot(Dm1, S[0]))),
               ifelse(T.eq(idx, 0),
                      Tla.matrix_inverse(zAA[iia[0]]
                                         - T.dot(zBB[iib[0]], T.transpose(S[-1]))),
                      T.dot(Tla.matrix_inverse(zAA[iia[idx]]
                                               - T.dot(zBB[iib[T.min([idx, nT-2])]], T.transpose(S[T.max([-idx-1, -nT+1])]))),
                            III + T.dot(T.transpose(zBB[iib[T.min([idx-1, nT-2])]]),
                                        T.dot(Dm1, S[-idx])))
                      )
               )
    return D
Example 5: get_bivariate_normal_spec
def get_bivariate_normal_spec():
    X1, X2, mu, sigma = [T.scalar('X1'), T.scalar('X2'), T.vector('mu'), T.matrix('sigma')]
    GaussianDensitySpec = FunctionSpec(variables=[X1, X2, mu, sigma],
                                       output_expression=-0.5*T.dot(T.dot((T.concatenate([X1.dimshuffle('x'), X2.dimshuffle('x')])-mu).T,
                                                                           nlinalg.matrix_inverse(sigma)),
                                                                    (T.concatenate([X1.dimshuffle('x'), X2.dimshuffle('x')])-mu)))
    return GaussianDensitySpec
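As I read it, the output_expression above is the quadratic-form part of the bivariate normal log-density, i.e. the kernel of N(μ, Σ) without the -1/2 log((2π)² |Σ|) normalizer:

\[
-\tfrac{1}{2}\,(x-\mu)^\top \Sigma^{-1} (x-\mu), \qquad x = (X_1, X_2)^\top .
\]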
Example 6: __call__
def __call__(self, A, b, inference=False):
    if inference is True:
        solve = slinalg.Solve()
        x = solve(A, b)
    else:
        x = nlinalg.matrix_inverse(A).dot(b)
    return x
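A hedged standalone sketch of the two branches (variable names are illustrative): both routes produce the same solution of A x = b. slinalg.Solve avoids forming the explicit inverse and is usually the numerically safer choice; presumably the matrix_inverse branch is kept for training because its gradient is straightforward to take through the graph.

import theano
import theano.tensor as T
from theano.tensor import nlinalg, slinalg

A = T.matrix('A')
b = T.vector('b')
x_via_inverse = nlinalg.matrix_inverse(A).dot(b)   # explicit inverse, then matrix-vector product
x_via_solve = slinalg.Solve()(A, b)                # direct linear solve
fn = theano.function([A, b], [x_via_inverse, x_via_solve])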
Example 7: propagate
def propagate(f, l, R, mu, eps):
    # The similarity matrix W is a linear combination of the slices in R
    W = T.tensordot(R, mu, axes=1)
    # The following indices correspond to labeled and unlabeled examples
    labeled = T.eq(l, 1).nonzero()
    unlabeled = T.eq(l, 0).nonzero()
    # Calculating the graph Laplacian of W
    D = T.diag(W.sum(axis=0))
    L = D - W
    # Computing L_UU (the Laplacian over unlabeled examples)
    L_UU = L[unlabeled][:, unlabeled][:, 0, :]
    # Computing the inverse of the (regularized) Laplacian iA = (L_UU + epsI)^-1
    epsI = eps * T.eye(L_UU.shape[0])
    rL_UU = L_UU + epsI
    iA = nlinalg.matrix_inverse(rL_UU)
    # Computing W_UL (the similarity matrix between unlabeled and labeled examples)
    W_UL = W[unlabeled][:, labeled][:, 0, :]
    f_L = f[labeled]
    # f* = (L_UU + epsI)^-1 W_UL f_L
    f_star = iA.dot(W_UL.dot(f_L))
    return f_star
Example 8: test_gpu_matrix_inverse_inplace_opt
def test_gpu_matrix_inverse_inplace_opt(self):
    A = theano.tensor.fmatrix("A")
    fn = theano.function([A], matrix_inverse(A), mode=mode_with_gpu)
    assert any([
        node.op.inplace
        for node in fn.maker.fgraph.toposort()
        if isinstance(node.op, GpuMagmaMatrixInverse)
    ])
Example 9: _calc_caylay_delta
def _calc_caylay_delta(step_size, param, gradient):
    A = Tensor.dot(((step_size / 2) * gradient).T, param) - Tensor.dot(param.T, ((step_size / 2) * gradient))
    I = Tensor.identity_like(A)
    temp = I + A
    # Q = Tensor.dot(batched_inv(temp.dimshuffle('x', 0, 1))[0], (I - A))
    Q = Tensor.dot(matrix_inverse(temp), I - A)
    update = Tensor.dot(param, Q)
    delta = (step_size / 2) * Tensor.dot((param + update), A)
    return update, delta
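As I read it, this is a Cayley-transform step: with the skew-symmetric matrix A = (τ/2)(GᵀP − PᵀG) built from the parameter P and its gradient G, the orthogonality-preserving update is

\[
Q = (I + A)^{-1}(I - A), \qquad P \leftarrow P\,Q,
\]

so matrix_inverse supplies the (I + A)^{-1} factor.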
Example 10: invLogDet
def invLogDet(C):
    # Return inv(A) and log det A where A = C . C^T
    iC = nlinalg.matrix_inverse(C)
    iC.name = "i" + C.name
    iA = T.dot(iC.T, iC)
    iA.name = "i" + C.name[1:]
    logDetA = 2.0 * T.sum(T.log(T.abs_(T.diag(C))))
    logDetA.name = "logDet" + C.name[1:]
    return (iA, logDetA)
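A hedged usage sketch: the log-determinant formula assumes C is a triangular Cholesky factor, and the helper derives output names by stripping the first character of C.name, so a name like 'cA' is assumed here.

import theano.tensor as T

C = T.matrix('cA')            # triangular Cholesky factor of A = C C^T
iA, logDetA = invLogDet(C)    # iA.name == 'iA', logDetA.name == 'logDetA'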
Example 11: test_inverse_singular
def test_inverse_singular():
    singular = numpy.array([[1, 0, 0]] + [[0, 1, 0]] * 2, dtype=theano.config.floatX)
    a = tensor.matrix()
    f = function([a], matrix_inverse(a))
    try:
        f(singular)
    except numpy.linalg.LinAlgError:
        return
    assert False
Example 12: logp
def logp(self, X):
    n = self.n
    p = self.p
    V = self.V

    IVI = det(V)
    IXI = det(X)

    return bound(
        ((n - p - 1) * log(IXI) - trace(matrix_inverse(V).dot(X)) -
         n * p * log(2) - n * log(IVI) - 2 * multigammaln(n / 2., p)) / 2,
        n > (p - 1))
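The expression being bounded is the Wishart log-density with n degrees of freedom and p×p scale matrix V, where Γ_p is the multivariate gamma function:

\[
\log p(X \mid V, n) = \tfrac{1}{2}\Big[(n-p-1)\log\lvert X\rvert - \operatorname{tr}(V^{-1}X) - np\log 2 - n\log\lvert V\rvert\Big] - \log \Gamma_p\!\left(\tfrac{n}{2}\right).
\]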
Example 13: invert_weight_matrix_symb
def invert_weight_matrix_symb(w):
    invw = []
    for i in range(len(w)):
        # layer_weight = w[-(i+1)]
        if i % 2 == 1:
            layer_weight = w[-(i+1)]
            print("inv val", -(i+1+1), "of length", len(w))
            invw.append(matrix_inverse(layer_weight))
        else:
            layer_weight = w[-(i+1)]
            print("bias inv val", -(i+1-1), "of length", len(w))
            invw.append(-layer_weight)
    return invw
Example 14: blk_chol_inv
def blk_chol_inv(A, B, b, lower=True, transpose=False):
    '''
    Solve the equation Cx = b for x, where C is assumed to be a
    block-bi-diagonal matrix (only the first lower or upper
    off-diagonal block is nonzero).

    Inputs:
    A - [T x n x n] tensor, where each A[i,:,:] is the ith block diagonal matrix
    B - [T-1 x n x n] tensor, where each B[i,:,:] is the ith (upper or lower)
        1st block off-diagonal matrix
    lower (default: True) - boolean specifying whether to treat B as the lower
        or upper 1st block off-diagonal of matrix C
    transpose (default: False) - boolean specifying whether to transpose the
        off-diagonal blocks B[i,:,:] (useful if you want to solve the
        problem C^T x = b with a representation of C)

    Outputs:
    x - solution of Cx = b
    '''
    if transpose:
        A = A.dimshuffle(0, 2, 1)
        B = B.dimshuffle(0, 2, 1)
    if lower:
        x0 = Tla.matrix_inverse(A[0]).dot(b[0])

        def lower_step(Akp1, Bk, bkp1, xk):
            return Tla.matrix_inverse(Akp1).dot(bkp1 - Bk.dot(xk))

        X = theano.scan(fn=lower_step, sequences=[A[1:], B, b[1:]], outputs_info=[x0])[0]
        X = T.concatenate([T.shape_padleft(x0), X])
    else:
        xN = Tla.matrix_inverse(A[-1]).dot(b[-1])

        def upper_step(Akm1, Bkm1, bkm1, xk):
            return Tla.matrix_inverse(Akm1).dot(bkm1 - Bkm1.dot(xk))

        X = theano.scan(fn=upper_step, sequences=[A[:-1][::-1], B[::-1], b[:-1][::-1]], outputs_info=[xN])[0]
        X = T.concatenate([T.shape_padleft(xN), X])[::-1]
    return X
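In the lower-triangular case the scan above is plain block forward substitution, with matrix_inverse applied only to the n×n diagonal blocks:

\[
x_1 = A_1^{-1} b_1, \qquad x_{k+1} = A_{k+1}^{-1}\big(b_{k+1} - B_k x_k\big), \quad k = 1, \dots, T-1,
\]

and the upper-triangular branch runs the same recurrence from the last block backwards.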
Example 15: logp
def logp(self, X):
    n = self.n
    p = self.p
    V = self.V

    IVI = det(V)
    IXI = det(X)

    return bound(((n - p - 1) * tt.log(IXI)
                  - trace(matrix_inverse(V).dot(X))
                  - n * p * tt.log(2) - n * tt.log(IVI)
                  - 2 * multigammaln(n / 2., p)) / 2,
                 matrix_pos_def(X),
                 tt.eq(X, X.T),
                 n > (p - 1))