This article collects typical usage examples of the Python function theano.sandbox.linalg.ops.matrix_inverse. If you have been wondering what exactly matrix_inverse does and how to use it, the hand-picked code samples below should help.
The following shows 10 code examples of matrix_inverse, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
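Before diving into the examples, here is a minimal, self-contained sketch of how matrix_inverse is typically used: build a symbolic graph, compile it with theano.function, and evaluate it on a concrete NumPy array. The variable names here are illustrative, not taken from any of the examples below.

import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.linalg.ops import matrix_inverse

x = T.dmatrix('x')               # symbolic square matrix
x_inv = matrix_inverse(x)        # symbolic inverse; nothing is computed yet
f = theano.function([x], x_inv)  # compile the graph

a = np.array([[2.0, 0.0],
              [0.0, 4.0]])
print(f(a))                      # [[0.5, 0.], [0., 0.25]]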
Example 1: linearRegression_1
import theano.tensor as T
from theano import function
from theano.sandbox.linalg import ops as linOps

def linearRegression_1(inputs, outputs):
    """
    Computes the least squares estimator (LSE) B_hat that minimises the sum
    of the squared errors.
    Computes B_hat as B_hat = (X.T . X)^-1 . X.T . y
    -> Ordinary Least Squares (OLS)
    http://en.wikipedia.org/wiki/Ordinary_least_squares
    In:
        inputs: Matrix of inputs (X) (n x p matrix)
            format: [[observation_1], ..., [observation_n]]
        outputs: Column vector (Matrix) of outputs y
            format: [[y_1], ..., [y_n]]
    Out:
        B_hat: Column vector (Matrix) of fitted slopes
            format: [[b_0], ..., [b_{p-1}]]
    """
    X = T.dmatrix('X')
    y = T.dcol('y')
    # B_hat = (X.T . X)^-1 . X.T . y
    # http://deeplearning.net/software/theano/library/sandbox/linalg.html
    # MatrixInverse is the op class; matrix_inverse is the helper function
    # built on top of it.
    B_hat = T.dot(T.dot(linOps.matrix_inverse(T.dot(X.T, X)), X.T), y)
    lse = function([X, y], B_hat)
    b = lse(inputs, outputs)
    return b
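A quick way to exercise linearRegression_1 is with a small, hypothetical data set (the arrays below are made up for illustration); since X has full column rank, the normal-equations solution should agree with NumPy's least-squares solver:

import numpy as np

X = np.array([[1.0, 2.0],
              [3.0, 4.0],
              [5.0, 7.0]])
y = np.array([[1.0], [2.0], [3.0]])

b = linearRegression_1(X, y)
# Cross-check against the reference solution from NumPy:
b_ref, _, _, _ = np.linalg.lstsq(X, y)
assert np.allclose(b, b_ref)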
Example 2: test_W_jump
def test_W_jump(self):
    " tests that W is where I think it should be "
    stats = self.stats
    cov_hs = stats.d['second_hs']
    assert cov_hs.dtype == config.floatX
    # mean_hsv[i, j] = E_{D,Q} h_i s_i v_j
    mean_hsv = stats.d['mean_hsv']
    # Regularize the second-moment matrix so the inverse is well defined.
    regularized = cov_hs + alloc_diag(T.ones_like(self.model.mu) * self.model.W_eps)
    assert regularized.dtype == config.floatX
    inv = matrix_inverse(regularized)
    assert inv.dtype == config.floatX
    new_W = T.dot(mean_hsv.T, inv)
    f = function([], new_W)
    Wv = f()
    aWv = self.model.W.get_value()
    diffs = Wv - aWv
    max_diff = np.abs(diffs).max()
    if max_diff > self.tol:
        raise Exception("W deviates from its correct value by as much as " + str(max_diff))
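The quantity under test is a ridge-style closed-form update, new_W = mean_hsv.T . (cov_hs + W_eps * I)^-1, where the small W_eps term added along the diagonal keeps the second-moment matrix safely invertible before matrix_inverse is applied.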
Example 3: test_matrix_inverse_solve
def test_matrix_inverse_solve():
    if not imported_scipy:
        raise SkipTest("Scipy needed for the Solve op.")
    A = theano.tensor.dmatrix('A')
    b = theano.tensor.dmatrix('b')
    node = matrix_inverse(A).dot(b).owner
    # The inv_as_solve optimization rewrites inv(A).dot(b) as solve(A, b).
    [out] = inv_as_solve.transform(node)
    assert isinstance(out.owner.op, Solve)
Example 4: test_inverse_singular
def test_inverse_singular():
    singular = numpy.array([[1, 0, 0]] + [[0, 1, 0]] * 2,
                           dtype=theano.config.floatX)
    a = tensor.matrix()
    f = function([a], matrix_inverse(a))
    try:
        f(singular)
    except numpy.linalg.LinAlgError:
        return
    assert False
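This behaviour follows from the op's implementation: matrix_inverse is computed with numpy.linalg.inv under the hood, which raises numpy.linalg.LinAlgError for a singular input, so the error propagates out of the compiled Theano function at call time.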
Example 5: test_matrix_inverse_as_solve_right
def test_matrix_inverse_as_solve_right():
    if not imported_scipy:
        raise SkipTest("Scipy needed for the Solve op.")
    A = theano.tensor.dmatrix('A')
    B = theano.tensor.dmatrix('B')
    node = B.dot(matrix_inverse(A)).owner
    [out] = inv_as_solve.transform(node)
    # Take into account the transpose after the solve operation, so go up
    # one level in the expression tree.
    assert isinstance(out.owner.inputs[0].owner.op, Solve)
Example 6: test_rop_lop
def test_rop_lop():
    mx = tensor.matrix('mx')
    mv = tensor.matrix('mv')
    v = tensor.vector('v')
    y = matrix_inverse(mx).sum(axis=0)

    # R-operator: Jacobian of y (w.r.t. mx) applied to the direction mv.
    yv = tensor.Rop(y, mx, mv)
    rop_f = function([mx, mv], yv)

    # Reference value: build the Jacobian-vector product row by row.
    sy, _ = theano.scan(lambda i, y, x, v: (tensor.grad(y[i], x) * v).sum(),
                        sequences=tensor.arange(y.shape[0]),
                        non_sequences=[y, mx, mv])
    scan_f = function([mx, mv], sy)

    rng = numpy.random.RandomState(utt.fetch_seed())
    vx = numpy.asarray(rng.randn(4, 4), theano.config.floatX)
    vv = numpy.asarray(rng.randn(4, 4), theano.config.floatX)
    v1 = rop_f(vx, vv)
    v2 = scan_f(vx, vv)
    assert _allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))

    raised = False
    try:
        tensor.Rop(
            theano.clone(y, replace={mx: break_op(mx)}),
            mx,
            mv)
    except ValueError:
        raised = True
    if not raised:
        raise Exception((
            'Op did not raise an error even though the function'
            ' is not differentiable'))

    # L-operator: the vector v applied to the Jacobian of y.
    vv = numpy.asarray(rng.uniform(size=(4,)), theano.config.floatX)
    yv = tensor.Lop(y, mx, v)
    lop_f = function([mx, v], yv)
    sy = tensor.grad((v * y).sum(), mx)
    scan_f = function([mx, v], sy)
    v1 = lop_f(vx, vv)
    v2 = scan_f(vx, vv)
    assert _allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
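For context: tensor.Rop(y, x, v) computes the Jacobian of y with respect to x multiplied on the right by v (forward mode), while tensor.Lop(y, x, v) computes v times the Jacobian (reverse mode); the test cross-checks both against an explicit Jacobian assembled row by row with theano.scan and tensor.grad.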
Example 7: test_inverse_correctness
def test_inverse_correctness():
    rng = numpy.random.RandomState(utt.fetch_seed())
    r = rng.randn(4, 4).astype(theano.config.floatX)
    x = tensor.matrix()
    xi = matrix_inverse(x)
    ri = function([x], xi)(r)
    assert ri.shape == r.shape
    assert ri.dtype == r.dtype
    # The inverse must work from both sides: r^-1 . r == r . r^-1 == I.
    rir = numpy.dot(ri, r)
    rri = numpy.dot(r, ri)
    assert _allclose(numpy.identity(4), rir), rir
    assert _allclose(numpy.identity(4), rri), rri
Example 8: feature_sign_search
def feature_sign_search(self):
    '''
    This function runs the feature_sign_search on the coefficients while
    holding the bases clamped.
    '''
    # Declare an effective zero for usefulness
    effective_zero = 1e-19
    opt_cond = np.inf
    '''
    theta[i] is:
        -1 if self.coefficients[i] < 0
         1 if self.coefficients[i] > 0
         0 if self.coefficients[i] == 0
    '''
    theta = T.sgn(self.coefficients)
    active_set = T.ivector(name='active_set')
    # This corresponds to the Gram matrix, obtained by dotting the basis
    # vectors with their transpose.
    gram_matrix = T.dot(self.bases.T, self.bases)
    target_correlation = T.dot(self.bases.T, self.x)
    cost = -T.sum((target_correlation - T.dot(gram_matrix, self.coefficients)) ** 2)
    cost_grad = T.grad(cost, self.coefficients)
    candidate = T.argmax(cost_grad)
    if T.gt(cost_grad[candidate], self.gamma):
        print('Found candidate greater than gamma')
        updated_theta = T.set_subtensor(theta[candidate], -1)
        active_set = active_set + candidate
    if T.lt(cost_grad[candidate], (-1 * self.gamma)):
        print('Found candidate less than negative gamma')
        updated_theta = T.set_subtensor(theta[candidate], 1)
        active_set = active_set + candidate
    active_bases = self.bases[active_set]
    active_coefficients = self.coefficients[active_set]
    active_theta = updated_theta[active_set]
    # Analytic minimizer over the active set; note this must be a matrix
    # product (T.dot), not the elementwise multiply of the original snippet.
    new_coefficients = T.dot(matrix_inverse(T.dot(active_bases.T, active_bases)),
                             (T.dot(active_bases.T, target_correlation)
                              - 0.5 * self.gamma * active_theta))
    sign_changes = 0
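For reference, in feature-sign search (Lee et al., 2007, "Efficient sparse coding algorithms") the analytic minimizer over the active set is s_new = (B_a.T . B_a)^-1 . (B_a.T . x - 0.5 * gamma * theta_a), where B_a and theta_a are the active bases and signs; the new_coefficients expression above mirrors this update, with matrix_inverse providing the inverse of the active Gram matrix.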
Example 9: test_matrix_inverse_solve
def test_matrix_inverse_solve():
    A = theano.tensor.dmatrix('A')
    b = theano.tensor.dmatrix('b')
    node = matrix_inverse(A).dot(b).owner
    [out] = inv_as_solve.transform(node)
    assert isinstance(out.owner.op, Solve)
Example 10: grad
def grad(self, inputs, g_outputs):
    gz, = g_outputs
    x, = inputs
    # Chain rule: scale the op's gradient with respect to x by the
    # incoming gradient gz.
    return [gz * T.dot(x, op.matrix_inverse(T.dot(x.T, x)))]