This article collects typical usage examples of the numpy.matrix function in Python. If you have been wondering what numpy.matrix actually does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
Fifteen code examples of the matrix function are shown, sorted by popularity by default. You can upvote any example you like or find useful; your feedback helps the system recommend better Python code samples.
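As a quick orientation before the examples: numpy.matrix builds a 2-D matrix object from a nested list, a string, or an ndarray, and overloads `*` as matrix multiplication, with `.T` for the transpose and `.I` for the inverse. The short sketch below only illustrates those basics and is not taken from any of the projects quoted later; note also that the NumPy documentation now recommends plain ndarrays with the `@` operator over numpy.matrix for new code.

import numpy as np

A = np.matrix([[1.0, 2.0], [3.0, 4.0]])   # 2x2 matrix from a nested list
x = np.matrix([[1.0], [0.0]])             # 2x1 column vector

y = A * x        # '*' is matrix multiplication for np.matrix objects
At = A.T         # transpose
Ainv = A.I       # inverse (raises numpy.linalg.LinAlgError if A is singular)

print(y)
print(A * Ainv)  # approximately the 2x2 identity matrix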
Example 1: test_square_matrices_1
def test_square_matrices_1(self):
    op4 = OP4()
    # matrices = op4.read_op4(os.path.join(op4Path, fname))
    form1 = 1
    form2 = 2
    form3 = 2
    from numpy import matrix, ones, reshape, arange
    A1 = matrix(ones((3, 3), dtype="float64"))
    A2 = reshape(arange(9, dtype="float64"), (3, 3))
    A3 = matrix(ones((1, 1), dtype="float32"))
    matrices = {"A1": (form1, A1), "A2": (form2, A2), "A3": (form3, A3)}

    for (is_binary, fname) in [(False, "small_ascii.op4"), (True, "small_binary.op4")]:
        op4_filename = os.path.join(op4Path, fname)
        op4.write_op4(op4_filename, matrices, name_order=None, precision="default",
                      is_binary=is_binary)
        matrices2 = op4.read_op4(op4_filename, precision="default")
        (form1b, A1b) = matrices2["A1"]
        (form2b, A2b) = matrices2["A2"]
        self.assertEqual(form1, form1b)
        self.assertEqual(form2, form2b)

        (form1b, A1b) = matrices2["A1"]
        (form2b, A2b) = matrices2["A2"]
        (form3b, A3b) = matrices2["A3"]
        self.assertEqual(form1, form1b)
        self.assertEqual(form2, form2b)
        self.assertEqual(form3, form3b)
        self.assertTrue(array_equal(A1, A1b))
        self.assertTrue(array_equal(A2, A2b))
        self.assertTrue(array_equal(A3, A3b))
        del A1b, A2b, A3b
        del form1b, form2b, form3b
Example 2: test_ohess
def test_ohess():
    """Simple test of ohess matrix."""
    n = 10
    a = rogues.ohess(n)
    # Test to see if a is orthogonal...
    b = np.matrix(a) * np.matrix(a.T)
    assert np.allclose(b, np.eye(n))
Example 3: __init__
def __init__(self, x_m, y_m, heading_d=None):
    if heading_d is None:
        heading_d = 0.0

    self._estimates = numpy.matrix(
        # x m, y m, heading d, speed m/s
        [x_m, y_m, heading_d, 0.0]
    ).transpose()  # x

    # This will be populated as the filter runs
    # TODO: Ideally, this should be initialized to those values, for right
    # now, identity matrix is fine
    self._covariance_matrix = numpy.matrix([  # P
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
    ])

    # TODO: Tune this parameter for maximum performance
    self._process_noise = numpy.matrix([  # Q
        [1, 0, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
    ])

    self._last_observation_s = time.time()
    self._estimated_turn_rate_d_s = 0.0
Example 4: test_arclength_half_circle
def test_arclength_half_circle():
    """Test the arc length computation of ArcLengthParametrizer on half a
    circle and on a fan, both in 2D and in 3D."""
    # Number of interpolation points minus one
    n = 5
    toll = 1.e-6
    points = np.linspace(0, 1, (n + 1))
    R = 1
    P = 1

    control_points_2d = np.asmatrix(np.zeros([n + 1, 2]))
    control_points_2d[:, 0] = np.transpose(np.matrix([R * np.cos(1 * i * np.pi / (n + 1)) for i in range(n + 1)]))
    control_points_2d[:, 1] = np.transpose(np.matrix([R * np.sin(1 * i * np.pi / (n + 1)) for i in range(n + 1)]))

    control_points_3d = np.asmatrix(np.zeros([n + 1, 3]))
    control_points_3d[:, 0] = np.transpose(np.matrix([R * np.cos(1 * i * np.pi / (n + 1)) for i in range(n + 1)]))
    control_points_3d[:, 1] = np.transpose(np.matrix([R * np.sin(1 * i * np.pi / (n + 1)) for i in range(n + 1)]))
    control_points_3d[:, 2] = np.transpose(np.matrix([P * i for i in range(n + 1)]))

    vsl = AffineVectorSpace(UniformLagrangeVectorSpace(n + 1), 0, 1)
    dummy_arky_2d = ArcLengthParametrizer(vsl, control_points_2d)
    dummy_arky_3d = ArcLengthParametrizer(vsl, control_points_3d)
    length2d = dummy_arky_2d.compute_arclength()[-1, 1]
    length3d = dummy_arky_3d.compute_arclength()[-1, 1]

    # reference lengths used in the assertions
    l2 = np.pi * R
    l3 = 2 * np.pi * np.sqrt(R * R + (P / (2 * np.pi)) * (P / (2 * np.pi)))
    print(length2d, l2)
    print(length3d, l3)
    assert (length2d - l2) < toll
    assert (length3d - l3) < toll
Example 5: svdUpdate
def svdUpdate(U, S, V, a, b):
    """
    Update SVD of an (m x n) matrix `X = U * S * V^T` so that
    `[X + a * b^T] = U' * S' * V'^T`
    and return `U'`, `S'`, `V'`.

    `a` and `b` are (m, 1) and (n, 1) rank-1 matrices, so that svdUpdate can simulate
    incremental addition of one new document and/or term to an already existing
    decomposition.
    """
    rank = U.shape[1]
    m = U.T * a
    p = a - U * m
    Ra = numpy.sqrt(p.T * p)
    assert float(Ra) > 1e-10
    P = (1.0 / float(Ra)) * p
    n = V.T * b
    q = b - V * n
    Rb = numpy.sqrt(q.T * q)
    assert float(Rb) > 1e-10
    Q = (1.0 / float(Rb)) * q
    K = numpy.matrix(numpy.diag(list(numpy.diag(S)) + [0.0])) + numpy.bmat("m ; Ra") * numpy.bmat(" n; Rb").T
    u, s, vt = numpy.linalg.svd(K, full_matrices=False)
    tUp = numpy.matrix(u[:, :rank])
    tVp = numpy.matrix(vt.T[:, :rank])
    tSp = numpy.matrix(numpy.diag(s[:rank]))
    Up = numpy.bmat("U P") * tUp
    Vp = numpy.bmat("V Q") * tVp
    Sp = tSp
    return Up, Sp, Vp
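A quick way to see what svdUpdate returns is to compare it against a direct SVD of X + a*b^T: since [U P] and [V Q] have orthonormal columns, the updated factors reproduce the best rank-r approximation of the updated matrix. The check below is a sketch under that reading of the code; it assumes U and V are numpy matrices and passes S as a plain square array so that numpy.diag(S) inside the function yields a 1-D diagonal.

import numpy

numpy.random.seed(0)

# a rank-3 test matrix X (6 x 5) and its thin, rank-3 SVD factors
m, n, r = 6, 5, 3
X = numpy.matrix(numpy.random.rand(m, r)) * numpy.matrix(numpy.random.rand(r, n))
u, s, vt = numpy.linalg.svd(X, full_matrices=False)
U = numpy.matrix(u[:, :r])
S = numpy.diag(s[:r])          # plain 2-D array of singular values
V = numpy.matrix(vt.T[:, :r])

# rank-1 update vectors
a = numpy.matrix(numpy.random.rand(m, 1))
b = numpy.matrix(numpy.random.rand(n, 1))

Up, Sp, Vp = svdUpdate(U, S, V, a, b)

# Up * Sp * Vp.T should match the best rank-r approximation of X + a * b^T
Y = X + a * b.T
uy, sy, vyt = numpy.linalg.svd(Y, full_matrices=False)
Y_r = numpy.matrix(uy[:, :r]) * numpy.matrix(numpy.diag(sy[:r])) * numpy.matrix(vyt[:r, :])
print(numpy.allclose(Up * Sp * Vp.T, Y_r))   # expected: True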
Example 6: __init__
def __init__(self):
    self._position = numpy.zeros((2,))
    self._position_frozen = False
    self._matrix = numpy.matrix(numpy.identity(3, numpy.float64))
    self._temp_matrix = numpy.matrix(numpy.identity(3, numpy.float64))
    self._selected = False
    self._scene = None
Example 7: __init__
def __init__(self, mol, mints):
    """
    Initialize the rhf
    :param mol: a psi4 molecule object
    :param mints: a molecular integrals object (from MintsHelper)
    """
    self.mol = mol
    self.mints = mints
    self.V_nuc = mol.nuclear_repulsion_energy()
    self.T = np.matrix(mints.ao_kinetic())
    self.S = np.matrix(mints.ao_overlap())
    self.V = np.matrix(mints.ao_potential())
    self.g = np.array(mints.ao_eri())

    # Determine the number of electrons and the number of doubly occupied orbitals
    self.nelec = -mol.molecular_charge()
    for A in range(mol.natom()):
        self.nelec += int(mol.Z(A))
    if mol.multiplicity() != 1 or self.nelec % 2:
        raise Exception("This code only allows closed-shell molecules")
    self.ndocc = self.nelec // 2

    self.maxiter = psi4.get_global_option('MAXITER')
    self.e_convergence = psi4.get_global_option('E_CONVERGENCE')
    self.nbf = mints.basisset().nbf()
Example 8: manova1_single_node
def manova1_single_node(Y, GROUP):
    ### assemble counts:
    u = np.unique(GROUP)
    nGroups = u.size
    nResponses = Y.shape[0]
    nComponents = Y.shape[1]
    ### create design matrix:
    X = np.zeros((nResponses, nGroups))
    ind0 = 0
    for i, uu in enumerate(u):
        n = (GROUP == uu).sum()
        X[ind0:ind0 + n, i] = 1
        ind0 += n
    ### SS for original design:
    Y, X = np.matrix(Y), np.matrix(X)
    b = np.linalg.pinv(X) * Y
    R = Y - X * b
    R = R.T * R
    ### SS for reduced design:
    X0 = np.matrix(np.ones(Y.shape[0])).T
    b0 = np.linalg.pinv(X0) * Y
    R0 = Y - X0 * b0
    R0 = R0.T * R0
    ### Wilks' lambda:
    lam = np.linalg.det(R) / (np.linalg.det(R0) + eps)
    ### test statistic:
    N, p, k = float(nResponses), float(nComponents), float(nGroups)
    x2 = -((N - 1) - 0.5 * (p + k)) * log(lam)
    df = p * (k - 1)
    # return lam, x2, df
    return x2
Example 9: get_system_model
def get_system_model():
    A = np.matrix([[DT, 1.0],
                   [0.0, DT]])
    B = np.matrix([0.0, 1.0]).T
    return A, B
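DT is not defined in this excerpt; it is presumably a time-step constant defined elsewhere in the original module. Below is a minimal, purely illustrative sketch of how the returned pair might be used in a discrete-time state update, with DT assumed to be 0.1 and made-up state and input values.

import numpy as np

DT = 0.1  # assumption: a time-step constant defined elsewhere in the original module

A, B = get_system_model()

x = np.matrix([[0.0], [1.0]])   # hypothetical 2x1 state vector
u = np.matrix([[0.5]])          # hypothetical scalar input

x_next = A * x + B * u          # one step of x_{k+1} = A x_k + B u_k
print(x_next)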
Example 10: findClosestPointInB
def findClosestPointInB(b_data, a, offset):
    xd = offset[0]
    yd = offset[1]
    theta = offset[2]

    # homogeneous 2D transform: rotate by theta, then translate by (xd, yd)
    T = numpy.matrix([[math.cos(theta), -math.sin(theta), xd],
                      [math.sin(theta),  math.cos(theta), yd],
                      [0.0,              0.0,             1.0]])

    a_hom = numpy.matrix([[a[0]], [a[1]], [1.0]])
    temp = T * a_hom
    a_off = [temp[0, 0], temp[1, 0]]

    minDist = 1e100
    minPoint = None
    for p in b_data:
        dist = math.sqrt((p[0] - a_off[0]) ** 2 + (p[1] - a_off[1]) ** 2)
        if dist < minDist:
            minPoint = copy(p)
            minDist = dist

    if minPoint is not None:
        return minPoint, minDist
    else:
        raise ValueError("b_data is empty: no closest point found")
Example 11: load_matlab_matrix
def load_matlab_matrix(matfile, matname=None):
    """
    Wraps scipy.io.loadmat.

    If `matname` is provided, the named matrix is pulled from the dict
    returned by loadmat; otherwise the matrix is extracted with
    `_extract_mat`. Either way the result is returned as a numpy matrix,
    with sparse matrices converted to dense first.
    """
    if not matname:
        out = spio.loadmat(matfile)
        mat = _extract_mat(out)
    else:
        matdict = spio.loadmat(matfile)
        mat = matdict[matname]
    # if mat is a sparse matrix, convert it to a dense numpy matrix
    try:
        mat = np.matrix(mat.toarray())
    except AttributeError:
        mat = np.matrix(mat)
    return mat
Example 12: _update
def _update(self):
    """
    Calculate those terms for prediction that do not depend on predictive
    inputs.
    """
    from numpy.linalg import cholesky, solve, LinAlgError
    from numpy import matrix, zeros, log, diagonal
    import math

    self._K = self.calc_covariance(self.X)
    if not self._K.shape[0]:  # we didn't have any data
        self._L = matrix(zeros((0, 0), numpy.float64))
        self._alpha = matrix(zeros((0, 1), numpy.float64))
        self.LL = 0.
    else:
        try:
            self._L = matrix(cholesky(self._K))
        except LinAlgError as detail:
            raise RuntimeError(
                "Cholesky decomposition of covariance matrix failed. "
                "Your kernel may not be positive definite. "
                "Scipy complained: %s" % detail)
        self._alpha = solve(self._L.T, solve(self._L, self.y))
        self.LL = (
            - self.n * math.log(2.0 * math.pi)
            - (self.y.T * self._alpha)[0, 0]
        ) / 2.0
    self.LL -= log(diagonal(self._L)).sum()
Example 13: predict
def predict(self, x_star):
    """
    Predict the process's values on the input values

    @arg x_star: Prediction points

    @return: ( mean, variance, LL )
    where mean are the predicted means, variance are the predicted
    variances and LL is the log likelihood of the data for the given
    value of the parameters (i.e. not integrating over hyperparameters)
    """
    from numpy.linalg import solve
    from numpy import matrix, zeros

    if 0 == len(self.X):  # no training data
        f_star_mean = matrix(zeros((len(x_star), 1), numpy.float64))
        v = matrix(zeros((0, len(x_star)), numpy.float64))
    else:
        k_star = self.calc_covariance(self.X, x_star)
        f_star_mean = k_star.T * self._alpha
        if 0 == len(x_star):  # no prediction points
            v = matrix(zeros((0, len(x_star)), numpy.float64))
        else:
            v = solve(self._L, k_star)
    V_f_star = self.calc_covariance(x_star) - v.T * v
    return (f_star_mean, V_f_star, self.LL)
Example 14: main
def main():
    sample = 'q'
    sm_bin = '10.0_10.5'
    catalogue = 'sm_9.5_s0.2_sfr_c-0.75_250'

    # load in fiducial mock covariance matrix
    filepath = './'
    filename = 'sm_9.5_s0.2_sfr_c-0.8_Chinchilla_250_wp_fiducial_'+sample+'_'+sm_bin+'_cov.npy'
    cov = np.matrix(np.load(filepath+filename))
    diag = np.diagonal(cov)

    filepath = cu.get_output_path() + 'analysis/central_quenching/observables/'
    filename = 'sm_9.5_s0.2_sfr_c-0.8_Chinchilla_250_wp_fiducial_'+sample+'_'+sm_bin+'.dat'
    data = ascii.read(filepath+filename)
    rbins = np.array(data['r'])
    mu = np.array(data['wp'])

    # load in comparison mock
    # (note: the excerpt does not show where `wp`, used below, is defined)
    plt.figure()
    plt.errorbar(rbins, mu, yerr=np.sqrt(np.diagonal(cov)), color='black')
    plt.plot(rbins, wp, color='red')
    plt.xscale('log')
    plt.yscale('log')
    plt.show()

    # chi-squared of the comparison measurement against the fiducial mock
    inv_cov = cov.I
    Y = np.matrix((wp - mu))
    X = Y * inv_cov * Y.T
    print(X)
Example 15: test_pascal_1
def test_pascal_1():
    """Simple test of pascal matrix: k = 1."""
    # Notice we recover the unit matrix with n = 18, better than previous test
    n = 18
    a = rogues.pascal(n, 1)
    b = np.matrix(a) * np.matrix(a)
    assert np.allclose(b, np.eye(n))