This article collects typical usage examples of the numpy.diagflat function in Python. If you have been wondering what diagflat does, how it behaves, or how to use it in practice, the curated examples below should help.
The 15 code examples shown below are sorted by popularity by default.
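Before diving in, a quick refresher (a minimal sketch): np.diagflat flattens its input and places it on the k-th diagonal of a freshly allocated 2-D array.

import numpy as np

print(np.diagflat([1, 2]))      # [[1 0]
                                #  [0 2]]
print(np.diagflat([1, 2], 1))   # 3x3 with [1, 2] on the first superdiagonal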
Example 1: get_Hint_EXACT
def get_Hint_EXACT(ph_count, at_count, wc, wa, g):
#------------------------------------------------------------------------------------------------------------------
    adiag = np.sqrt(np.arange(1, ph_count+1))
    across = np.diagflat(adiag, -1)   # photon creation operator a^+
    a = np.diagflat(adiag, 1)         # photon annihilation operator a
    acrossa = np.dot(across, a)       # photon number operator a^+ a
#------------------------------------------------------------------------------------------------------------------
    sigmadiag = [1]
    sigmacross = np.diagflat(sigmadiag, -1)   # atomic raising operator sigma^+
    sigma = np.diagflat(sigmadiag, 1)         # atomic lowering operator sigma^-
    sigmacrosssigma = np.dot(sigmacross, sigma)
#------------------------------------------------------------------------------------------------------------------
    H_dim = (ph_count+1) * pow(2, at_count)   # (ph_count+1) photon levels times 2^at_count atomic states
H_int = np.zeros([H_dim, H_dim], dtype=complex)
#------------------------------------------------------------------------------------------------------------------
for i in range(1, at_count+1):
#------------------------------------------------
elem = (across + a)
before = np.identity(pow(2, i-1))
elem = np.kron(elem, before)
elem = np.kron(elem, sigmacross + sigma)
after = np.identity(pow(2, at_count-i))
elem = np.kron(elem, after)
H_int += g * elem
#------------------------------------------------
#------------------------------------------------------------------------------------------------------------------
return H_int
Example 2: jac_dtwa
def jac_dtwa(s, t, param):
"""
Jacobian of the general case. First order.
This is given by 9 NXN submatrices:
J00=J11=J22=0
    Note that the Jacobian is NOT antisymmetric in general! See below.
J01 = +J_z diag(J|s^x>) + h(t) h_z - J_y (J#|s^z>)
J10 = -J_z diag(J|s^x>) - h(t) h_z + J_x (J#|s^z>)
J02 = -J_y diag(J|s^y>) - h(t) h_y + J_z (J#|s^y>)
J20 = +J_y diag(J|s^y>) + h(t) h_y - J_x (J#|s^y>)
J12 = +J_x diag(J|s^x>) + h(t) h_x - J_z (J#|s^x>)
J21 = -J_x diag(J|s^x>) - h(t) h_x + J_y (J#|s^x>)
Here, '#' (hash operator) means multiply each row of a matrix by the
corresponding vector element. This is implemented by numpy.multiply()
"""
N = param.latsize
# s[0:N] = sx , s[N:2*N] = sy, s[2*N:3*N] = sz
full_jacobian = np.zeros(shape=(3 * N, 3 * N))
diag_jsx = np.diagflat((param.jmat.dot(s[0:N]))) / param.norm
diag_jsy = np.diagflat((param.jmat.dot(s[N : 2 * N]))) / param.norm
# diag_jsz = np.diagflat((param.jmat.dot(s[2*N:3*N])))/param.norm
hash_jsx = (np.multiply(param.jmat.T, s[0:N]).T) / param.norm
hash_jsy = (np.multiply(param.jmat.T, s[N : 2 * N]).T) / param.norm
hash_jsz = (np.multiply(param.jmat.T, s[2 * N : 3 * N]).T) / param.norm
full_jacobian[0:N, N : 2 * N] = param.jz * diag_jsx + drivemat * param.hz - param.jy * hash_jsz
full_jacobian[N : 2 * N, 0:N] = -param.jz * diag_jsx - param.hz + param.jx * hash_jsz
full_jacobian[0:N, 2 * N : 3 * N] = -param.jy * diag_jsy - param.hy + param.jz * hash_jsy
full_jacobian[2 * N : 3 * N, 0:N] = param.jy * diag_jsy + param.hy - param.jx * hash_jsy
full_jacobian[N : 2 * N, 2 * N : 3 * N] = param.jx * diag_jsx + param.hx - param.jz * hash_jsx
full_jacobian[2 * N : 3 * N, N : 2 * N] = -param.jx * diag_jsx - param.hx + param.jy * hash_jsx
return full_jacobian
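The '#' row-scaling described in the docstring is easy to sanity-check in isolation. A minimal sketch with made-up jmat and s arrays (not the module's actual data):

import numpy as np

jmat = np.arange(9.0).reshape(3, 3)   # toy coupling matrix (assumption)
s = np.array([1.0, 2.0, 3.0])         # toy spin components (assumption)
# (jmat.T * s).T multiplies row i of jmat by s[i], which is what '#' denotes
hashed = np.multiply(jmat.T, s).T
assert np.allclose(hashed, jmat * s[:, None])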
Example 3: fit_map
def fit_map(self, X, y, full_posterior=True):
"""
Fit a MAP estimate
    X: features, n_samples by n_features ndarray
    y: target values, length-n_samples array
"""
if self.fit_intercept:
X = self.add_intercept(X)
#data setup
f_dim = X.shape[1]
if np.isscalar(self.inv_lamb):
inv_lamb = np.diagflat(np.repeat(self.inv_lamb,f_dim))
else:
inv_lamb = np.diagflat(self.inv_lamb)
if np.isscalar(self.beta_mu):
beta_mu = np.repeat(self.beta_mu,f_dim)
else:
beta_mu = self.beta_mu
sigma = self.sigma
#let the actual calculation begin
l = sigma**2 * inv_lamb
s = LA.inv(X.T.dot(X)+l)
# adding in the mean of the prior
b0 = sigma**2 * inv_lamb.dot(beta_mu)
self.beta = s.dot(X.T.dot(y) + b0)
if full_posterior:
self.Sigma = sigma * sigma * s
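The scalar/vector branch above relies on diagflat to lift either form of inv_lamb into a diagonal prior precision matrix; a minimal sketch of just that step:

import numpy as np

f_dim = 3
print(np.diagflat(np.repeat(0.5, f_dim)))   # scalar prior -> 0.5 * I
print(np.diagflat([0.5, 1.0, 2.0]))         # per-feature priors on the diagonal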
Example 4: trasporto_ei_bcp
def trasporto_ei_bcp(M, N, xmin, xmax, tinz, tfin, vel, ic):
dx = (xmax - xmin) / M
dt = (tfin - tinz) / N
x = xmin + array(range(0, M + 1)) * dx
t = array(range(0, N + 1)) * dt
u = zeros([M + 1, N + 1])
u[:, 0] = ic(x)
uold = zeros([M + 1])
uold[0:M + 1] = u[:, 0]
a = vel
# CFL
cfl = a * dt / dx
    # Courant number
nu = a * dt / dx
A = eye(M + 1) + 0.5 * nu * diagflat(ones([1, M]), 1) - 0.5 * nu * diagflat(ones([1, M]), -1)
A[0, M] = -0.5 * nu
A[M, 1] = 0.5 * nu
if cfl > 1:
print "CFL " + str(cfl)
raise ArithmeticError
else:
for n in range(0, N + 1):
unew = solve(A, uold.transpose()).transpose()
uold = unew
u[0:M + 1, n] = unew
return u, x, t
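A quick way to exercise the solver, assuming trasporto_ei_bcp and the NumPy names it uses (array, zeros, eye, ones, diagflat, solve) are in scope; the parameters here are illustrative only:

import numpy as np

# advect a sine wave on the periodic domain [0, 1] at speed 1 (CFL = 0.5)
u, x, t = trasporto_ei_bcp(M=100, N=200, xmin=0.0, xmax=1.0,
                           tinz=0.0, tfin=1.0, vel=1.0,
                           ic=lambda x: np.sin(2 * np.pi * x))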
Example 5: burgers_ei_bcp
def burgers_ei_bcp(M, N, xmin, xmax, tinz, tfin, ic):
dx = (xmax - xmin) / M
dt = (tfin - tinz) / N
x = xmin + array(range(0, M + 1)) * dx
t = array(range(0, N + 1)) * dt
u = zeros([M + 1, N + 1])
u[:, 0] = ic(x)
uold = zeros([M + 1])
uold[0:M + 1] = u[:, 0]
for n in range(0, N + 1):
        # Courant number
        nu = max(abs(uold)) * dt / dx
        # CFL condition: the Courant number itself must stay below 1
        cfl = nu
if cfl > 1:
print "CFL " + str(cfl)
raise ArithmeticError
else:
A = eye(M + 1) + 0.5 * dt * diagflat(uold[0:M], 1) / dx - 0.5 * dt * diagflat(uold[1:M + 1], -1) / dx
A[0, M] = -0.5 * dt * uold[0] / dx
A[M, 1] = 0.5 * dt * uold[M] / dx
unew = solve(A, uold.transpose()).transpose()
uold = unew
u[0:M + 1, n] = unew
return u, x, t
Example 6: test_project
def test_project():
X = np.diagflat([-2, -1, 0, 1, 2])
# eigenvalues -2, -1, 0, 1, 2; eigenvectors are I
Xproj = ProjectPSD().fit_transform(X)
assert np.allclose(Xproj, np.diagflat([0, 0, 0, 1, 2]))
Xproj2 = ProjectPSD().fit(X).transform(X)
assert np.allclose(Xproj2, np.diagflat([0, 0, 0, 1, 2]))
Xproj3 = ProjectPSD(negatives_likely=True).fit(X).transform(X[:3, :])
assert np.allclose(Xproj3, np.zeros((3, 5)))
Xproj4 = ProjectPSD(negatives_likely=False).fit(X).transform(X[:3, :])
assert np.allclose(Xproj4, np.zeros((3, 5)))
Xproj5 = ProjectPSD(negatives_likely=True, copy=False, min_eig=.5) \
.fit_transform(X.copy())
assert np.allclose(Xproj5, np.diagflat([.5, .5, .5, 1, 2]))
Xproj6 = ProjectPSD(negatives_likely=True, copy=False, min_eig=.5) \
.fit(X.copy()).transform(X.copy())
assert np.allclose(Xproj6, np.diagflat([.5, .5, 0, 1, 2]))
assert_raises(TypeError, lambda: ProjectPSD().fit(X[:2, :]))
assert_raises(TypeError, lambda: ProjectPSD().fit_transform(X[:2, :]))
assert_raises(TypeError, lambda: ProjectPSD().fit(X).transform(X[:, :2]))
Example 7: get_Hatoms
def get_Hatoms(ph_count, at_count, wc, wa, g):
#------------------------------------------------------------------------------------------------------------------
sigmadiag = [1]
sigmacross = np.diagflat(sigmadiag, -1)
sigma = np.diagflat(sigmadiag, 1)
sigmacrosssigma = np.dot(sigmacross, sigma)
#------------------------------------------------------------------------------------------------------------------
ph_dim = ph_count+1
I_ph = np.identity(ph_dim)
#------------------------------------------------------------------------------------------------------------------
H_dim = (ph_count+1) * pow(2, at_count)
H_atoms = np.zeros([H_dim, H_dim])
#------------------------------------------------------------------------------------------------------------------
for i in range(1, at_count+1):
elem = sigmacrosssigma
at_prev = np.identity(pow(2, i-1))
elem = np.kron(at_prev, elem)
        at_next = np.identity(pow(2, at_count-i))
elem = np.kron(elem, at_next)
H_atoms += wa * np.kron(I_ph, elem)
#------------------------------------------------------------------------------------------------------------------
return H_atoms
Example 8: main
def main():
n=10
# prepare matrix a
a=np.diag([5.]*n)
a+=np.diagflat([2.]*(n-1),1)
a+=np.diagflat([2.]*(n-1),-1)
print(a)
b=np.array([3,1,4,0,5,-1,6,-2,7,-15],dtype='f').T
#initial value x
x=np.ones(10).T
D=np.diag(np.diag(a))
L=np.tril(a,-1)
U=np.triu(a,+1)
M= -np.linalg.inv(D) @ (L+U)
N=np.linalg.inv(D)
    for k in range(maxiteration):   # maxiteration and epsilon are module-level constants
        x_new = M @ x + N @ b
        if np.linalg.norm(x_new - x) < epsilon:
break
x=x_new
else:
print("fail jacobi method ...")
exit(1)
print("the sol of ax = b using Jacobi method is \n{}".format(x))
print("iteration {} times".format(k))
print("indeed ax-b is \n{}".format(a @x -b))
print("you can check sol x using sympy...")
sy_a=sy.Matrix(a)
sy_x=sy_a.solve(b)
print(sy_x)
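Jacobi converges for this system because a is strictly diagonally dominant (|5| > 2 + 2); equivalently, the iteration matrix has spectral radius below 1, which a few lines can confirm (a sketch rebuilding the same a):

import numpy as np

n = 10
a = np.diag([5.] * n) + np.diagflat([2.] * (n - 1), 1) + np.diagflat([2.] * (n - 1), -1)
D = np.diag(np.diag(a))
M = -np.linalg.inv(D) @ (np.tril(a, -1) + np.triu(a, 1))
print(np.max(np.abs(np.linalg.eigvals(M))))   # about 0.77, safely below 1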
Example 9: matrix_simulstionnp
def matrix_simulstionnp(self, temp, dt, t, outtemps):
size = len(self.npwam[0])
time = 0
dt = dt
I = np.diagflat(np.array([[1]*size]))
neg_c = np.diagflat((self.npwam.dot(np.array([[-1]]*size))))
M = self.npthermal_mass.dot(self.npwam + neg_c).dot(dt) + I
    print(M, "Tdot")
temp = np.array(temp) # K
#temps = [x for x in temp.matrix]
temps = [[cell for cell in row] for row in temp]
# finish matrix simulation
hour = 60*60*3
idx = 0
qhour = 15*60
while time < t:
if hour == (60*60*3):
temp[3][0] = outtemps[idx][0]
hour = 0
idx += 1
tempn = M.dot(temp)
temp = tempn
if qhour >= 15*60:
for i in range(len(tempn)):
temps[i].append(temp[i][0])
qhour = 0
time += dt
hour += dt
qhour += dt
return temps
Example 10: computePD
def computePD(self, bodyUniqueId, jointIndices, desiredPositions, desiredVelocities, kps, kds, maxForces, timeStep):
numJoints = self._pb.getNumJoints(bodyUniqueId)
jointStates = self._pb.getJointStates(bodyUniqueId, jointIndices)
q1 = []
qdot1 = []
zeroAccelerations = []
    for i in range(numJoints):
q1.append(jointStates[i][0])
qdot1.append(jointStates[i][1])
zeroAccelerations.append(0)
q = np.array(q1)
qdot=np.array(qdot1)
qdes = np.array(desiredPositions)
qdotdes = np.array(desiredVelocities)
qError = qdes - q
qdotError = qdotdes - qdot
Kp = np.diagflat(kps)
Kd = np.diagflat(kds)
p = Kp.dot(qError)
d = Kd.dot(qdotError)
forces = p + d
M1 = self._pb.calculateMassMatrix(bodyUniqueId,q1)
M2 = np.array(M1)
M = (M2 + Kd * timeStep)
c1 = self._pb.calculateInverseDynamics(bodyUniqueId, q1, qdot1, zeroAccelerations)
c = np.array(c1)
A = M
b = -c + p + d
qddot = np.linalg.solve(A, b)
tau = p + d - Kd.dot(qddot) * timeStep
maxF = np.array(maxForces)
forces = np.clip(tau, -maxF , maxF )
#print("c=",c)
    return forces
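The diagflat calls above simply turn the per-joint gain lists into diagonal matrices, so Kp.dot(qError) applies each gain to its own joint. A toy check with hypothetical gains:

import numpy as np

Kp = np.diagflat([100.0, 200.0, 50.0])   # hypothetical per-joint P gains
qError = np.array([0.1, -0.2, 0.0])
print(Kp.dot(qError))                    # [ 10. -40.   0.]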
Example 11: ARD
def ARD(X,Y):
# X - (p x q) matrix with inputs in rows
# Y - (p, 1) matrix with measurements
    # Implements ARD regression, adapted from:
#M. Sahani and J. F. Linden.
#Evidence optimization techniques for estimating stimulus-response functions.
#In S. Becker, S. Thrun, and K. Obermayer, eds., Advances in Neural Information Processing Systems, vol. 15, pp. 301-308, Cambridge, MA, 2003.
(p,q) = numpy.shape(X)
#initialize parameters
sigma_sq = 0.1
CC = X.T * X
XY = X.T * Y
start_flag = False
alpha = numpy.mat(numpy.zeros((q,1)))+2.0
    for i in range(0, 100):
sigma = numpy.linalg.inv(CC/sigma_sq + numpy.diagflat(alpha))
ni = sigma * (XY) / (sigma_sq)
sigma_sq = numpy.sum(numpy.power(Y - X*ni,2))/(p - numpy.sum(1 - numpy.multiply(numpy.mat(numpy.diagonal(sigma)).T,alpha)));
        print(numpy.min(numpy.abs(ni)))
alpha = numpy.mat(numpy.divide((1 - numpy.multiply(alpha,numpy.mat(numpy.diagonal(sigma)).T)) , numpy.power(ni,2)))
        print(sigma_sq)
w = numpy.linalg.inv(CC + sigma_sq * numpy.diagflat(alpha)) * (XY)
    print(alpha)
    print(sigma_sq)
return w
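A toy run of ARD on synthetic data (a sketch: inputs must be numpy matrices as the comments require, and the weights and noise level here are made up):

import numpy

numpy.random.seed(0)
X = numpy.mat(numpy.random.randn(200, 5))
true_w = numpy.mat([[1.0], [-2.0], [0.5], [3.0], [-1.0]])
Y = X * true_w + 0.1 * numpy.mat(numpy.random.randn(200, 1))
w = ARD(X, Y)   # the recovered w should land close to true_w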
Example 12: gen_roots_and_weights
def gen_roots_and_weights(n, an_func, sqrt_bn_func, mu):
"""[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu)
Returns the roots (x) of an nth order orthogonal polynomial,
and weights (w) to use in appropriate Gaussian quadrature with that
orthogonal polynomial.
The polynomials have the recurrence relation
    P_{n+1}(x) = (x - A_n) P_n(x) - B_n P_{n-1}(x)
an_func(n) should return A_n
sqrt_bn_func(n) should return sqrt(B_n)
mu ( = h_0 ) is the integral of the weight over the orthogonal interval
"""
nn = np.arange(1.0,n)
sqrt_bn = sqrt_bn_func(nn)
an = an_func(np.concatenate(([0], nn)))
x, v = eig((np.diagflat(an) +
np.diagflat(sqrt_bn,1) +
np.diagflat(sqrt_bn,-1)))
answer = []
sortind = x.real.argsort()
answer.append(x[sortind])
answer.append((mu*v[0]**2)[sortind])
return answer
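As a concrete check, the monic Chebyshev recurrence has A_n = 0, B_1 = 1/2, B_n = 1/4 for n >= 2, and mu = pi, so the routine should reproduce the classical Gauss-Chebyshev rule (a sketch; assumes gen_roots_and_weights and its eig dependency are in scope):

import numpy as np

an_func = lambda k: np.zeros_like(k)
sqrt_bn_func = lambda k: np.where(k == 1, np.sqrt(0.5), 0.5)
x, w = gen_roots_and_weights(5, an_func, sqrt_bn_func, np.pi)
# nodes are cos((2i-1)*pi/10) and every weight equals pi/5
assert np.allclose(np.sort(x), np.sort(np.cos((2 * np.arange(1, 6) - 1) * np.pi / 10)))
assert np.allclose(w, np.pi / 5)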
Example 13: rvmtrain
def rvmtrain(xinp, yinp, r, beta = 100):
dmat = getDesMat(xinp, r)
N = len(yinp)
target = yinp
alphas = np.ones((len(xinp) +1, 1))
Amat = np.diagflat(alphas)
newAlphas = np.copy(alphas)
converged = False
idx = np.ones(len(alphas)) == 1
mMat = np.zeros(len(alphas))
iterationCount = 0
for t in range(5000):
iterationCount = iterationCount + 1
idx = np.abs(newAlphas) < alphaThreshold
idx = np.squeeze(idx)
sig = Amat[idx][:,idx] + beta * np.dot(dmat[:,idx].transpose(), dmat[:,idx])
sigMat = np.linalg.inv(sig)
mMat[idx] = beta * np.dot(sigMat, np.dot(dmat[:,idx].transpose(), target))
oldAlphas = np.copy(newAlphas)
gamma = 1 - np.transpose(newAlphas[idx]) * np.diag(sigMat)
        newAlphas[idx] = np.transpose(gamma / mMat[idx]**2)
beta = ( N - np.sum(gamma) ) / np.linalg.norm( yinp - np.dot(dmat[:,idx], mMat[idx]) )
delta = sum(np.abs(newAlphas - oldAlphas))
# if (sum(newAlphas<0)>0):
# print iterationCount
if (delta < convThreshold):
print "\n\n\n\n\n!!!!!CONVERGED!!!!!\n\n\n\n\n"
converged = True
            print(newAlphas)
break
Amat = np.diagflat(newAlphas)
relevant_vecs_ind = []
x_rel =[]
y_rel = []
print "iterations: {}".format(iterationCount)
    # If we started from 0 we would check whether the bias term of alpha is relevant; if so, the last
    # training point x[-1] and y[-1] would be picked as a relevance vector. The bias term is problematic.
for i in range(1,N+1):
if newAlphas[i] < alphaThreshold:
relevant_vecs_ind.append(i+1)
x_rel.append(xinp[i-1])
y_rel.append(yinp[i-1])
print "number of relevancevectors (alpha < {0}): ".format(alphaThreshold) + str(np.sum(idx[1:]))
muMat = mMat
print "beta: " + str(beta)
# for a in newAlphas:
# print a
return muMat, beta, converged, idx, x_rel, y_rel
Example 14: TestSetCovariances
def TestSetCovariances(self,hyperparameters,X,Y): #compute test set covariances
[n, D] = X.shape;
ell = np.exp(hyperparameters[0:D]); # characteristic length scale
sf2 = np.exp(2*hyperparameters[D]); # signal variance
A = np.dot(sf2*(1+self.jitter),np.ones([np.size(Y,0),1]));
B = sf2*np.exp(-self.sq_dist2(np.dot(np.diagflat(1./ell),X.T),np.dot(np.diagflat(1./ell),Y.T))/2);#TODO diagflat
return [A,B] #TODO [A,B]
Example 15: MStep
def MStep(self, posterior, data, mix_pi=None):
if isinstance(data, DataSet):
x = data.internalData
elif hasattr(data, "__iter__"):
x = data
else:
        raise TypeError("Unknown/Invalid input to MStep.")
post = posterior.sum() # sum of posteriors
self.mean = np.dot(posterior, x) / post
# centered input values (with new mus)
centered = np.subtract(x, np.repeat([self.mean], len(x), axis=0));
# estimating correlation factor
sigma = np.dot(np.transpose(np.dot(np.identity(len(posterior)) * posterior, centered)), centered) / post # sigma/covariance matrix
    diagsigma = np.diagflat(1.0 / np.diagonal(sigma))  # diagonal matrix holding the inverse variances of sigma
correlation = np.dot(np.dot(diagsigma, np.multiply(sigma, sigma)), diagsigma) # correlation matrix with entries sigma_xy^2/(sigma^2_x * sigma^2_y)
correlation = correlation - np.diagflat(np.diagonal(correlation)) # making diagonal entries = 0
# XXX - check this
    parents = self.maximunSpanningTree(correlation)  # return maximum spanning tree from the correlation matrix
self.parents = self.directTree(parents, 0) # by default direct tree from 0
    # XXX note that computational time could be saved as these functions share the same sufficient statistics
ConditionalGaussDistribution.MStep(self, posterior, data, mix_pi)