This article collects typical usage examples of the scipy.linalg.block_diag function in Python. If you are wondering how block_diag is used in practice, the curated code examples below may help.
Fifteen code examples of block_diag are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
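Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what block_diag does: it places its array arguments along the diagonal of one larger matrix and fills everything outside the blocks with zeros.

import numpy as np
from scipy.linalg import block_diag

A = np.array([[1, 2],
              [3, 4]])
B = np.array([[5]])
C = np.eye(3)

# A, B and C become diagonal blocks of a single (2+1+3) x (2+1+3) matrix.
M = block_diag(A, B, C)
print(M.shape)   # (6, 6)

# The *-unpacking idiom used in many of the examples below builds the
# argument list from a comprehension, one block per item.
blocks = [np.full((2, 2), i) for i in range(3)]
print(block_diag(*blocks).shape)   # (6, 6)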
Example 1: vRaman
def vRaman(x,omega=1.0,delta=0.0,epsilon=0.048,phi=4.0/3.0):
    x=np.array(x)
    s=1
    v=np.outer(omega*np.exp(1.0j*2.0*phi*x),Fx(s)*np.sqrt(2.0)/2.0).reshape(x.size,2*s+1,2*s+1)
    v=sLA.block_diag(*[np.triu(v[i])+np.conjugate(np.triu(v[i],1)).transpose() for i in range(x.size)])
    v+=sLA.block_diag(*[np.diag([epsilon-delta,0.0,epsilon+delta])]*x.size)
    return v
Example 2: compute_giwc_from_forces
def compute_giwc_from_forces():
    A = calculate_local_forces_to_gi_matrix()
    CFC_all = block_diag(*[dot(CFC, block_diag(contact.R.T))
                           for contact in contacts for _ in xrange(4)])
    S = span_of_face(CFC_all)
    F = face_of_span(dot(A, S))
    return F
Example 3: proj_affine
def proj_affine(self, P):
    d = P.shape[1]
    N = P.shape[0]
    V = np.array([self.vertices[i].as_np_array() for i in range(self.dim+1)]).T
    Q_i = V.T.dot(V)
    Q = linalg.block_diag(*[Q_i for i in range(N)])
    q = - np.reshape(V.T.dot(P.T).T, (N * (self.dim + 1)))
    A_i = np.ones(self.dim + 1)
    A = linalg.block_diag(*[A_i for i in range(N)])
    ## Z * [alpha; lambda].T = c
    ## lhs of KKT
    n_vars = N * (self.dim + 1)
    n_cnts = N
    Z = np.zeros((n_vars + n_cnts, n_vars + n_cnts))
    Z[:n_vars, :n_vars] = Q
    Z[n_vars:, :n_vars] = A
    Z[:n_vars, n_vars:] = A.T
    ## rhs of KKT
    c = np.zeros(n_vars + n_cnts)
    c[:n_vars] = -q
    c[n_vars:] = np.ones(n_cnts)
    alpha = np.linalg.solve(Z, c)
    alpha = alpha[:n_vars].reshape(N, self.dim + 1)
    P_affine = alpha.dot(V.T)
    return alpha, P_affine
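The block_diag calls above batch N independent equality-constrained least-squares problems (one per row of P) into a single KKT system solved with one np.linalg.solve call. A minimal single-point version of the same KKT setup, with a made-up triangle for illustration (a sketch, not part of the original project):

import numpy as np

# Vertices of a hypothetical 2-D simplex as columns of V (dim = 2, so dim+1 = 3).
V = np.array([[0.0, 1.0, 0.0],
              [0.0, 0.0, 1.0]])
p = np.array([0.4, 0.8])

# minimize ||V a - p||^2  subject to  sum(a) = 1
Q_i = V.T.dot(V)
A_i = np.ones((1, 3))
Z = np.block([[Q_i, A_i.T],
              [A_i, np.zeros((1, 1))]])
c = np.concatenate([V.T.dot(p), [1.0]])
a = np.linalg.solve(Z, c)[:3]

print(a.sum())    # 1.0: affine (barycentric) coordinates
print(V.dot(a))   # [0.4, 0.8]: projection of p onto the affine hull

In proj_affine, block_diag simply stitches N copies of Q_i and A_i together so that all points share one linear solve.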
Example 4: generate_time_of_use_periods
def generate_time_of_use_periods(self):
    """
    Time-of-use periods are described by NxN indicator matrices.
    """
    N = const.DAILY_UNITS
    quarters = self.generate_quarter_hours()
    peak_indicator = [1 if ((t >= const.PEAK_TIME_RANGE[0]) & (t < const.PEAK_TIME_RANGE[1])) else 0 for t in quarters]
    part_peak_indicator = [1 if ((t >= const.PART_PEAK_TIME_RANGE[0][0]) and (t < const.PART_PEAK_TIME_RANGE[0][1])
                                 or (t >= const.PART_PEAK_TIME_RANGE[1][0]) and (t < const.PART_PEAK_TIME_RANGE[1][1])) else 0 for t in quarters]
    off_peak_indicator = [1 if ((t >= const.OFF_PEAK_TIME_RANGE[0][0]) and (t < const.OFF_PEAK_TIME_RANGE[0][1])
                                or (t >= const.OFF_PEAK_TIME_RANGE[1][0]) and (t < const.OFF_PEAK_TIME_RANGE[1][1])) else 0 for t in quarters]
    peak_day = np.diag(peak_indicator)
    part_peak = np.diag(part_peak_indicator)
    off_peak_weekday = np.diag(off_peak_indicator)
    off_peak_weekend_off = np.zeros([N,N])  # used for peak, part_peak
    off_peak_weekend_on = np.diag([1]*N)    # used for off_peak
    # each of these will block_diag 5 weekday indicators and 2 weekend indicators
    self.peak_mat = block_diag(peak_day, peak_day, peak_day, peak_day, peak_day,
                               off_peak_weekend_off, off_peak_weekend_off)
    self.part_peak_mat = block_diag(part_peak, part_peak, part_peak, part_peak, part_peak,
                                    off_peak_weekend_off, off_peak_weekend_off)
    self.off_peak_mat = block_diag(off_peak_weekday, off_peak_weekday, off_peak_weekday, off_peak_weekday, off_peak_weekday,
                                   off_peak_weekend_on, off_peak_weekend_on)
    self.all_peak_mat = np.eye(self.horizon)
Example 5: mps_add
def mps_add(*args):
    '''
    Add <KMPS>.

    Parameters
    -----------
    args:
        <MPS> instances to be added.
    '''
    if len(args)<=1:
        raise ValueError('At least 2 args are required.')
    AL=[]
    BL=[]
    hndim=args[0].hndim
    na=len(args[0].AL)
    nb=len(args[0].BL)
    nsite=na+nb
    for i in xrange(na):
        if i==0:
            ai=[concatenate([mps.AL[i][j] for mps in args],axis=1) for j in xrange(hndim)]
        elif i==nsite-1:
            ai=[concatenate([mps.AL[i][j] for mps in args],axis=0) for j in xrange(hndim)]
        else:
            ai=[block_diag(*[mps.AL[i][j] for mps in args]) for j in xrange(hndim)]
        AL.append(ai)
    for i in xrange(nb):
        if i+na==0:
            bi=[concatenate([mps.BL[i][j] for mps in args],axis=1) for j in xrange(hndim)]
        elif i+na==nsite-1:
            bi=[concatenate([mps.BL[i][j] for mps in args],axis=0) for j in xrange(hndim)]
        else:
            bi=[block_diag(*[mps.BL[i][j] for mps in args]) for j in xrange(hndim)]
        BL.append(bi)
    S=concatenate([mps.S for mps in args])
    return args[0].__class__(AL=AL,BL=BL,S=S)
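As a side note on why the interior sites use block_diag while the boundary sites are concatenated: block_diag takes the direct sum of the bond spaces, so both bond dimensions add, whereas the dangling outer bonds of the first and last sites must stay one-dimensional. A shape-only sketch with made-up dimensions (not the <MPS> class itself):

import numpy as np
from scipy.linalg import block_diag

A1 = np.random.rand(4, 5)   # one physical-index slice of a site tensor of the first MPS
A2 = np.random.rand(2, 3)   # the same slice of the second MPS

print(block_diag(A1, A2).shape)   # (6, 8): interior site, both bond dimensions add

left1, left2 = np.random.rand(1, 4), np.random.rand(1, 2)
print(np.concatenate([left1, left2], axis=1).shape)   # (1, 6): left boundary keeps bond size 1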
Example 6: compute_energy
def compute_energy(self):
    S = np.matrix(la.block_diag(self.S, self.S))
    T = np.matrix(la.block_diag(self.T, self.T))
    V = np.matrix(la.block_diag(self.V, self.V))
    D = np.matrix(np.zeros((self.nsbf, self.nsbf)))
    X = np.matrix(la.inv(la.sqrtm(S)))
    h = T + V
    E0 = 0
    for count in range(self.maxiter):
        F = h + self.vu
        Ft = X * F * X
        e, Ct = la.eigh(Ft)
        C = X * np.matrix(Ct)
        OC = np.matrix(C[:,:self.nelec])
        D = OC*OC.T
        self.vu = np.einsum('upvq,qp->uv', self.G, D)
        E1 = np.sum((np.array(h) + 0.5 * np.array(self.vu))*np.array(D.T)) + self.V_nuc
        psi4.print_out('Iteration {:<d} {:.10f} {:.10f}\n'.format(count, E1, E1-E0))
        if abs(E1 - E0) < self.e_convergence:
            psi4.print_out('\nFinal HF Energy: {:<5.10f}'.format(E1))
            self.C = C
            self.epsi = e
            self.ehf = E1
            break
        else:
            E0 = E1
    else:
        psi4.print_out('\n:( Does not converge :(')
Example 7: test_scalar_and_1d_args
def test_scalar_and_1d_args(self):
    a = block_diag(1)
    assert_equal(a.shape, (1,1))
    assert_array_equal(a, [[1]])

    a = block_diag([2,3], 4)
    assert_array_equal(a, [[2, 3, 0], [0, 0, 4]])
Example 8: get_first_state
def get_first_state(dt, c1_ts, c1_tfs, c2_ts, c2_tfs, hy_ts, hy_tfs):
    """
    Return the first state (mean, covar) to initialize the kalman filter with.
    Assumes that the time-stamps start at a common zero (are on the same time-scale).
    Returns a state between t=[0, dt].
    Gives priority to AR-markers. If no AR-markers are found in [0, dt], it returns
    the hydra's estimate but with a larger covariance.
    """
    ar1 = [c1_tfs[i] for i in xrange(len(c1_ts)) if c1_ts[i] <= dt]
    ar2 = [c2_tfs[i] for i in xrange(len(c2_ts)) if c2_ts[i] <= dt]
    hy = [hy_tfs[i] for i in xrange(len(hy_ts)) if hy_ts[i] <= dt]
    if ar1 != [] or ar2 != []:
        ar1.extend(ar2)
        x0 = state_from_tfms_no_velocity([avg_transform(ar1)])
        I3 = np.eye(3)
        S0 = scl.block_diag(1e-3*I3, 1e-2*I3, 1e-3*I3, 1e-3*I3)
    else:
        assert len(hy)!=0, colorize("No transforms found for KF initialization. Aborting.", "red", True)
        x0 = state_from_tfms_no_velocity([avg_transform(hy)])
        I3 = np.eye(3)
        S0 = scl.block_diag(1e-1*I3, 1e-1*I3, 1e-2*I3, 1e-2*I3)
    return (x0, S0)
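The covariance construction above is a recurring block_diag pattern: scaled identity blocks give a covariance in which the sub-vectors of the state (here position, orientation, and their velocities) are mutually uncorrelated. A minimal sketch with illustrative values (not this project's actual tuning):

import numpy as np
import scipy.linalg as scl

I3 = np.eye(3)
S0 = scl.block_diag(1e-3*I3, 1e-2*I3, 1e-3*I3, 1e-3*I3)
print(S0.shape)              # (12, 12)
print(np.count_nonzero(S0))  # 12 -- only the block diagonals are populated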
Example 9: _update_z_dist2
def _update_z_dist2(self, g, beta, ifx, lam, alf, mu_ivp):
    gp = self.latentforces[0]
    Cz = [gp.kernel(self.ttc[:, None])]*self.dim.K
    Lz = []
    for c in Cz:
        c[np.diag_indices_from(c)] += 1e-5
        Lz.append(np.linalg.cholesky(c))
    Cz_inv = block_diag(*[cho_solve((L, True),
                                    np.eye(L.shape[0]))
                          for L in Lz])
    K = self._K(g, beta, ifx)
    # parameters for the LDS update
    q = 0
    Sigma = np.eye(self.N_data[q]*self.dim.K) / alf
    y = self.y_train_[q]
    Gamma_inv = np.eye(self.dim.N*self.dim.K) * alf  # + Cz_inv / lam
    A = K  # Gamma.dot(K)
    C = np.zeros((self.N_data[q]*self.dim.K,
                  self.dim.N*self.dim.K))
    inds = self.data_inds[q]
    inds = np.concatenate([inds + self.dim.N*k
                           for k in range(self.dim.K)])
    for i in range(C.shape[0]):
        C[i, inds[i]] += 1
    Cz = block_diag(*Cz)
    Sigma = 0.01*C.dot(Cz.dot(C.T))
    Gamma = np.eye(self.dim.N*self.dim.K) / alf
    #Gamma = 0.01*np.linalg.inv(Cz_inv)  #np.linalg.inv(Gamma_inv)
    u1 = np.kron(mu_ivp[0, 0, :], np.ones(self.dim.N))
    u1 = np.zeros(self.dim.N*self.dim.K)
    V1 = np.ones((self.dim.N*self.dim.K, self.dim.N*self.dim.K))
    V1 = 100.*np.eye(V1.shape[0])
    P1 = A.dot(V1.dot(A.T)) + Gamma
    K2 = P1.dot(C.T.dot(np.linalg.inv(C.dot(P1.dot(C.T)) + Sigma)))
    u2 = A.dot(u1) + K2.dot(y - C.dot(A.dot(u1)))
    V2 = (np.eye(K2.shape[0]) - K2.dot(C)).dot(P1)
    J1 = V1.dot(A.T.dot(np.linalg.inv(P1)))
    u1h = u1 + J1.dot(u2 - A.dot(u1))
    V1h = V1 + J1.dot(V2 - P1).dot(J1.T)
    means = (u1h, u2)
    covs = (V1h, V2)
    pwcovs = (J1.dot(V2), )
    return means, covs, pwcovs
Example 10: incre_svd
def incre_svd():
    """ Incremental SVD generator, see
    Matthew Brand, Incremental singular value decomposition of uncertain data with missing values
    http://www.cs.wustl.edu/~zhang/teaching/cs517/Spring12/CourseProjects/incremental%20svd%20missing%20value.pdf
    """
    c = yield
    s = np.array([npl.norm(c.astype(float))])
    # s = npl.norm(c.astype(float), axis=1)
    U0 = c / s
    Up = 1.0
    V0 = 1.0
    Vp = 1.0
    Vpi = 1.0
    while True:
        r = len(s)
        U = np.dot(U0, Up)
        V = np.dot(V0, Vp)
        c = yield U, s, V
        if c is None:
            continue
        I = np.identity(r)
        O = np.zeros(r)
        l = np.dot(U.T, c)
        j = c - np.dot(U, l)
        k = npl.norm(j)
        j /= k
        print(k)
        if k < trunc:
            k = 0
        Q = block_diag(np.diag(s), k)
        Q[:r, -1:] = l
        A, s, B = npl.svd(Q, full_matrices=False)
        B = B.T
        if k < trunc:
            s = s[:-1]
            Up = np.dot(Up, A[:-1, :-1])
            W, w = np.vsplit(B[:, :-1], [r])
            Wi = (I + np.dot(w.T, w) / (1 - np.dot(w, w.T))).dot(W.T)
            Vp = np.dot(Vp, W)
            Vpi = np.dot(Wi, Vpi)
            V0 = np.vstack((V0, np.dot(w, Vpi)))
        else:
            Up = block_diag(Up, 1).dot(A)
            U0 = np.hstack((U0, j))
            V0 = block_diag(V0, 1)
            Vp = np.dot(block_diag(Vp, 1), B)
            Vpi = np.dot(B.T, block_diag(Vpi, 1))
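One possible way to drive this generator, shown here as a hedged sketch: it assumes the function lives in a module that imports numpy as np, numpy.linalg as npl and block_diag, defines a module-level trunc threshold (the value below is made up), and that each new column is fed in as an (m, 1) array via send().

import numpy as np

trunc = 1e-10                 # assumed truncation threshold used by incre_svd
X = np.random.rand(50, 8)     # toy data matrix, processed one column at a time

gen = incre_svd()
next(gen)                          # advance to the first yield
U, s, V = gen.send(X[:, :1])       # initialize with the first column
for i in range(1, X.shape[1]):
    U, s, V = gen.send(X[:, i:i+1])   # rank-one update per new column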
Example 11: initialize_covariances
def initialize_covariances(freq, demo_dir):
    """
    Initialize empirical estimates of covariances:
    -- Cameras and the hydra observe just the xyzrpy (no velocities).
    -- Motion covariance is for all 12 variables.
    """
    cam_types = get_cam_types(demo_dir)
    cam_tfms = get_cam_tfms(demo_dir)

    rgbd_cam_xyz_std = [0.005, 0.005, 0.005]  # 1cm
    rgb_cam_xyz_std = [0.2, 0.2, 0.4]  # 1cm
    hydra_xyz_std = [0.03, 0.03, 0.03]  # 3cm <-- use small std after tps-correction.

    rgbd_cam_rpy_std = np.deg2rad(30)
    rgb_cam_rpy_std = np.deg2rad(90)
    hydra_rpy_std = np.deg2rad(5)

    I3 = np.eye(3)
    rgbd_covar = scl.block_diag(np.diag(np.square(rgbd_cam_xyz_std)), np.square(rgbd_cam_rpy_std)*I3)
    rgb_covar = scl.block_diag(np.diag(np.square(rgb_cam_xyz_std)), np.square(rgb_cam_rpy_std)*I3)
    hydra_covar = scl.block_diag(np.diag(np.square(hydra_xyz_std)), np.square(hydra_rpy_std)*I3)

    cam_covars = {}
    for cam in cam_types:
        print cam
        if cam == 'camera1':
            if cam_types[cam] == 'rgb':
                cam_covars[cam] = rgb_covar
            else:
                cam_covars[cam] = rgbd_covar
        else:
            for i in xrange(len(cam_tfms)):
                tfm_info = cam_tfms[i]
                if tfm_info['parent'] == 'camera1_link' and tfm_info['child'] == '%s_link'%(cam):
                    # tfm_info is from camera link to camera link
                    tfm_rof1_rof2 = nlg.inv(tfm_link_rof).dot(tfm_info['tfm']).dot(tfm_link_rof)
                    R = scl.block_diag(tfm_rof1_rof2[:3,:3], I3)
                    #R = scl.block_diag(I3, I3)
                    if cam_types[cam] == 'rgb':
                        cam_covars[cam] = R.dot(rgb_covar).dot(R.transpose())
                    else:
                        cam_covars[cam] = R.dot(rgbd_covar).dot(R.transpose())
                    break
    motion_covar = initialize_motion_covariance(freq)
    return (motion_covar, cam_covars, hydra_covar)
Example 12: update_matrix_hogwild
def update_matrix_hogwild(J, partition, q):
    n = J.shape[0]
    A, Bs, Cs = split_hogwild(J, partition)
    BinvCs = [np.linalg.solve(B, C) for B, C in zip(Bs, Cs)]
    BinvCqs = [np.linalg.matrix_power(BinvC, q) for BinvC in BinvCs]
    BinvC = block_diag(*BinvCs)
    BinvCq = block_diag(*BinvCqs)
    BinvA = np.vstack([np.linalg.solve(B, A[indices, :]) for B, indices in zip(Bs, partition)])
    # TODO write this with (B-C)^{-1} A
    return BinvCq + (np.eye(n) - BinvCq).dot(np.linalg.solve(np.eye(n) - BinvC, BinvA))
Example 13: kepler_3d
def kepler_3d(params, t):
    """One-body Kepler problem in 3D.

    This function simply uses kepler_2d and rotates it into 3D.
    """
    a = params.a
    pb = params.pb
    eps1 = params.eps1
    eps2 = params.eps2
    i = params.i
    lan = params.lan

    p2 = Kepler2DParameters(a=a, pb=pb, eps1=eps1, eps2=eps2, t0=params.t0)
    xv, jac = kepler_2d(p2, t)
    xyv = np.zeros(6)
    xyv[:2] = xv[:2]
    xyv[3:5] = xv[2:]
    jac2 = np.zeros((6, 8))
    t = np.zeros((6, 6))
    t[:2] = jac[:2]
    t[3:5] = jac[2:]
    jac2[:, :4] = t[:, :4]
    jac2[:, -2:] = t[:, -2:]

    r_i = np.array([[1, 0, 0],
                    [0, np.cos(i), -np.sin(i)],
                    [0, np.sin(i),  np.cos(i)]])
    d_r_i = np.array([[0, 0, 0],
                      [0, -np.sin(i), -np.cos(i)],
                      [0,  np.cos(i), -np.sin(i)]])
    r_i_6 = block_diag(r_i, r_i)
    d_r_i_6 = block_diag(d_r_i, d_r_i)
    xyv3 = np.dot(r_i_6, xyv)
    jac3 = np.dot(r_i_6, jac2)
    jac3[:, 4] += np.dot(d_r_i_6, xyv)

    r_lan = np.array([[ np.cos(lan), np.sin(lan), 0],
                      [-np.sin(lan), np.cos(lan), 0],
                      [0, 0, 1]])
    d_r_lan = np.array([[-np.sin(lan),  np.cos(lan), 0],
                        [-np.cos(lan), -np.sin(lan), 0],
                        [0, 0, 0]])
    r_lan_6 = block_diag(r_lan, r_lan)
    d_r_lan_6 = block_diag(d_r_lan, d_r_lan)
    xyv4 = np.dot(r_lan_6, xyv3)
    jac4 = np.dot(r_lan_6, jac3)
    jac4[:, 5] += np.dot(d_r_lan_6, xyv3)
    return xyv4, jac4
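To isolate the block_diag idiom used twice above: block_diag(R, R) applies the same 3x3 rotation to the position and velocity halves of a stacked 6-vector in a single product. A standalone check (not from the original source):

import numpy as np
from scipy.linalg import block_diag

theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta), 0],
              [np.sin(theta),  np.cos(theta), 0],
              [0, 0, 1]])
xv = np.array([1.0, 0.0, 0.0, 0.0, 2.0, 0.0])   # x, y, z, vx, vy, vz

rotated = block_diag(R, R).dot(xv)
assert np.allclose(rotated[:3], R.dot(xv[:3]))   # position rotated
assert np.allclose(rotated[3:], R.dot(xv[3:]))   # velocity rotated the same way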
Example 14: compute_giwc_from_wrenches
def compute_giwc_from_wrenches(full=True):
    """
    Compute Gravito-Inertial Wrench Cone (GIWC) from Contact Wrench Cones
    (CWCs).
    """
    global CWC_all
    A = calculate_local_wrench_to_gi_matrix()
    # right vector of CWC_all is the stacked vector w_all of
    # contact wrenches in the *world* frame
    CWC_all = block_diag(*[
        dot(CWC, block_diag(contact.R.T, contact.R.T))
        for contact in contacts])
    S = span_of_face(CWC_all)
    F = face_of_span(dot(A, S))
    return F
Example 15: propagateRLHamiltonian
def propagateRLHamiltonian(t, k, omega, delta, epsilon, U, n):
    t = np.array(t)
    psi0 = rampedOnPsi(k, omega, delta, epsilon, U, n)
    # Energy1, V1 = LA.eig(RamanLatHamiltonian(0.0, 0.0, 0.0, 0.0, U, n))
    # sort = np.argsort(Energy1)
    # V1sorted = V1[:, sort]
    # psi0 = V1sorted[:, 0]
    # psi0[np.divide(3*n, 2)] = 1.0 + 0.0*1j
    H = RamanLatHamiltonian(k, omega, delta, epsilon, U, n)
    Energy, V = LA.eig(H)
    V = V + 1j*0.0
    Vinv = np.conjugate(np.transpose(V))
    # np.outer(t, Energy).flatten() creates a matrix for all t
    U = np.diag(np.exp(-1j*np.outer(t, Energy).flatten()))
    a = np.dot(Vinv, psi0)
    # This repeats a so that the shape is consistent with U
    aa = np.outer(np.ones(t.size), a).flatten()
    # Have to add the transpose to make shapes match
    b = np.dot(U, aa)
    # Same block diagonal trick for the eigenvector matrix
    VV = sLA.block_diag(*([V]*t.size))
    psi = np.dot(VV, b)
    pops = np.absolute(psi)**2.0
    # Since you want the first value, need to take every 3rd row
    # and extract the values you want from the diagonal
    latPops = np.sum(pops.reshape(t.size, n, 3)[:, np.divide(n, 2)-1:np.divide(n, 2)+2, :], axis=2).flatten()
    # populations in the -2k_L, 0, and +2k_L lattice sites, summed over spin states, in time step blocks
    spinPops = np.sum(pops.reshape(t.size, n, 3), axis=1).flatten()
    # populations in each spin state, summed over lattice sites, in time step blocks
    return spinPops
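Finally, the block-diagonal propagation trick in this last example can be shown in isolation: stacking one copy of the eigenvector matrix per time point lets every time step be evolved with a single matrix product. A toy two-level sketch (an illustrative analogue, not the Raman-lattice Hamiltonian of the original code):

import numpy as np
import scipy.linalg as sLA

H = np.array([[0.0, 1.0],
              [1.0, 0.0]])
t = np.linspace(0.0, 1.0, 4)
psi0 = np.array([1.0, 0.0], dtype=complex)

E, V = np.linalg.eigh(H)
a = V.conj().T.dot(psi0)                              # psi0 in the eigenbasis
U = np.diag(np.exp(-1j*np.outer(t, E).flatten()))     # one phase per (time, eigenvalue)
aa = np.outer(np.ones(t.size), a).flatten()           # repeat coefficients per time
VV = sLA.block_diag(*([V]*t.size))                    # one eigenvector block per time
psi = VV.dot(U.dot(aa))                               # t.size stacked states of length 2

pops = np.abs(psi.reshape(t.size, 2))**2
print(pops.sum(axis=1))   # each row sums to 1: the evolution is unitary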