This article collects and summarizes typical usage examples of the numpy.trace function in Python. If you have been wondering what exactly numpy.trace does, how it is called, and what real-world uses of it look like, the curated function examples below may help.
A total of 15 code examples of the trace function are shown below, ordered by popularity by default.
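Before diving into the examples, here is a minimal standalone sketch (not taken from any of the projects below; the array values are made up purely for illustration) of what numpy.trace computes: the sum of the diagonal elements, optionally with an offset or along a chosen pair of axes.

import numpy as np

a = np.arange(9).reshape(3, 3)
print(np.trace(a))             # 0 + 4 + 8 = 12
print(np.trace(a, offset=1))   # 1 + 5 = 6: the diagonal shifted one step to the right

# for stacked matrices, axis1/axis2 choose which two axes form the matrices
b = np.arange(24).reshape(2, 3, 4)
print(np.trace(b, axis1=1, axis2=2).shape)   # (2,): one trace per leading index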
Example 1: step
def step(self, x, last_b):
    # initialize
    m = len(x)
    mu = np.matrix(last_b).T
    sigma = self.sigma
    theta = self.theta
    eps = self.eps
    x = np.matrix(x).T    # matrices are easier to manipulate

    # 4. Calculate the following variables
    M = mu.T * x
    V = x.T * sigma * x
    x_upper = sum(diag(sigma) * x) / trace(sigma)

    # 5. Update the portfolio distribution
    mu, sigma = self.update(x, x_upper, mu, sigma, M, V, theta, eps)

    # 6. Normalize mu and sigma
    mu = tools.simplex_proj(mu)
    sigma = sigma / (m**2 * trace(sigma))
    """
    sigma(sigma < 1e-4*eye(m)) = 1e-4;
    """
    self.sigma = sigma

    return mu
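As a rough illustration of the two trace-based quantities in this step (the confidence-weighted average price relative x_upper and the trace normalisation of sigma), here is a small standalone sketch with made-up numbers; diag and trace in the snippet above are assumed to be numpy.diag and numpy.trace imported at module level in the source.

import numpy as np

m = 3
sigma = np.diag([0.2, 0.5, 0.3])          # toy confidence matrix
x = np.array([1.01, 0.98, 1.05])          # toy price relatives
x_upper = np.sum(np.diag(sigma) * x) / np.trace(sigma)
print(x_upper)                             # sigma-weighted average of x

sigma = sigma / (m ** 2 * np.trace(sigma))
print(np.trace(sigma))                     # 1 / m**2 after the normalisation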
Example 2: fid
def fid(target_unitary, error_channel_operators, density_matrix, symbolic=1):
    """Fidelity between a unitary gate and a not-necessarily-unitary gate,
    for a given initial density matrix. This is later used when calculating
    the worst-case fidelity.
    Notice that the input format of the general channel is a list of Kraus
    operators instead of a process matrix. The input format of the target
    unitary is just the matrix itself, not its process matrix.
    symbolic = 1 is used when the input matrices are sympy,
    while symbolic = 0 is used when the input matrices are numpy.
    """
    V, K, rho = target_unitary, error_channel_operators, density_matrix
    if symbolic:
        Tra = (((V.H)*K[0])*rho).trace()
        fid = Tra*(fun.conjugate(Tra))
        for i in range(1, len(K)):
            Tra = (((V.H)*K[i])*rho).trace()
            fid += Tra*(fun.conjugate(Tra))
        return fid.expand()
    else:
        Tra = np.trace((V.H)*K[0]*rho)
        fid = Tra*(Tra.conjugate())
        for i in range(1, len(K)):
            Tra = np.trace((V.H)*K[i]*rho)
            fid += Tra*(Tra.conjugate())
        return fid
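A quick numerical sanity check of the numeric (symbolic = 0) branch — a hedged sketch, not part of the original module, using plain arrays instead of np.matrix: for a perfect channel, i.e. the target unitary V itself as the single Kraus operator, the fidelity should come out as 1 for any density matrix.

import numpy as np

# a simple unitary (Hadamard) and a pure-state density matrix
V = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
rho = np.array([[1.0, 0.0], [0.0, 0.0]])
K = [V]                                   # perfect channel: one Kraus operator equal to V

Tra = np.trace(V.conj().T @ K[0] @ rho)   # plays the role of (V.H)*K[i]*rho above
fidelity = Tra * np.conjugate(Tra)
print(fidelity)                           # 1.0 up to floating point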
Example 3: test_mapping_cost
def test_mapping_cost(
        self,
        other,
        bend_coef=DEFAULT_LAMBDA[1],
        outlierprior=1e-1,
        outlierfrac=1e-2,
        outliercutoff=1e-2,
        T=5e-3,
        norm_iters=DEFAULT_NORM_ITERS,
):
    mapping_err = self.mapping_cost(other, outlierprior, outlierfrac, outliercutoff, T, norm_iters)
    for i in range(self.N):
        ## compute error for 0 on cpu
        s_gpu = mapping_err[i]
        s_cpu = np.float32(0)
        xt = self.pts_t[i].get()
        xw = self.pts_w[i].get()
        yt = other.pts_t[i].get()
        yw = other.pts_w[i].get()
        ## use the trace b/c then numpy will use float32s all the way
        s_cpu += np.trace(xt.T.dot(xt) + xw.T.dot(xw) - 2 * xw.T.dot(xt))
        s_cpu += np.trace(yt.T.dot(yt) + yw.T.dot(yw) - 2 * yw.T.dot(yt))
        if not np.isclose(s_cpu, s_gpu, atol=1e-4):
            ## high err tolerance is b/c of difference in cpu and gpu precision?
            print "cpu and gpu sum sq differences differ!!!"
            ipy.embed()
            sys.exit(1)
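The reason the test accumulates np.trace(xt.T.dot(xt) + xw.T.dot(xw) - 2 * xw.T.dot(xt)) is that, for real matrices, trace(X.T Y) is the Frobenius inner product, so the whole expression equals the squared Frobenius norm of (xt - xw) while keeping every intermediate in float32. A small sketch of that identity with random data (variable names are illustrative only, not from the project above):

import numpy as np

rng = np.random.default_rng(0)
xt = rng.standard_normal((50, 3)).astype(np.float32)
xw = rng.standard_normal((50, 3)).astype(np.float32)

via_trace = np.trace(xt.T.dot(xt) + xw.T.dot(xw) - 2 * xw.T.dot(xt))
via_norm = np.sum((xt - xw) ** 2)
print(np.isclose(via_trace, via_norm, atol=1e-3))   # True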
Example 4: __update_tau
def __update_tau(self, X):
    """
    Update b_tau_tilde, as a_tau_tilde is independent of other update rules
    b_tau_tilde = b_tau + 1/2 sum ( Z )
    where Z =
        || X_n ||^2 + <|| mu ||^2> + Tr(<W.T * W> <z_n * z_n.T>) +
        2*<mu.T> * <W> * <z_n> - 2 * X_n.T * <W> * <z_n> - 2 * X_n.T * <mu>
    """
    x_norm_sq = np.power(np.linalg.norm(X, axis=0), 2)
    # <|mu|^2> = <mu.T mu> = Tr(Sigma_mu) + mean_mu.T mean_mu
    exp_mu_norm_sq = np.trace(self.sigma_mu) + np.dot(self.mean_mu.T, self.mean_mu)
    exp_mu_norm_sq = exp_mu_norm_sq[0]  # reshape from (1,1) to (1,)
    # TODO what is <W.T W>
    exp_w = self.means_w
    exp_wt_w = np.dot(exp_w.T, exp_w)  # TODO fix
    exp_z_zt = self.N * self.sigma_z + np.dot(self.means_z, self.means_z.T)
    trace_w_z = np.trace(np.dot(exp_wt_w, exp_z_zt))
    mu_w_z = np.dot(np.dot(self.mean_mu.T, self.means_w), self.means_z)
    x_w_z = np.dot(X.T, self.means_w).T * self.means_z
    x_mu = np.dot(X.T, self.mean_mu)
    big_sum = np.sum(x_norm_sq) + self.N * exp_mu_norm_sq + trace_w_z + \
        2*np.sum(mu_w_z) - 2*np.sum(x_w_z) - 2*np.sum(x_mu)
    self.b_tau_tilde = self.b_tau + 0.5*big_sum
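A side note on the trace_w_z term: np.trace(np.dot(A, B)) forms the full matrix product only to sum its diagonal. When the matrices are large, the same number can be computed as np.sum(A * B.T) (or with einsum), which avoids the off-diagonal work. A sketch with arbitrary test matrices (not part of the model above):

import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((200, 200))
B = rng.standard_normal((200, 200))

t1 = np.trace(np.dot(A, B))       # what the update above does
t2 = np.sum(A * B.T)              # same value, without the full product
t3 = np.einsum('ij,ji->', A, B)   # einsum spelling of the same contraction
print(np.allclose(t1, t2), np.allclose(t1, t3))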
Example 5: confmat
def confmat(self,inputs,targets):
    """Confusion matrix"""
    # Add the inputs that match the bias node
    inputs = np.concatenate((inputs,-np.ones((self.nData,1))),axis=1)

    outputs = np.dot(inputs,self.weights)
    nClasses = np.shape(targets)[1]

    if nClasses==1:
        nClasses = 2
        outputs = np.where(outputs>0,1,0)
    else:
        # 1-of-N encoding
        outputs = np.argmax(outputs,1)
        targets = np.argmax(targets,1)

    cm = np.zeros((nClasses,nClasses))
    for i in range(nClasses):
        for j in range(nClasses):
            cm[i,j] = np.sum(np.where(outputs==i,1,0)*np.where(targets==j,1,0))

    print cm
    print np.trace(cm)/np.sum(cm)
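The final print is the overall classification accuracy: the diagonal of the confusion matrix counts correct predictions, so trace(cm)/sum(cm) is correct over total. A toy, self-contained illustration with made-up labels (not data from the snippet above):

import numpy as np

outputs = np.array([0, 0, 1, 2, 2, 1, 0, 2])   # predicted classes
targets = np.array([0, 1, 1, 2, 2, 2, 0, 2])   # true classes
nClasses = 3

cm = np.zeros((nClasses, nClasses))
for i in range(nClasses):
    for j in range(nClasses):
        cm[i, j] = np.sum(np.where(outputs == i, 1, 0) * np.where(targets == j, 1, 0))

print(cm)
print(np.trace(cm) / np.sum(cm))   # fraction of correct predictions (0.75 here)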
Example 6: grad_log_like
def grad_log_like(phis, *args):
    x_train, t_train = args
    # init the matrices for the derivatives of each phi according to each pair of data points
    dert0 = np.zeros((x_train.shape[0], x_train.shape[0]))
    dert1 = np.zeros((x_train.shape[0], x_train.shape[0]))
    dert2 = np.zeros((x_train.shape[0], x_train.shape[0]))
    dert3 = np.zeros((x_train.shape[0], x_train.shape[0]))
    # vector of the final result of the derivatives
    der = np.zeros_like(phis)
    K = computeK_opt(x_train, x_train, phis)
    C = computeC(K, beta)
    invC = np.linalg.inv(C)
    for i in range(len(x_train)):
        for j in range(len(x_train)):
            dert0[i,j] = np.exp((-np.exp(phis[1])/2)*((x_train[i] - x_train[j])**2))*np.exp(phis[0])
            dert1[i,j] = -0.5*np.exp(phis[0])*np.exp((-np.exp(phis[1])/2)*((x_train[i] - x_train[j])**2))*((x_train[i] - x_train[j])**2)*np.exp(phis[1])
            dert2[i,j] = np.exp(phis[2])
            dert3[i,j] = x_train[i]*x_train[j]*np.exp(phis[3])
    # get the derivatives of the negative log-likelihood
    der[0] = -(((-1/2)*np.trace(np.dot(invC, dert0))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert0), invC),t_train)))
    der[1] = -(((-1/2)*np.trace(np.dot(invC, dert1))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert1), invC),t_train)))
    der[2] = -(((-1/2)*np.trace(np.dot(invC, dert2))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert2), invC),t_train)))
    der[3] = -(((-1/2)*np.trace(np.dot(invC, dert3))) + ((1/2)*np.dot(np.dot(np.dot(np.dot(t_train.T, invC),dert3), invC),t_train)))
    return der
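The -(1/2)*trace(invC . dC/dphi) pieces come from the standard identity d/dθ log det C = trace(C^{-1} dC/dθ). A short numerical check of that identity on a small synthetic covariance (everything here is made up for illustration, it is not the kernel used above):

import numpy as np

rng = np.random.default_rng(2)
n = 5
B = rng.standard_normal((n, n))
base = B @ B.T + n * np.eye(n)       # fixed positive-definite part
D = rng.standard_normal((n, n))
D = D @ D.T                          # direction in which the covariance varies

C = lambda theta: base + theta * D   # C(theta); dC/dtheta = D

theta0, h = 0.3, 1e-6
analytic = np.trace(np.linalg.inv(C(theta0)) @ D)
numeric = (np.linalg.slogdet(C(theta0 + h))[1]
           - np.linalg.slogdet(C(theta0 - h))[1]) / (2 * h)
print(analytic, numeric)             # should agree to several decimal places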
Example 7: _fit
def _fit(self, cov_a, cov_b):
    """Aux Function (modifies cov_a and cov_b in-place)."""
    cov_a /= np.trace(cov_a)
    cov_b /= np.trace(cov_b)
    # computes the eigen values
    lambda_, u = linalg.eigh(cov_a + cov_b)
    # sort them
    ind = np.argsort(lambda_)[::-1]
    lambda2_ = lambda_[ind]

    u = u[:, ind]
    p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda2_))), u.T)

    # Compute the generalized eigen value problem
    w_a = np.dot(np.dot(p, cov_a), p.T)
    w_b = np.dot(np.dot(p, cov_b), p.T)
    # and solve it
    vals, vecs = linalg.eigh(w_a, w_b)
    # sort vectors by discriminative power using eigen values
    ind = np.argsort(np.maximum(vals, 1.0 / vals))[::-1]
    vecs = vecs[:, ind]
    # and project
    w = np.dot(vecs.T, p)

    self.filters_ = w
    self.patterns_ = linalg.pinv(w).T
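The first two lines scale each covariance matrix to unit trace before the generalised eigenvalue problem is solved, which removes overall power differences between the two classes. A minimal sketch of just that normalisation step on random covariance estimates (the data is synthetic, not EEG):

import numpy as np

rng = np.random.default_rng(3)
xa = rng.standard_normal((1000, 4))          # class-A samples
xb = 3.0 * rng.standard_normal((1000, 4))    # class-B samples, much larger scale

cov_a = np.cov(xa, rowvar=False)
cov_b = np.cov(xb, rowvar=False)
cov_a /= np.trace(cov_a)
cov_b /= np.trace(cov_b)
print(np.trace(cov_a), np.trace(cov_b))      # both 1.0: the scale difference is gone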
Example 8: m_step_Q
def m_step_Q(emd, stationary):
    """
    Computes the optimised state-transition covariance hyperparameters `Q` of
    the natural parameters of the posterior distributions over time. Here
    just one single scalar is considered.

    :param container.EMData emd:
        All data pertaining to the EM algorithm.
    :param stationary:
        If 'all', stationarity of all thetas is assumed.
    """
    inv_lmbda = 0
    if emd.param_est_eta == 'exact':
        for i in range(1, emd.T):
            lag_one_covariance = emd.sigma_s_lag[i, :, :]
            tmp = emd.theta_s[i, :] - emd.theta_s[i - 1, :]
            inv_lmbda += numpy.trace(emd.sigma_s[i, :, :]) - \
                2 * numpy.trace(lag_one_covariance) + \
                numpy.trace(emd.sigma_s[i - 1, :, :]) + \
                numpy.dot(tmp, tmp)
        emd.Q = inv_lmbda / emd.D / (emd.T - 1) * numpy.identity(emd.D)
    else:
        for i in range(1, emd.T):
            lag_one_covariance = emd.sigma_s_lag[i, :]
            tmp = emd.theta_s[i, :] - emd.theta_s[i - 1, :]
            inv_lmbda += numpy.sum(emd.sigma_s[i]) - \
                2 * numpy.sum(lag_one_covariance) + \
                numpy.sum(emd.sigma_s[i - 1]) + \
                numpy.dot(tmp, tmp)
        emd.Q = inv_lmbda / emd.D / (emd.T - 1) * \
            numpy.identity(emd.D)

    if stationary == 'all':
        emd.Q = numpy.zeros(emd.Q.shape)
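The quantity accumulated in the 'exact' branch is the expected squared step length E||theta_i - theta_{i-1}||^2, which for jointly Gaussian vectors decomposes into Tr(Sigma_i) - 2*Tr(Sigma_lag) + Tr(Sigma_{i-1}) + ||mu_i - mu_{i-1}||^2. A Monte Carlo sketch of that identity with an arbitrary joint covariance (all values synthetic, not from the EM container above):

import numpy

rng = numpy.random.default_rng(0)
d = 3
# random joint covariance for the stacked vector (theta_i, theta_{i-1})
M = rng.standard_normal((2 * d, 2 * d))
joint_cov = M @ M.T + 0.1 * numpy.eye(2 * d)
mu = rng.standard_normal(2 * d)

sigma_i = joint_cov[:d, :d]
sigma_prev = joint_cov[d:, d:]
sigma_lag = joint_cov[:d, d:]                   # lag-one (cross) covariance
tmp = mu[:d] - mu[d:]

closed_form = numpy.trace(sigma_i) - 2 * numpy.trace(sigma_lag) + \
    numpy.trace(sigma_prev) + numpy.dot(tmp, tmp)

samples = rng.multivariate_normal(mu, joint_cov, size=200000)
monte_carlo = numpy.mean(numpy.sum((samples[:, :d] - samples[:, d:]) ** 2, axis=1))
print(closed_form, monte_carlo)                 # should agree to within about a percent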
Example 9: maxwell_sihvola
def maxwell_sihvola(self, dielectric_medium, dielecv, shape, L, vf):
    """Calculate the effective permittivity using the Maxwell Garnett method.
    dielectric_medium is the dielectric constant tensor of the medium
    dielecv is the total frequency dielectric constant tensor at the current frequency
    shape is the name of the current shape
    L is the shape's depolarisation matrix
    vf is the volume fraction of filler
    The routine returns the effective dielectric constant."""
    # Equation 6.29 on page 123 of Sihvola
    # Equation 6.40 gives the averaging over the orientation function
    # See also equation 5.80 on page 102 and equation 4.31 on page 70
    Me = dielectric_medium
    # assume that the medium is isotropic and calculate the inverse of the dielectric
    Mem1 = 3.0 / np.trace(Me)
    Mi = dielecv
    # calculate the polarisability matrix x the number density of inclusions
    nA = vf*np.dot( (Mi-Me), np.linalg.inv( self.unit + (Mem1 * np.dot(L, (Mi - Me)))))
    nAL = np.dot((nA), L)
    # average the polarisability over orientation
    nA = np.trace(nA) / 3.0 * self.unit
    # average the polarisability*L over orientation
    nAL = np.trace(nAL) / 3.0 * Mem1 * self.unit
    # Calculate the average polarisation factor which scales the average field
    # based on equation 5.80
    # <P> = pol . <E>
    pol = np.dot(np.linalg.inv(self.unit - nAL), nA)
    # Meff . <E> = Me . <E> + <P>
    # Meff . <E> = Me . <E> + pol . <E>
    # Meff = Me + pol
    effd = dielectric_medium + pol
    # Average over orientation
    trace = np.trace(effd) / 3.0
    effdielec = np.array([[trace, 0, 0], [0, trace, 0], [0, 0, trace]])
    return effdielec
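The repeated pattern np.trace(T)/3.0 * unit is the isotropic average of a tensor over all orientations: only the rotation-invariant part (the trace) survives, spread equally over the three axes. A tiny sketch with an invented tensor:

import numpy as np

unit = np.eye(3)
T = np.array([[4.0, 0.2, 0.0],
              [0.2, 2.0, 0.1],
              [0.0, 0.1, 6.0]])

T_iso = np.trace(T) / 3.0 * unit
print(T_iso)                                      # 4.0 on the diagonal: (4 + 2 + 6) / 3
print(np.isclose(np.trace(T_iso), np.trace(T)))   # the trace is preserved by the averaging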
Example 10: test_pullback
def test_pullback(self):
    (D,P,N) = 2,5,10
    A_data = numpy.zeros((D,P,N,N))
    for d in range(D):
        for p in range(P):
            tmp = numpy.random.rand(N,N)
            A_data[d,p,:,:] = numpy.dot(tmp.T,tmp)
            if d == 0:
                A_data[d,p,:,:] += N * numpy.diag(numpy.random.rand(N))

    A = UTPM(A_data)
    l,Q = UTPM.eigh(A)

    L_data = UTPM._diag(l.data)
    L = UTPM(L_data)

    assert_array_almost_equal(UTPM.dot(Q, UTPM.dot(L,Q.T)).data, A.data, decimal = 13)

    lbar = UTPM(numpy.random.rand(*(D,P,N)))
    Qbar = UTPM(numpy.random.rand(*(D,P,N,N)))

    Abar = UTPM.pb_eigh( lbar, Qbar, A, l, Q)

    Abar = Abar.data[0,0]
    Adot = A.data[1,0]

    Lbar = UTPM._diag(lbar.data)[0,0]
    Ldot = UTPM._diag(l.data)[1,0]

    Qbar = Qbar.data[0,0]
    Qdot = Q.data[1,0]

    assert_almost_equal(numpy.trace(numpy.dot(Abar.T, Adot)), numpy.trace( numpy.dot(Lbar.T, Ldot) + numpy.dot(Qbar.T, Qdot)))
Example 11: test_pullback_repeated_eigenvalues
def test_pullback_repeated_eigenvalues(self):
    D,P,N = 2,1,6
    A = UTPM(numpy.zeros((D,P,N,N)))
    V = UTPM(numpy.random.rand(D,P,N,N))

    A.data[0,0] = numpy.diag([2,2,3,3.,4,5])
    A.data[1,0] = numpy.diag([5,1,3,1.,1,3])

    V,Rtilde = UTPM.qr(V)
    A = UTPM.dot(UTPM.dot(V.T, A), V)

    l,Q = UTPM.eigh(A)

    L_data = UTPM._diag(l.data)
    L = UTPM(L_data)

    assert_array_almost_equal(UTPM.dot(Q, UTPM.dot(L,Q.T)).data, A.data, decimal = 13)

    lbar = UTPM(numpy.random.rand(*(D,P,N)))
    Qbar = UTPM(numpy.random.rand(*(D,P,N,N)))

    Abar = UTPM.pb_eigh( lbar, Qbar, A, l, Q)

    Abar = Abar.data[0,0]
    Adot = A.data[1,0]

    Lbar = UTPM._diag(lbar.data)[0,0]
    Ldot = UTPM._diag(l.data)[1,0]

    Qbar = Qbar.data[0,0]
    Qdot = Q.data[1,0]

    assert_almost_equal(numpy.trace(numpy.dot(Abar.T, Adot)), numpy.trace( numpy.dot(Lbar.T, Ldot) + numpy.dot(Qbar.T, Qdot)))
Example 12: test_eigh1_pushforward
def test_eigh1_pushforward(self):
    (D,P,N) = 2,1,2
    A = UTPM(numpy.zeros((D,P,N,N)))
    A.data[0,0] = numpy.eye(N)
    A.data[1,0] = numpy.diag([3,4])

    L,Q,b = UTPM.eigh1(A)

    assert_array_almost_equal(UTPM.dot(Q, UTPM.dot(L,Q.T)).data, A.data, decimal = 13)

    Lbar = UTPM.diag(UTPM(numpy.zeros((D,P,N))))
    Lbar.data[0,0] = [0.5,0.5]
    Qbar = UTPM(numpy.random.rand(*(D,P,N,N)))

    Abar = UTPM.pb_eigh1( Lbar, Qbar, None, A, L, Q, b)

    Abar = Abar.data[0,0]
    Adot = A.data[1,0]

    Lbar = Lbar.data[0,0]
    Ldot = L.data[1,0]

    Qbar = Qbar.data[0,0]
    Qdot = Q.data[1,0]

    assert_almost_equal(numpy.trace(numpy.dot(Abar.T, Adot)), numpy.trace( numpy.dot(Lbar.T, Ldot) + numpy.dot(Qbar.T, Qdot)))
Example 13: test_det_ovlp
def test_det_ovlp(self):
    mf = scf.UHF(mol)
    mf.scf()
    s, x = mf.det_ovlp(mf.mo_coeff, mf.mo_coeff, mf.mo_occ, mf.mo_occ)
    self.assertAlmostEqual(s, 1.000000000, 9)
    self.assertAlmostEqual(numpy.trace(x[0]), mf.nelec[0]*1.000000000, 9)
    self.assertAlmostEqual(numpy.trace(x[1]), mf.nelec[1]*1.000000000, 9)
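The assertions rely on the trace of each spin channel's occupied-orbital matrix counting the number of electrons in that channel. As a loose analogy only (this is not pyscf code), the trace of an orthogonal projector built from k orthonormal columns equals k:

import numpy

rng = numpy.random.default_rng(1)
k = 5
Q, _ = numpy.linalg.qr(rng.standard_normal((12, k)))   # 12x5 matrix with orthonormal columns
P = Q @ Q.T                                            # rank-k orthogonal projector
print(numpy.trace(P))                                  # 5.0 (= k), up to rounding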
Example 14: grad_nlogprob
def grad_nlogprob(hypers):
    amp2 = np.exp(hypers[0])
    noise = np.exp(hypers[1])
    ls = np.exp(hypers[2:])

    chol, corr, grad_corr = memoize(amp2, noise, ls)
    solve = spla.cho_solve((chol, True), diffs)
    inv_cov = spla.cho_solve((chol, True), np.eye(chol.shape[0]))

    jacobian = np.outer(solve, solve) - inv_cov

    grad = np.zeros(self.D + 2)

    # Log amplitude gradient.
    grad[0] = 0.5 * np.trace(np.dot( jacobian, corr + 1e-6*np.eye(chol.shape[0]))) * amp2

    # Log noise gradient.
    grad[1] = 0.5 * np.trace(np.dot( jacobian, np.eye(chol.shape[0]))) * noise

    # Log length scale gradients.
    for dd in xrange(self.D):
        grad[dd+2] = 1 * np.trace(np.dot( jacobian, -amp2*grad_corr[:,:,dd]*comp[:,dd][:,np.newaxis]/(np.exp(ls[dd]))))*np.exp(ls[dd])

    # Roll in the prior variance.
    #grad -= 2*hypers/self.hyper_prior

    return -grad
Example 15: __calcMergeCost
def __calcMergeCost(self, weightA, meanA, precA, weightB, meanB, precB):
    """Calculates and returns the cost of merging two Gaussians."""
    # (For anyone wondering why the two Gaussians are compared against each other rather than against the result of merging them: this way tends to give better results.)

    # The log determinants and delta...
    logDetA = math.log(numpy.linalg.det(precA))
    logDetB = math.log(numpy.linalg.det(precB))
    delta = meanA - meanB

    # Kullback-Leibler of representing A using B...
    klA = logDetB - logDetA
    klA += numpy.trace(numpy.dot(precB, numpy.linalg.inv(precA)))
    klA += numpy.dot(numpy.dot(delta, precB), delta)
    klA -= precA.shape[0]
    klA *= 0.5

    # Kullback-Leibler of representing B using A...
    klB = logDetA - logDetB
    klB += numpy.trace(numpy.dot(precA, numpy.linalg.inv(precB)))
    klB += numpy.dot(numpy.dot(delta, precA), delta)
    klB -= precB.shape[0]
    klB *= 0.5

    # Return a weighted average...
    return weightA * klA + weightB * klB
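A quick sanity check of the cost expression (a standalone sketch with invented values, not part of the original class): when the two Gaussians are identical, the KL terms collapse to zero, since the log-determinants cancel, the trace term equals the dimension, and delta vanishes.

import math
import numpy

rng = numpy.random.default_rng(5)
d = 3
B = rng.standard_normal((d, d))
prec = B @ B.T + d * numpy.eye(d)       # a positive-definite precision matrix
mean = rng.standard_normal(d)

logDet = math.log(numpy.linalg.det(prec))
delta = mean - mean                      # identical means, so delta is zero
kl = logDet - logDet                     # log determinants cancel
kl += numpy.trace(numpy.dot(prec, numpy.linalg.inv(prec)))   # = d up to rounding
kl += numpy.dot(numpy.dot(delta, prec), delta)               # = 0
kl -= prec.shape[0]
kl *= 0.5
print(kl)                                # ~0: merging a Gaussian with itself costs nothing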