This article collects typical usage examples of the numpy.diag method from the Python autograd package. If you have been wondering how numpy.diag is actually used, the curated examples below may help. You can also explore other methods of the autograd.numpy module.
In total, 15 code examples of numpy.diag are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
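Before the examples, here is a minimal standalone sketch (not taken from any example below; the inputs are made up) of what autograd.numpy.diag does and of gradients flowing through it:

# Minimal sketch: autograd.numpy.diag mirrors numpy.diag -- it extracts the
# diagonal of a 2-D array and builds a diagonal matrix from a 1-D array --
# and autograd can differentiate functions that use it.
import autograd.numpy as anp
from autograd import grad

A = anp.arange(9.0).reshape(3, 3)
v = anp.array([1.0, 2.0, 3.0])
print(anp.diag(A))   # extract the diagonal: [0. 4. 8.]
print(anp.diag(v))   # build a 3x3 diagonal matrix from v

f = lambda X: anp.sum(anp.diag(X) ** 2)
print(grad(f)(A))    # twice the diagonal entries on the diagonal, zeros elsewhere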
Example 1: __init__
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def __init__(self, generate_diagonal, matrix_class):
    matrix_pairs = {}
    rng = np.random.RandomState(SEED)
    for sz in SIZES:
        diagonal = generate_diagonal(sz, rng)
        matrix_pairs[sz] = (matrix_class(diagonal), np.diag(diagonal))
    if AUTOGRAD_AVAILABLE:
        def param_func(param, matrix):
            return anp.diag(param)
        def get_param(matrix):
            return matrix.diagonal
    else:
        param_func, get_param = None, None
    super().__init__(matrix_pairs, get_param, param_func, rng)
Example 2: fit_gaussian_draw
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points
    from the fit.
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise eigenvalues of the covariance matrix to this power to construct
        a new covariance matrix before drawing samples. Useful to shrink the spread
        of the variance.
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d == 1:
            cov_x = np.array([[cov_x]])
        [evals, evecs] = np.linalg.eig(cov_x)
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V
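NumpySeedContext is a helper from the surrounding package and is not shown here. The eigenvalue-power shrinkage that the example builds with np.diag can be reproduced with plain NumPy; the following sketch uses made-up data and an eig_pow of 0.5:

# Standalone sketch of the eigenvalue-power shrinkage used above.
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(200, 3)                      # made-up data, n x d
cov_x = np.cov(X.T)

evals, evecs = np.linalg.eig(cov_x)
evals = np.maximum(0, np.real(evals))
evecs = np.real(evecs)
# Raising the eigenvalues to a power and adding a small ridge regularizer.
shrunk_cov = evecs.dot(np.diag(evals ** 0.5)).dot(evecs.T) + 1e-7 * np.eye(3)
samples = rng.multivariate_normal(np.mean(X, 0), shrunk_cov, size=10)
print(samples.shape)                       # (10, 3)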
Example 3: multivariate_normal_density
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def multivariate_normal_density(mean, cov, X):
    """
    Exact density (not log density) of a multivariate Gaussian.
    mean: length-d array
    cov: a dxd covariance matrix
    X: n x d 2d-array
    """
    evals, evecs = np.linalg.eigh(cov)
    cov_half_inv = evecs.dot(np.diag(evals**(-0.5))).dot(evecs.T)
    # print(evals)
    half_evals = np.dot(X - mean, cov_half_inv)
    full_evals = np.sum(half_evals**2, 1)
    unden = np.exp(-0.5*full_evals)
    Z = np.sqrt(np.linalg.det(2.0*np.pi*cov))
    den = unden/Z
    assert len(den) == X.shape[0]
    return den
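As a hedged sanity check (assuming the multivariate_normal_density function above is defined in the current scope, and using made-up inputs), the result can be compared against scipy.stats.multivariate_normal:

# Sanity-check sketch: compare the example function with SciPy's pdf.
import numpy as np
from scipy.stats import multivariate_normal

mean = np.array([0.5, -1.0])
cov = np.array([[2.0, 0.3], [0.3, 1.0]])
X = np.random.randn(5, 2)

den = multivariate_normal_density(mean, cov, X)   # function from the example above
ref = multivariate_normal.pdf(X, mean=mean, cov=cov)
assert np.allclose(den, ref)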
Example 4: moran_transition
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def moran_transition(t, n):
    assert t >= 0.0
    P, d, Pinv = moran_eigensystem(n)
    D = diag(exp(t * d))
    return check_probs_matrix(dot(P, dot(D, Pinv)))
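moran_eigensystem and check_probs_matrix are helpers from the surrounding module and are not shown here. The diag-based pattern itself, exp(tQ) = P diag(exp(t d)) P^-1, can be illustrated on its own with a tiny made-up rate matrix:

# Standalone sketch of a matrix exponential via an eigendecomposition, with
# np.diag turning the eigenvalue vector into a diagonal matrix.
import numpy as np
from scipy.linalg import expm

Q = np.array([[-1.0, 1.0],
              [0.5, -0.5]])      # made-up rate matrix (rows sum to zero)
t = 0.7

d, P = np.linalg.eig(Q)
Pinv = np.linalg.inv(P)
transition = P.dot(np.diag(np.exp(t * d))).dot(Pinv)

assert np.allclose(transition, expm(t * Q))
assert np.allclose(transition.sum(axis=1), 1.0)   # rows of exp(tQ) sum to 1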
Example 5: rate_matrix
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def rate_matrix(n, sparse_format="csr"):
    i = np.arange(n + 1)
    diag = i * (n - i) / 2.
    diags = [diag[:-1], -2 * diag, diag[1:]]
    M = scipy.sparse.diags(
        diags, [1, 0, -1], (n + 1, n + 1), format=sparse_format)
    return M
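A hedged usage sketch, assuming the rate_matrix function above (and its numpy/scipy imports) is in scope: for a small n the result is a tridiagonal sparse matrix whose rows sum to zero.

# Usage sketch for rate_matrix (assumes the function above is defined).
import numpy as np

M = rate_matrix(3)
print(M.toarray())
# Off-diagonal entries i*(n-i)/2 sit above and below a main diagonal of
# -2*i*(n-i)/2, so every row sums to zero.
assert np.allclose(np.asarray(M.sum(axis=1)).ravel(), 0.0)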
Example 6: ll_m2_exact
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def ll_m2_exact(muw, Sigw, Siginv, x):
    L = np.linalg.cholesky(Siginv)
    Rho = np.dot(np.dot(L.T, Sigw), L)
    crho = 2*(Rho**2).sum() + (np.diag(Rho)*np.diag(Rho)[:, np.newaxis]).sum()
    mu = np.dot(L.T, (x - muw).T).T
    musq = (mu**2).sum(axis=1)
    return 0.25*(crho + musq*musq[:, np.newaxis] + np.diag(Rho).sum()*(musq + musq[:, np.newaxis]) + 4*np.dot(np.dot(mu, Rho), mu.T))
# Var[log N(x; mu, Sig)] under mu ~ N(muw, Sigw)
Example 7: ll_m2_exact_diag
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def ll_m2_exact_diag(muw, Sigw, Siginv, x):
    L = np.linalg.cholesky(Siginv)
    Rho = np.dot(np.dot(L.T, Sigw), L)
    crho = 2*(Rho**2).sum() + (np.diag(Rho)*np.diag(Rho)[:, np.newaxis]).sum()
    mu = np.dot(L.T, (x - muw).T).T
    musq = (mu**2).sum(axis=1)
    return 0.25*(crho + musq**2 + 2*np.diag(Rho).sum()*musq + 4*(np.dot(mu, Rho)*mu).sum(axis=1))
Example 8: log_prop
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def log_prop(self, t, Xc, Xp, y, prop_params, model_params):
    mu0, Sigma0, A, Q, C, R = model_params
    mut, lint, log_s2t = prop_params[t]
    s2t = np.exp(log_s2t)
    if t > 0:
        mu = mut + np.dot(A, Xp.T).T*lint
    else:
        mu = mut + lint*mu0
    return self.log_normal(Xc, mu, np.diag(s2t))
Example 9: callback
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def callback(params, t, g):
    print("Iteration {} lower bound {}".format(t, -objective(params, t)))
    plt.cla()
    target_distribution = lambda x: np.exp(log_density(x, t))
    plot_isocontours(ax, target_distribution)
    mean, log_std = unpack_params(params)
    variational_contour = lambda x: mvn.pdf(x, mean, np.diag(np.exp(2*log_std)))
    plot_isocontours(ax, variational_contour)
    plt.draw()
    plt.pause(1.0/30.0)
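The callback uses np.diag(np.exp(2*log_std)) to turn per-dimension log standard deviations into a diagonal covariance for a mean-field Gaussian. A standalone sketch of just that piece, with made-up parameters and scipy.stats.multivariate_normal aliased as mvn (as in the surrounding script):

# Sketch of the diagonal-covariance construction used in the callback.
import numpy as np
from scipy.stats import multivariate_normal as mvn

mean = np.array([0.0, 1.0])
log_std = np.array([-0.5, 0.2])            # made-up variational parameters

cov = np.diag(np.exp(2 * log_std))         # variances std**2 on the diagonal
variational_contour = lambda x: mvn.pdf(x, mean, cov)

grid = np.array([[0.0, 1.0], [1.0, 0.0]])
print(variational_contour(grid))           # densities at the two grid points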
Example 10: test_diag
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def test_diag(): combo_check(np.diag, [0])([R(5, 5)], k=[-1, 0, 1])
Example 11: test_diag_flat
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def test_diag_flat(): combo_check(np.diag, [0])([R(5)], k=[-1, 0, 1])
Example 12: test_diag
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def test_diag():
    def fun(x): return np.diag(x)
    mat = npr.randn(10, 10)
    check_grads(fun)(mat)
Example 13: test_make_diagonal
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def test_make_diagonal():
    def fun(D):
        return np.make_diagonal(D, axis1=-1, axis2=-2)

    D = np.random.randn(4)
    A = np.make_diagonal(D, axis1=-1, axis2=-2)
    assert np.allclose(np.diag(A), D)
    check_grads(fun)(D)

    D = np.random.randn(3, 4)
    A = np.make_diagonal(D, axis1=-1, axis2=-2)
    assert all([np.allclose(np.diag(A[i]), D[i]) for i in range(3)])
    check_grads(fun)(D)
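make_diagonal is autograd's counterpart to np.diag for building diagonal arrays (including batched ones). A minimal sketch of what the single-vector assertion above checks, using a made-up vector:

# Minimal sketch: autograd.numpy.make_diagonal places the trailing axis onto a
# new diagonal, which np.diag reads back out.
import autograd.numpy as np

D = np.array([1.0, 2.0, 3.0])
A = np.make_diagonal(D, axis1=-1, axis2=-2)   # 3x3 matrix with D on the diagonal
assert np.allclose(A, np.diag(D))
assert np.allclose(np.diag(A), D)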
Example 14: __init__
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def __init__(self, mean, cov):
    """
    mean: a numpy array of length d.
    cov: d x d numpy array for the covariance.
    """
    self.mean = mean
    self.cov = cov
    assert mean.shape[0] == cov.shape[0]
    assert cov.shape[0] == cov.shape[1]
    E, V = np.linalg.eigh(cov)
    if np.any(np.abs(E) <= 1e-7):
        raise ValueError('covariance matrix is not full rank.')
    # The precision matrix
    self.prec = np.dot(np.dot(V, np.diag(old_div(1.0, E))), V.T)
    # print(self.prec)
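old_div comes from the past/future Python 2-3 compatibility library and is just a division here. The eigendecomposition-based precision matrix can be checked directly with plain NumPy (standalone sketch, made-up covariance):

# Sketch of the precision-matrix construction: invert a covariance matrix
# through its eigendecomposition and verify prec @ cov = I.
import numpy as np

cov = np.array([[2.0, 0.4],
                [0.4, 1.0]])
E, V = np.linalg.eigh(cov)
prec = np.dot(np.dot(V, np.diag(1.0 / E)), V.T)

assert np.allclose(prec.dot(cov), np.eye(2))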
Example 15: likelihood
# Required module: from autograd import numpy [as alias]
# Or: from autograd.numpy import diag [as alias]
def likelihood(self, hyp):
    M = self.M
    Z = self.Z
    m = self.m
    S = self.S
    X_batch = self.X_batch
    y_batch = self.y_batch
    jitter = self.jitter
    jitter_cov = self.jitter_cov
    N = X_batch.shape[0]

    logsigma_n = hyp[-1]
    sigma_n = np.exp(logsigma_n)

    # Compute K_u_inv
    K_u = kernel(Z, Z, hyp[:-1])
    # K_u_inv = np.linalg.solve(K_u + np.eye(M)*jitter_cov, np.eye(M))
    L = np.linalg.cholesky(K_u + np.eye(M)*jitter_cov)
    K_u_inv = np.linalg.solve(L.T, np.linalg.solve(L, np.eye(M)))
    self.K_u_inv = K_u_inv

    # Compute mu
    psi = kernel(Z, X_batch, hyp[:-1])
    K_u_inv_m = np.matmul(K_u_inv, m)
    MU = np.matmul(psi.T, K_u_inv_m)

    # Compute cov
    Alpha = np.matmul(K_u_inv, psi)
    COV = kernel(X_batch, X_batch, hyp[:-1]) - np.matmul(psi.T, np.matmul(K_u_inv, psi)) + \
        np.matmul(Alpha.T, np.matmul(S, Alpha))
    COV_inv = np.linalg.solve(COV + np.eye(N)*sigma_n + np.eye(N)*jitter, np.eye(N))
    # L = np.linalg.cholesky(COV + np.eye(N)*sigma_n + np.eye(N)*jitter)
    # COV_inv = np.linalg.solve(np.transpose(L), np.linalg.solve(L, np.eye(N)))

    # Compute cov(Z, X)
    cov_ZX = np.matmul(S, Alpha)

    # Update m and S
    alpha = np.matmul(COV_inv, cov_ZX.T)
    m = m + np.matmul(cov_ZX, np.matmul(COV_inv, y_batch - MU))
    S = S - np.matmul(cov_ZX, alpha)
    self.m = m
    self.S = S

    # Compute NLML
    K_u_inv_m = np.matmul(K_u_inv, m)
    NLML = 0.5*np.matmul(m.T, K_u_inv_m) + np.sum(np.log(np.diag(L))) + 0.5*np.log(2.*np.pi)*M
    return NLML[0, 0]
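kernel and the class attributes above come from the surrounding sparse-GP code and are not shown here. The Cholesky pattern the example relies on, an inverse via triangular solves plus a log-determinant from np.log(np.diag(L)), can be shown on its own with a made-up positive-definite matrix standing in for kernel(Z, Z, hyp):

# Standalone sketch of the Cholesky-based inverse and log-determinant pattern.
import numpy as np

M = 4
rng = np.random.RandomState(1)
A = rng.randn(M, M)
K_u = A.dot(A.T)                          # made-up stand-in for kernel(Z, Z, hyp)
jitter_cov = 1e-8

L = np.linalg.cholesky(K_u + np.eye(M) * jitter_cov)
K_u_inv = np.linalg.solve(L.T, np.linalg.solve(L, np.eye(M)))
assert np.allclose(K_u_inv.dot(K_u + np.eye(M) * jitter_cov), np.eye(M))

half_logdet = np.sum(np.log(np.diag(L)))  # 0.5 * log det(K_u + jitter)
assert np.allclose(2 * half_logdet,
                   np.linalg.slogdet(K_u + np.eye(M) * jitter_cov)[1])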