This page collects typical usage examples of the scipy.outer function in Python. If you have been wondering what scipy.outer does, how to call it, or what real-world code that uses it looks like, the curated examples below should help.
Fifteen code examples of the outer function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Python code samples.
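Note before the examples: scipy.outer was simply a re-export of numpy.outer, and recent SciPy releases have deprecated and removed these NumPy aliases, so new code should call numpy.outer directly. As a quick orientation, here is a minimal sketch of what the function computes:

import numpy as np

# outer(a, b)[i, j] = a[i] * b[j]; inputs are flattened first.
a = np.array([1.0, 2.0, 3.0])
b = np.array([10.0, 20.0])
M = np.outer(a, b)                    # shape (3, 2)
assert M[1, 0] == a[1] * b[0]         # 2 * 10 == 20
assert np.allclose(np.outer(a.reshape(3, 1), b), M)  # flattening at work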
Example 1: svm_gradient_batch_fast
def svm_gradient_batch_fast(X_pred, X_exp, y, X_pred_ids, X_exp_ids, w, C=0.0001, sigma=1.):
    # sample kernel
    rnpred = X_pred_ids    # sp.random.randint(low=0, high=len(y), size=n_pred_samples)
    rnexpand = X_exp_ids   # sp.random.randint(low=0, high=len(y), size=n_expand_samples)
    # K = GaussKernMini_fast(X_pred.T, X_exp.T, sigma)
    X1 = X_pred.T
    X2 = X_exp.T
    # squared column norms, replicated into matrices via outer with a ones vector
    if sp.sparse.issparse(X1):
        G = sp.outer(X1.multiply(X1).sum(axis=0), sp.ones(X2.shape[1]))
    else:
        G = sp.outer((X1 * X1).sum(axis=0), sp.ones(X2.shape[1]))
    if sp.sparse.issparse(X2):
        H = sp.outer(X2.multiply(X2).sum(axis=0), sp.ones(X1.shape[1]))
    else:
        H = sp.outer((X2 * X2).sum(axis=0), sp.ones(X1.shape[1]))
    # Gaussian kernel via ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y
    K = sp.exp(-(G + H.T - 2. * fast_dot(X1.T, X2)) / (2. * sigma ** 2))
    # K = sp.exp(-(G + H.T - 2.*(X1.T.dot(X2)))/(2.*sigma**2))
    if sp.sparse.issparse(X1) or sp.sparse.issparse(X2):
        K = sp.array(K)
    # compute predictions
    yhat = fast_dot(K, w[rnexpand])
    # compute whether or not each prediction falls inside the margin
    inmargin = (yhat * y[rnpred]) <= 1
    # compute gradient
    G = C * w[rnexpand] - fast_dot((y[rnpred] * inmargin), K)
    return G, rnexpand
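Why the G + H.T - 2 * X1.T.dot(X2) construction yields a Gaussian kernel: it is the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y, with the squared column norms replicated into matrices by outer products against a vector of ones. A small dense-only sanity check (a sketch, with plain numpy.dot standing in for fast_dot):

import numpy as np
from scipy.spatial.distance import cdist

rng = np.random.default_rng(0)
X1 = rng.normal(size=(5, 7))    # columns are samples, matching the code above
X2 = rng.normal(size=(5, 4))
sigma = 1.5

G = np.outer((X1 * X1).sum(axis=0), np.ones(X2.shape[1]))
H = np.outer((X2 * X2).sum(axis=0), np.ones(X1.shape[1]))
D2 = G + H.T - 2.0 * X1.T.dot(X2)          # pairwise squared distances
assert np.allclose(D2, cdist(X1.T, X2.T, "sqeuclidean"))
K = np.exp(-D2 / (2.0 * sigma ** 2))       # the Gaussian kernel matrix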
Example 2: sample_moments
def sample_moments(X, k):
    """Get the sample moments from data"""
    N, d = X.shape
    # Partition X into two halves to independently estimate M2 and M3
    X1, X2 = X[:N // 2], X[N // 2:]
    # Get the moments
    M1 = X1.mean(0)
    M1_ = X2.mean(0)
    M2 = Pairs(X1, X1)
    M3 = lambda theta: TriplesP(X2, X2, X2, theta)
    # M3 = Triples(X2, X2, X2)
    # Estimate \sigma^2 as the k-th eigenvalue of M2 - mu mu^T
    sigma2 = svdvals(M2 - outer(M1, M1))[k - 1]
    assert sc.isreal(sigma2) and sigma2 > 0
    # P(M_2) is the best rank-k approximation to M2 - sigma^2 I
    P = approxk(M2 - sigma2 * eye(d), k)
    B = matrix_tensorify(eye(d), M1_)
    T = lambda theta: M3(theta) - sigma2 * (M1_.dot(theta) * eye(d) + outer(M1_, theta) + outer(theta, M1_))
    # T = M3 - sigma2 * (B + B.swapaxes(2, 1) + B.swapaxes(2, 0))
    return P, T
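The expression M2 - outer(M1, M1) is the usual second-moment-to-covariance correction. A sketch of that step in isolation, assuming Pairs(X, X) returns the empirical second moment E[x x^T]:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(1000, 3))
M1 = X.mean(0)
M2 = X.T.dot(X) / len(X)               # empirical E[x x^T]
cov = M2 - np.outer(M1, M1)            # subtract the rank-1 mean term
assert np.allclose(cov, np.cov(X.T, bias=True))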
Example 3: getHessianEstimate
def getHessianEstimate(self, feature):
    # Estimate the gradient of the state-action value function w.r.t. the parameters.
    n = self.paramdim
    # state-action value
    # self.qvalue = self.stateActionValue(feature, self.r)
    # gradient of the state-action value function w.r.t. the parameters
    qgradient = zeros((n,))
    for i in range(n):
        qgradient[i] = self.stateActionValue(feature, self.T[:, i])
    # The first n elements of the feature are the first-order basis (i.e.,
    # \nabla \log(\mu)); the following n^2 elements are the second-order
    # basis (i.e., \nabla^2 \log(\mu)).
    # self.loglhgrad = self.module.decodeFeature(feature, 'first_order')
    # self.loglhgrad = self.cacheFeature
    loglhhessian = self.module.decodeFeature(feature, 'second_order')
    term1 = self.qvalue * (loglhhessian - outer(self.loglhgrad, self.loglhgrad))
    term2 = outer(qgradient, self.loglhgrad)
    # ATTENTION! This algorithm is designed only for maximization problems
    # in which the Hessian matrix is negative semidefinite at the optimum.
    # As a result, the scaling matrix should be -1 * the inverse of the
    # Hessian to move in the right direction.
    return -1 * (term1 + term2 + term2.T)
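The outer(self.loglhgrad, self.loglhgrad) term comes from the likelihood-ratio identity Hess(mu)/mu = Hess(log mu) + outer(grad log mu, grad log mu). A numerical check of that identity for a toy log-density (hypothetical quadratic choice, central finite differences):

import numpy as np

A = np.array([[2.0, 0.3], [0.3, 1.0]])
theta = np.array([0.4, -0.7])
mu = lambda t: np.exp(-0.5 * t.dot(A).dot(t))    # log mu(t) = -0.5 t'At

grad_log = -A.dot(theta)
lhs = -A + np.outer(grad_log, grad_log)          # Hess(log mu) + rank-1 term

eps = 1e-4                                       # finite-difference Hessian of mu
E = np.eye(2) * eps
H = np.zeros((2, 2))
for i in range(2):
    for j in range(2):
        H[i, j] = (mu(theta + E[i] + E[j]) - mu(theta + E[i] - E[j])
                   - mu(theta - E[i] + E[j]) + mu(theta - E[i] - E[j])) / (4 * eps ** 2)
assert np.allclose(H / mu(theta), lhs, atol=1e-6)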
Example 4: calcInvFisher
def calcInvFisher(sigma, invSigma=None, factorSigma=None):
    """ Efficiently compute the exact inverse of the FIM of a Gaussian.
    Returns a list of the diagonal blocks. """
    if invSigma is None:
        invSigma = inv(sigma)
    if factorSigma is None:
        factorSigma = cholesky(sigma)
    dim = sigma.shape[0]
    invF = [mat(1 / (invSigma[-1, -1] + factorSigma[-1, -1] ** -2))]
    invD = 1 / invSigma[-1, -1]
    for k in reversed(list(range(dim - 1))):
        v = invSigma[k + 1:, k]
        w = invSigma[k, k]
        wr = w + factorSigma[k, k] ** -2
        u = dot(invD, v)
        s = dot(v, u)
        q = 1 / (w - s)
        qr = 1 / (wr - s)
        t = -(1 + q * s) / w
        tr = -(1 + qr * s) / wr
        invF.append(blockCombine([[qr, tr * u], [mat(tr * u).T, invD + qr * outer(u, u)]]))
        invD = blockCombine([[q, t * u], [mat(t * u).T, invD + q * outer(u, u)]])
    invF.append(sigma)
    invF.reverse()
    return invF
Example 5: diffmat
def diffmat(x):
    n = sp.size(x)
    e = sp.ones((n, 1))
    Xdiff = sp.outer(x, e) - sp.outer(e, x) + sp.identity(n)
    xprod = -reduce(mul, Xdiff)
    W = sp.outer(1 / xprod, e)
    D = W / sp.multiply(W.T, Xdiff)
    d = 1 - sum(D)
    for k in range(0, n):
        D[k, k] = d[k]
    return -D.T
Example 6: diffmat
def diffmat(x):  # x is an ordered array of grid points
    n = sp.size(x)
    e = sp.ones((n, 1))
    Xdiff = sp.outer(x, e) - sp.outer(e, x) + sp.identity(n)
    xprod = -reduce(mul, Xdiff)  # product of rows
    W = sp.outer(1 / xprod, e)
    D = W / sp.multiply(W.T, Xdiff)
    d = 1 - sum(D)
    for k in range(0, n):  # Set diagonal elements
        D[k, k] = d[k]
    return -D.T
Example 7: rnd_cov
def rnd_cov(N_d, sigma=0.1):
    if isinstance(sigma, (int, float)):
        sigma = sigma * sp.rand(N_d)
    cov = sp.outer(sigma, sigma)
    # correlation matrix
    r = 2 * sp.rand(N_d) - 1
    rho = sp.outer(r, r) + (1 - r ** 2) * sp.eye(N_d)
    cov = rho * cov
    return cov
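A usage sketch: the elementwise product of the rank-1 correlation matrix rho and outer(sigma, sigma) is positive semidefinite by the Schur product theorem, so the result is a valid covariance matrix. Here numpy stands in for the removed sp.rand/sp.outer/sp.eye aliases, and rnd_cov_np simply mirrors the function above:

import numpy as np

def rnd_cov_np(N_d, sigma=0.1):
    if isinstance(sigma, (int, float)):
        sigma = sigma * np.random.rand(N_d)
    cov = np.outer(sigma, sigma)
    r = 2 * np.random.rand(N_d) - 1
    rho = np.outer(r, r) + (1 - r ** 2) * np.eye(N_d)   # rank-1 correlation
    return rho * cov

C = rnd_cov_np(4)
assert np.allclose(C, C.T)                         # symmetric
assert np.all(np.linalg.eigvalsh(C) >= -1e-12)     # positive semidefinite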
Example 8: bptt
def bptt(self, x, t):
    """Back propagation through time for one sample.
    Reference: [1] Deep Learning, Ian Goodfellow, Yoshua Bengio and Aaron Courville, p. 385.
    """
    dU = sp.zeros_like(self.U)
    dW = sp.zeros_like(self.W)
    db = sp.zeros_like(self.b)
    dV = sp.zeros_like(self.V)
    dc = sp.zeros_like(self.c)
    tau = len(x)
    cells = self.forward_propagation(x)
    dh = sp.zeros(self.n_hiddens)
    for i in range(tau - 1, -1, -1):
        # FIXME:
        # 1. Should not use cell[i], since there may be multiple hidden layers.
        # 2. Using an exponential-family output should not be hard-coded here.
        time_input = x[i]
        one_hot_t = sp.zeros(self.n_features)
        one_hot_t[t[i]] = 1
        # Cell at time i
        cell = cells[i]
        # Hidden layer of the current cell
        hidden = cell[0]
        # Output layer of the current cell
        output = cell[1]
        # Hidden layer at time i - 1
        prev_hidden = cells[i - 1][0] if i - 1 >= 0 else None
        # Hidden layer at time i + 1
        next_hidden = cells[i + 1][0] if i + 1 < tau else None
        # Error at the current time i
        da = hidden.backward()
        next_da = next_hidden.backward() if next_hidden is not None else sp.zeros(self.n_hiddens)
        prev_h = prev_hidden.h if prev_hidden is not None else sp.zeros(self.n_hiddens)
        # FIXME: The error function should not be hard-coded here.
        # do = sp.dot(output.backward().T, -one_hot_t / output.y)
        do = output.y - one_hot_t
        dh = sp.dot(sp.dot(self.W.T, sp.diag(next_da)), dh) + sp.dot(self.V.T, do)
        # Gradients accumulated back through time
        dc += do
        db += da * dh
        dV += sp.outer(do, hidden.h)
        dW += sp.outer(da * dh, prev_h)
        dU[:, time_input] += da * dh
    return (dU, dW, db, dV, dc)
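The dV += sp.outer(do, hidden.h) line is the standard rank-1 weight gradient: for o = V h and any loss L(o), dL/dV = outer(dL/do, h). A toy finite-difference check of that rule with a squared-error loss:

import numpy as np

rng = np.random.default_rng(1)
V = rng.normal(size=(3, 4))
h = rng.normal(size=4)
t = rng.normal(size=3)

loss = lambda V: 0.5 * np.sum((V.dot(h) - t) ** 2)
do = V.dot(h) - t                  # dL/do
dV = np.outer(do, h)               # analytic rank-1 gradient

eps = 1e-6
V2 = V.copy()
V2[1, 2] += eps                    # perturb a single weight
assert abs((loss(V2) - loss(V)) / eps - dV[1, 2]) < 1e-4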
Example 9: GaussKernMini_fast
def GaussKernMini_fast(X1, X2, sigma):
    if sp.sparse.issparse(X1):
        G = sp.outer(X1.multiply(X1).sum(axis=0), sp.ones(X2.shape[1]))
    else:
        G = sp.outer((X1 * X1).sum(axis=0), sp.ones(X2.shape[1]))
    if sp.sparse.issparse(X2):
        H = sp.outer(X2.multiply(X2).sum(axis=0), sp.ones(X1.shape[1]))
    else:
        H = sp.outer((X2 * X2).sum(axis=0), sp.ones(X1.shape[1]))
    K = sp.exp(-(G + H.T - 2. * fast_dot(X1.T, X2)) / (2. * sigma ** 2))
    # K = sp.exp(-(G + H.T - 2.*(X1.T.dot(X2)))/(2.*sigma**2))
    if sp.sparse.issparse(X1) or sp.sparse.issparse(X2):
        K = sp.array(K)
    return K
Example 10: diffmat
def diffmat(x):
    """Compute the differentiation matrix for x, an ordered array
    of grid points. Uses barycentric formulas for stability.
    """
    n = sp.size(x)
    e = sp.ones((n, 1))
    Xdiff = sp.outer(x, e) - sp.outer(e, x) + sp.identity(n)
    xprod = -reduce(mul, Xdiff)  # product of rows
    W = sp.outer(1 / xprod, e)
    D = W / sp.multiply(W.T, Xdiff)
    d = 1 - sum(D)
    for k in range(0, n):  # Set diagonal elements
        D[k, k] = d[k]
    return -D.T
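A usage sketch of the differentiation matrix on Chebyshev-Lobatto nodes. The body mirrors diffmat above, with numpy replacing the removed scipy aliases; the diagonal fix-up makes each column sum to zero, so constants differentiate to zero:

from functools import reduce
from operator import mul
import numpy as np

def diffmat_np(x):
    n = x.size
    e = np.ones((n, 1))
    Xdiff = np.outer(x, e) - np.outer(e, x) + np.identity(n)
    xprod = -reduce(mul, Xdiff)            # column-wise products of x_i - x_j
    W = np.outer(1 / xprod, e)
    D = W / (W.T * Xdiff)
    D[np.diag_indices(n)] = 1 - D.sum(axis=0)   # columns then sum to zero
    return -D.T

n = 16
x = np.cos(np.pi * np.arange(n) / (n - 1))      # Chebyshev-Lobatto nodes on [-1, 1]
D = diffmat_np(x)
assert np.allclose(D.dot(np.sin(x)), np.cos(x), atol=1e-8)   # spectral accuracy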
Example 11: test_matrix_tensorify
def test_matrix_tensorify():
    """Test whether this tensorification routine works"""
    A = sc.eye(3)
    x = sc.random.rand(3)
    y = sc.ones(3)
    B = matrix_tensorify(A, x)
    assert (B.dot(y) == A * x.dot(y)).all()
    C = B.swapaxes(2, 0)
    assert (C.dot(y) == sc.outer(x, y)).all()
    D = B.swapaxes(2, 1)
    assert (D.dot(y) == sc.outer(y, x)).all()
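The three assertions pin the tensor down completely: B[i, j, k] = A[i, j] * x[k]. A one-line einsum version consistent with the test (a hypothetical reconstruction, not necessarily the original matrix_tensorify):

import numpy as np

def matrix_tensorify_sketch(A, x):
    # B[i, j, k] = A[i, j] * x[k]
    return np.einsum('ij,k->ijk', A, x)

A, x, y = np.eye(3), np.random.rand(3), np.ones(3)
B = matrix_tensorify_sketch(A, x)
assert np.allclose(B.dot(y), A * x.dot(y))                  # contracts the last axis
assert np.allclose(B.swapaxes(2, 0).dot(y), np.outer(x, y))
assert np.allclose(B.swapaxes(2, 1).dot(y), np.outer(y, x))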
Example 12: K
def K(eta, g, h, y, n2, pre_s1, pre_s2, pre_s3, qp, phix):
    phi = phix[:, 0]
    scaled_quadratures = phix[:, 1] / sqrt(eta)
    z = (outer(cos(phi), qp[:, 0]) + outer(sin(phi), qp[:, 1]) - scaled_quadratures[:, None]) / h
    zy = z / y
    zy2 = zy ** 2
    f_denom = 1. / (n2 + zy2[:, :, None])
    v = zy * sin(z) * dot(f_denom, pre_s3)
    cos_z = cos(z)
    v += zy2 * (dot(f_denom, pre_s1) - cos_z * dot(f_denom, pre_s2))
    del f_denom
    v /= sqrt(pi)
    v += cos_z * (exp(y ** 2) - 1. / (2. * sqrt(pi))) + (1. / (2. * sqrt(pi)) - 1.)
    v /= 4. * pi * g
    return v
Example 13: coulomb_mat_eigvals
def coulomb_mat_eigvals(atoms, at_idx, r_cut, do_calc_connect=True, n_eigs=20):
    if do_calc_connect:
        atoms.set_cutoff(8.0)
        atoms.calc_connect()
    pos = sp.vstack((sp.asarray([sp.asarray(a.diff) for a in atoms.neighbours[at_idx]]), sp.zeros(3)))
    Z = sp.hstack((sp.asarray([atoms.z[a.j] for a in atoms.neighbours[at_idx]]), atoms.z[at_idx]))
    M = sp.outer(Z, Z) / (sp.spatial.distance_matrix(pos, pos) + np.eye(pos.shape[0]))
    sp.fill_diagonal(M, 0.5 * Z ** 2.4)
    # data = [[atoms.z[a.j], sp.asarray(a.diff)] for a in atoms.neighbours[at_idx]]
    # data.append([atoms.z[at_idx], sp.array([0, 0, 0])])  # central atom
    # M = sp.zeros((len(data), len(data)))
    # for i, atom1 in enumerate(data):
    #     M[i, i] = 0.5 * atom1[0] ** 2.4
    #     for j, atom2 in enumerate(data[i + 1:]):
    #         j += i + 1
    #         M[i, j] = atom1[0] * atom2[0] / LA.norm(atom1[1] - atom2[1])
    # M = 0.5 * (M + M.T)
    eigs = (LA.eigh(M, eigvals_only=True))[::-1]
    if n_eigs is None:
        return eigs  # all eigenvalues
    elif eigs.size >= n_eigs:
        return eigs[:n_eigs]  # only the first few eigenvalues
    else:
        return sp.hstack((eigs, sp.zeros(n_eigs - eigs.size)))  # zero-padded extra fields
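The core of the example, with the atoms/neighbour machinery stripped away: off-diagonal entries are Z_i * Z_j / |r_i - r_j| and the diagonal is 0.5 * Z_i ** 2.4. A self-contained numpy sketch on a hypothetical water-like geometry (toy coordinates in Angstrom):

import numpy as np
from scipy.spatial import distance_matrix

pos = np.array([[0.00, 0.00, 0.0],     # O
                [0.96, 0.00, 0.0],     # H
                [-0.24, 0.93, 0.0]])   # H
Z = np.array([8.0, 1.0, 1.0])

# +eye avoids dividing by the zero self-distances; the diagonal is overwritten.
M = np.outer(Z, Z) / (distance_matrix(pos, pos) + np.eye(len(Z)))
np.fill_diagonal(M, 0.5 * Z ** 2.4)
eigs = np.linalg.eigvalsh(M)[::-1]     # descending, as in the function above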
Example 14: __iter__
def __iter__(self):
    dim = self.wrt.shape[0]
    I = scipy.eye(dim)
    # Square root of the covariance matrix.
    A = scipy.eye(dim)
    center = self.wrt.copy()
    n_evals = 0
    best_wrt = None
    best_x = float("-inf")
    for i, (args, kwargs) in enumerate(self.args):
        # Draw samples, evaluate, and update the best solution if a better
        # one was found.
        samples = scipy.random.standard_normal((self.batch_size, dim))
        samples = scipy.dot(samples, A) + center
        fitnesses = [self.f(samples[j], *args, **kwargs) for j in range(samples.shape[0])]
        fitnesses = scipy.array(fitnesses).flatten()
        if fitnesses.max() > best_x:
            best_x = fitnesses.max()
            self.wrt[:] = samples[fitnesses.argmax()]
        # Update center and variances.
        utilities = self.compute_utilities(fitnesses)
        center += scipy.dot(scipy.dot(utilities, samples), A)
        # TODO: vectorize this
        cov_gradient = sum([u * (scipy.outer(s, s) - I) for (s, u) in zip(samples, utilities)])
        # Note: scipy.linalg.expm2 has been removed from modern SciPy;
        # scipy.linalg.expm is the current equivalent.
        update = scipy.linalg.expm2(A * cov_gradient * self.step_rate * 0.5)
        A[:] = scipy.dot(A, update)
        yield dict(loss=-best_x, n_iter=i)
Example 15: LSTD_Qvalues
def LSTD_Qvalues(Ts, policy, R, fMap, discountFactor):
    """ LSTDQ is like LSTD, but with features replicated
    once for each possible action.
    Returns Q-values in a 2D array. """
    numA = len(Ts)
    dim = len(Ts[0])
    numF = len(fMap)
    fMapRep = zeros((numF * numA, dim * numA))
    for a in range(numA):
        fMapRep[numF * a:numF * (a + 1), dim * a:dim * (a + 1)] = fMap
    statMatrix = zeros((numF * numA, numF * numA))
    statResidual = zeros(numF * numA)
    for sto in range(dim):
        r = R[sto]
        fto = zeros(numF * numA)
        for nextA in range(numA):
            fto += fMapRep[:, sto + nextA * dim] * policy[sto][nextA]
        for sfrom in range(dim):
            for a in range(numA):
                ffrom = fMapRep[:, sfrom + a * dim]
                prob = Ts[a][sfrom, sto]
                statMatrix += outer(ffrom, ffrom - discountFactor * fto) * prob
                statResidual += ffrom * r * prob
    Qs = zeros((dim, numA))
    w = lstsq(statMatrix, statResidual)[0]
    for a in range(numA):
        Qs[:, a] = dot(w[numF * a:numF * (a + 1)], fMap)
    return Qs