本文整理汇总了Python中numpy.asfortranarray函数的典型用法代码示例。如果您正苦于以下问题:Python asfortranarray函数的具体用法?Python asfortranarray怎么用?Python asfortranarray使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了asfortranarray函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: adjoint
def adjoint(self, inputs, outputs):
    """The adjoint operator.

    Reads from inputs and writes to outputs.

    Applies the adjoint of the warp operator to ``inputs[0]`` and stores
    the result in ``outputs[0]``, dispatching either to a compiled Halide
    pipeline or to OpenCV depending on ``self.implementation``.

    NOTE(review): ``self.H`` appears to hold one homography (2-d) or a
    stack of homographies (last axis indexes the warp) — confirm against
    the constructor, which is not visible here.
    """
    if self.implementation == Impl['halide'] :
        # Halide implementation
        if len(self.H.shape) == 2:
            # Single homography: append a trailing axis so the Halide
            # pipeline always sees the same rank.
            tmpin = np.asfortranarray( inputs[0][..., np.newaxis].astype(np.float32) )
        else:
            tmpin = np.asfortranarray( inputs[0].astype(np.float32) )
        # Run the adjoint warp kernel; result lands in self.tmpadj.
        Halide('At_warp.cpp').At_warp( tmpin, self.Hf, self.tmpadj ) #Call
        np.copyto(outputs[0], self.tmpadj )
    else:
        # CV2 version
        inimg = inputs[0]
        if len(self.H.shape) == 2:
            # Warp with the (transposed) inverse homography; equivalent to
            # passing the forward matrix with + cv2.WARP_INVERSE_MAP.
            warpedInput = cv2.warpPerspective(np.asfortranarray(inimg), self.Hinv.T, inimg.shape[1::-1], flags=cv2.INTER_LINEAR,
                                              borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
            np.copyto( outputs[0], warpedInput )
        else:
            # Multiple warps: the adjoint of a sum of warps is the sum of
            # the individual adjoint warps, so accumulate into outputs[0].
            outputs[0][:] = 0.0
            for j in range(self.H.shape[2]):
                # asfortranarray copy is necessary due to array layout in opencv
                warpedInput = cv2.warpPerspective(np.asfortranarray(inimg[:,:,:,j]), self.Hinv[:,:,j].T, inimg.shape[1::-1], flags=cv2.INTER_LINEAR,
                                                  borderMode=cv2.BORDER_CONSTANT, borderValue=0.)
                outputs[0] += warpedInput
示例2: test_bind
def test_bind():
    """Exercise Representation.bind with valid and invalid endog layouts."""
    mod = Representation(1, k_states=2)

    # A plain list is rejected: bind requires an ndarray.
    assert_raises(ValueError, lambda: mod.bind([1, 2, 3, 4]))

    # A float 1-d array of length 10 is accepted as (nobs x 1).
    mod.bind(np.arange(10) * 1.)
    assert_equal(mod.nobs, 10)

    # Zero-length endogenous data is allowed.
    mod.bind(np.zeros(0, dtype=np.float64))

    # Arrays with more than two dimensions are invalid.
    assert_raises(ValueError,
                  lambda: mod.bind(np.arange(12).reshape(2, 2, 3) * 1.))

    # Accepted layouts: F-contiguous (1, 10) and C-contiguous (10, 1).
    mod.bind(np.asfortranarray(np.arange(10).reshape(1, 10)))
    assert_equal(mod.nobs, 10)
    mod.bind(np.arange(10).reshape(10, 1))
    assert_equal(mod.nobs, 10)

    # The transposed pairings of layout and shape are rejected.
    assert_raises(
        ValueError,
        lambda: mod.bind(np.asfortranarray(np.arange(10).reshape(10, 1))))
    assert_raises(ValueError, lambda: mod.bind(np.arange(10).reshape(1, 10)))
示例3: test_mem_layout
def test_mem_layout():
    """Gradient boosting must give identical results for any X/y memory layout."""
    def _fit_and_check(X_fit, y_fit):
        # random_state is fixed, so every layout must reproduce true_result.
        clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
        clf.fit(X_fit, y_fit)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(100, len(clf.estimators_))

    # Fortran- and C-ordered X.
    _fit_and_check(np.asfortranarray(X), y)
    _fit_and_check(np.ascontiguousarray(X), y)

    # C- and Fortran-ordered int32 y.
    _fit_and_check(X, np.ascontiguousarray(np.asarray(y, dtype=np.int32)))
    _fit_and_check(X, np.asfortranarray(np.asarray(y, dtype=np.int32)))
示例4: psiDerivativecomputations
def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
    """Back-propagate gradients of the psi statistics on the GPU.

    Given the gradients of the objective w.r.t. psi0/psi1/psi2, runs the
    prepared CUDA kernels and returns the gradients w.r.t. the kernel
    variance, lengthscale(s), inducing inputs Z, and the variational
    posterior parameters (mu, S, gamma).

    NOTE(review): assumes the gpuCache buffers were sized by an earlier
    psi-statistics computation pass — confirm against the caller.
    """
    # One lengthscale per input dimension => ARD; a single shared one otherwise.
    ARD = (len(lengthscale)!=1)
    N,M,Q = self.get_dimensions(Z, variational_posterior)
    # Pre-allocated device buffers (inputs, scratch, and per-block partials).
    psi1_gpu = self.gpuCache['psi1_gpu']
    psi2n_gpu = self.gpuCache['psi2n_gpu']
    l_gpu = self.gpuCache['l_gpu']
    Z_gpu = self.gpuCache['Z_gpu']
    mu_gpu = self.gpuCache['mu_gpu']
    S_gpu = self.gpuCache['S_gpu']
    gamma_gpu = self.gpuCache['gamma_gpu']
    dvar_gpu = self.gpuCache['dvar_gpu']
    dl_gpu = self.gpuCache['dl_gpu']
    dZ_gpu = self.gpuCache['dZ_gpu']
    dmu_gpu = self.gpuCache['dmu_gpu']
    dS_gpu = self.gpuCache['dS_gpu']
    dgamma_gpu = self.gpuCache['dgamma_gpu']
    grad_l_gpu = self.gpuCache['grad_l_gpu']
    grad_mu_gpu = self.gpuCache['grad_mu_gpu']
    grad_S_gpu = self.gpuCache['grad_S_gpu']
    grad_gamma_gpu = self.gpuCache['grad_gamma_gpu']
    log_denom1_gpu = self.gpuCache['log_denom1_gpu']
    log_denom2_gpu = self.gpuCache['log_denom2_gpu']
    log_gamma_gpu = self.gpuCache['log_gamma_gpu']
    log_gamma1_gpu = self.gpuCache['log_gamma1_gpu']
    if self.GPU_direct:
        # Incoming gradients already live on the device.
        dL_dpsi1_gpu = dL_dpsi1
        dL_dpsi2_gpu = dL_dpsi2
        dL_dpsi0_sum = gpuarray.sum(dL_dpsi0).get()
    else:
        # Upload host arrays; Fortran (column-major) layout matches the kernels.
        dL_dpsi1_gpu = self.gpuCache['dL_dpsi1_gpu']
        dL_dpsi2_gpu = self.gpuCache['dL_dpsi2_gpu']
        dL_dpsi1_gpu.set(np.asfortranarray(dL_dpsi1))
        dL_dpsi2_gpu.set(np.asfortranarray(dL_dpsi2))
        dL_dpsi0_sum = dL_dpsi0.sum()
    # Zero the accumulator buffers before the kernels add into them.
    self.reset_derivative()
    # t=self.g_psi1compDer(dvar_gpu,dl_gpu,dZ_gpu,dmu_gpu,dS_gpu,dL_dpsi1_gpu,psi1_gpu, np.float64(variance),l_gpu,Z_gpu,mu_gpu,S_gpu, np.int32(N), np.int32(M), np.int32(Q), block=(self.threadnum,1,1), grid=(self.blocknum,1),time_kernel=True)
    # print 'g_psi1compDer '+str(t)
    # t=self.g_psi2compDer(dvar_gpu,dl_gpu,dZ_gpu,dmu_gpu,dS_gpu,dL_dpsi2_gpu,psi2n_gpu, np.float64(variance),l_gpu,Z_gpu,mu_gpu,S_gpu, np.int32(N), np.int32(M), np.int32(Q), block=(self.threadnum,1,1), grid=(self.blocknum,1),time_kernel=True)
    # print 'g_psi2compDer '+str(t)
    # Launch the psi1 and psi2 derivative kernels; both accumulate partial
    # sums per thread-block into the d*_gpu buffers.
    self.g_psi1compDer.prepared_call((self.blocknum,1),(self.threadnum,1,1),dvar_gpu.gpudata,dl_gpu.gpudata,dZ_gpu.gpudata,dmu_gpu.gpudata,dS_gpu.gpudata,dgamma_gpu.gpudata,dL_dpsi1_gpu.gpudata,psi1_gpu.gpudata, log_denom1_gpu.gpudata, log_gamma_gpu.gpudata, log_gamma1_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata,gamma_gpu.gpudata,np.int32(N), np.int32(M), np.int32(Q))
    self.g_psi2compDer.prepared_call((self.blocknum,1),(self.threadnum,1,1),dvar_gpu.gpudata,dl_gpu.gpudata,dZ_gpu.gpudata,dmu_gpu.gpudata,dS_gpu.gpudata,dgamma_gpu.gpudata,dL_dpsi2_gpu.gpudata,psi2n_gpu.gpudata, log_denom2_gpu.gpudata, log_gamma_gpu.gpudata, log_gamma1_gpu.gpudata, np.float64(variance),l_gpu.gpudata,Z_gpu.gpudata,mu_gpu.gpudata,S_gpu.gpudata,gamma_gpu.gpudata,np.int32(N), np.int32(M), np.int32(Q))
    # psi0's gradient contributes directly to the variance gradient.
    dL_dvar = dL_dpsi0_sum + gpuarray.sum(dvar_gpu).get()
    # Reduce the per-block partials down to single gradient arrays.
    sum_axis(grad_mu_gpu,dmu_gpu,N*Q,self.blocknum)
    dL_dmu = grad_mu_gpu.get()
    sum_axis(grad_S_gpu,dS_gpu,N*Q,self.blocknum)
    dL_dS = grad_S_gpu.get()
    sum_axis(grad_gamma_gpu,dgamma_gpu,N*Q,self.blocknum)
    dL_dgamma = grad_gamma_gpu.get()
    dL_dZ = dZ_gpu.get()
    if ARD:
        # One gradient entry per lengthscale dimension.
        sum_axis(grad_l_gpu,dl_gpu,Q,self.blocknum)
        dL_dlengscale = grad_l_gpu.get()
    else:
        # Shared lengthscale: collapse everything to a scalar.
        dL_dlengscale = gpuarray.sum(dl_gpu).get()
    return dL_dvar, dL_dlengscale, dL_dZ, dL_dmu, dL_dS, dL_dgamma
示例5: test_nmf
def test_nmf():
    """Run spams.nmf on sliding image patches and report the objective.

    Loads 'boat.png', extracts normalized 16x16 patches, factorizes them
    with non-negative matrix factorization, and prints the timing plus the
    mean reconstruction cost. Returns None; skips quietly when the image
    file is unavailable.
    """
    img_file = 'boat.png'
    try:
        img = Image.open(img_file)
    except (IOError, OSError):
        # Only a failure to open the image should skip the test; a bare
        # `except:` would also swallow KeyboardInterrupt/SystemExit.
        print("Cannot load image %s : skipping test" % img_file)
        return None
    I = np.array(img) / 255.
    if I.ndim == 3:
        # Color image: fold the channel axis into the column dimension.
        A = np.asfortranarray(I.reshape((I.shape[0], I.shape[1] * I.shape[2])), dtype=myfloat)
        rgb = True
    else:
        A = np.asfortranarray(I, dtype=myfloat)
        rgb = False

    # Extract every 16x16 sliding patch, then keep one patch in ten.
    m = 16
    n = 16
    X = spams.im2col_sliding(A, m, n, rgb)
    X = X[:, ::10]
    # Normalize each patch (column) to unit l2 norm.
    X = np.asfortranarray(X / np.tile(np.sqrt((X * X).sum(axis=0)), (X.shape[0], 1)), dtype=myfloat)

    ########## FIRST EXPERIMENT ###########
    tic = time.time()
    (U, V) = spams.nmf(X, return_lasso=True, K=49, numThreads=4, iter=-5)
    tac = time.time()
    t = tac - tic
    print('time of computation for Dictionary Learning: %f' % t)

    # Mean residual cost 0.5 * ||X - U V||^2 per signal.
    print('Evaluating cost function...')
    Y = X - U * V
    R = np.mean(0.5 * (Y * Y).sum(axis=0))
    print('objective function: %f' % R)
    return None
示例6: test_mul
def test_mul():
    """Elementwise multiply of a distributed matrix by a matrix, a vector and a scalar."""
    ms, ns = 5, 14

    gA = np.asfortranarray(np.random.standard_normal((ms, ns)).astype(np.float64))
    dA = core.DistributedMatrix.from_global_array(gA, rank=0)

    gB = np.asfortranarray(np.random.standard_normal((ms, ns)).astype(np.float64))
    dB = core.DistributedMatrix.from_global_array(gB, rank=0)

    # Matrix * matrix.
    gC = (dA * dB).to_global_array(rank=0)

    # Matrix * vector, broadcast across rows.
    a = np.random.standard_normal(ns).astype(np.float64)
    comm.Bcast(a, root=0)  # ensure all processes hold identical data
    gD = (dA * a).to_global_array(rank=0)

    # Matrix * scalar.
    alpha = 2.345
    gE = (dA * alpha).to_global_array(rank=0)

    # Only the root rank holds the gathered global results.
    if rank == 0:
        assert allclose(gA * gB, gC)
        assert allclose(gA * a, gD)
        assert allclose(gA * alpha, gE)
示例7: subset_selection_xtx
def subset_selection_xtx(X, Y):
    """Subsets selection using EvalSubsetsUsingXtx in the Earth package.

    Returns a (var_count, var_count) index matrix whose i-th row lists the
    terms kept in the best subset of size i+1, together with the RSS of
    each subset size.
    """
    # The C routine expects column-major double arrays.
    X = numpy.asfortranarray(X, dtype=ctypes.c_double)
    Y = numpy.asfortranarray(Y, dtype=ctypes.c_double)
    if Y.ndim == 1:
        Y = Y.reshape((-1, 1), order="F")
    if X.shape[0] != Y.shape[0]:
        raise ValueError("First dimensions of bx and y must be the same")

    cases, var_count = X.shape
    resp_count = Y.shape[1]

    # Output/workspace buffers filled in by the C call.
    subsets = numpy.zeros((var_count, var_count), dtype=ctypes.c_bool, order="F")
    rss_vec = numpy.zeros((var_count,), dtype=ctypes.c_double, order="F")
    weights = numpy.ones((cases,), dtype=ctypes.c_double, order="F")

    rval = _c_eval_subsets_xtx(subsets, rss_vec, cases, resp_count, var_count,
                               X, Y, weights)
    if rval == 1:
        raise numpy.linalg.LinAlgError("Lin. dep. terms in X")
    if rval == 2:
        raise Exception("Trying to prune the intercept.")
    if rval != 0:
        raise Exception("Error %i" % rval)

    # Column j of `subsets` flags the terms used by the subset of size j+1;
    # convert each boolean column into the indices of the selected terms.
    subsets_ind = numpy.zeros((var_count, var_count), dtype=int)
    for i, used in enumerate(subsets.T):
        subsets_ind[i, :i + 1] = numpy.where(used)[0]
    return subsets_ind, rss_vec
示例8: test_mask_halide
def test_mask_halide(self):
    """Check the Halide mask linear operator against elementwise multiply."""
    if not halide_installed():
        return

    # Load the test image; Image.open yields a Pillow image, not an array.
    data_dir = os.path.dirname(os.path.realpath(__file__))
    testimg_filename = os.path.join(data_dir, 'data', 'angela.jpg')
    img = Image.open(testimg_filename)
    np_img = np.asfortranarray(im2nparray(img))

    # Random non-negative mask of the image's shape.
    output = np.zeros_like(np_img)
    mask = np.asfortranarray(np.random.randn(*list(np_img.shape)).astype(np.float32))
    mask = np.maximum(mask, 0.)

    # Forward operator: should equal pointwise mask * image.
    Halide('A_mask.cpp').A_mask(np_img, mask, output)
    output_ref = mask * np_img

    # Adjoint operator: masking is diagonal, so the transpose acts the same.
    output_trans = np.zeros_like(np_img)
    Halide('At_mask.cpp').At_mask(np_img, mask, output_trans)

    self.assertItemsAlmostEqual(output, output_ref)
    self.assertItemsAlmostEqual(output_trans, output_ref)
示例9: test_matrix_multiply_ff
def test_matrix_multiply_ff(self):
    """matrix multiply two FORTRAN layout matrices"""
    lhs = np.asfortranarray(np.random.randn(M, N))
    rhs = np.asfortranarray(np.random.randn(N, K))
    # gulinalg must agree with numpy's reference dot product.
    assert_allclose(gulinalg.matrix_multiply(lhs, rhs), np.dot(lhs, rhs))
示例10: runRandomWalking
def runRandomWalking(self, w, nAlgs, nWalks = 1, nIters=-1, nErrorsLimit=-1, allowSimilar=False, pTransition=0.8,
                     randomSeed=0):
    """Run the plugin's random-walk search starting from weight vector w.

    Returns a named tuple (W, isSource) where W is an (nAlgs x nFeatures)
    float32 matrix of walked weight vectors and isSource is an (nAlgs x 1)
    uint8 flag column. Both are Fortran-ordered because the native DLL
    writes them column-major. NOTE(review): -1 for nIters/nErrorsLimit
    presumably means "no limit" — confirm against the plugin docs.
    """
    RunRandomWalkingResult = collections.namedtuple('RunRandomWalkingResult', 'W isSource')
    nFeatures = w.shape[0]
    # Replicate the starting point once per walk.
    w0 = np.tile(w, (nWalks, 1))
    sessionStats = self.getStats()
    if sessionStats.nFeatures != nFeatures:
        raise Exception('sessionStats.nFeatures != w0.shape[1]')
    # Output buffers the DLL fills in-place (Fortran order, fixed dtypes).
    W = np.asfortranarray(np.zeros((nAlgs, nFeatures)).astype(np.float32))
    isSource = np.asfortranarray(np.zeros((nAlgs, 1)).astype(np.uint8))
    # Raw C pointers into the numpy buffers for the foreign call.
    w0_p = w0.ctypes.data_as(self.lsPlugin.c_float_p)
    W_p = W.ctypes.data_as(self.lsPlugin.c_float_p)
    isSource_p = isSource.ctypes.data_as(self.lsPlugin.c_uint8_p)
    # Single-element float array passed by pointer.
    pTransition_p = (ctypes.c_float * 1)()
    pTransition_p[0] = pTransition;
    #pTransition.ctypes.data_as(self.c_float_p)
    # The DLL returns the number of algorithms actually produced.
    nAlgs = self.lsPlugin.dll.runRandomWalking(self.sessionId, w0_p, nWalks, nAlgs, nIters, nErrorsLimit,
                                               allowSimilar, pTransition_p, randomSeed, W_p, isSource_p)
    self.lsPlugin.verifyCall(nAlgs)
    return RunRandomWalkingResult(W, isSource)
示例11: setup
def setup(self):
    """Build 2-d and 3-d hypersphere-mask fixtures as Fortran-ordered matrices.

    Generates mask stacks with nanshe, then flattens each mask to a row
    and transposes so columns index masks, stored as a Fortran-ordered
    numpy matrix (self.g for 2-d, self.g3 for 3-d).
    """
    def _flatten_to_fortran_matrix(masks):
        # One row per mask -> transpose to (pixels x masks) -> Fortran-
        # ordered matrix; shared by the 2-d and 3-d fixtures to avoid
        # duplicating the pipeline.
        flat = masks.reshape((masks.shape[0], -1))
        return numpy.asfortranarray(numpy.asmatrix(flat.transpose()))

    self.p = numpy.array([[27, 51],
                          [66, 85],
                          [77, 45]])
    self.p3 = numpy.array([[27, 51, 37],
                           [66, 85, 25],
                           [77, 45, 73]])
    self.space = numpy.array((100, 100))
    self.space3 = numpy.array((100, 100, 100))
    self.radii = numpy.array((5, 6, 7))

    self.g = _flatten_to_fortran_matrix(
        nanshe.syn.data.generate_hypersphere_masks(
            self.space, self.p, self.radii
        )
    )
    self.g3 = _flatten_to_fortran_matrix(
        nanshe.syn.data.generate_hypersphere_masks(
            self.space3, self.p3, self.radii
        )
    )
示例12: endog
def endog(self, value):
    """Setter for the endogenous data array.

    Accepts data shaped either (nobs, k_endog) or (k_endog, nobs),
    stores it internally as Fortran-ordered (k_endog, nobs), and
    rebuilds the lagged-data matrix used by the AR calculations.
    """
    # order='A' preserves the input's existing memory layout.
    self._endog = np.array(value, order='A')
    # (T x M)
    if (self.nobs, self.k_endog) == self._endog.shape:
        self._endog = self._endog.T
    # (M x T)
    elif (self.k_endog, self.nobs) == self._endog.shape:
        pass
    else:
        # NOTE(review): the message is missing a space after 'Required' —
        # cosmetic only, left unchanged here.
        raise ValueError('Invalid endogenous array shape. Required'
                         '(%d, %d) or (%d, %d). Got %s'
                         % (self.nobs, self.k_endog, self.k_endog,
                            self.nobs, str(self._endog.shape)))
    # Downstream (presumably Fortran/BLAS) code requires column-major data.
    if not self._endog.flags['F_CONTIGUOUS']:
        self._endog = np.asfortranarray(self._endog)
    # Create a new lag matrix, shaped (k_ar, nobs) = (k_ar, T):
    # stack the i-step-lagged series for i = 1..order.
    self._lagged = np.asfortranarray(np.hstack([
        self.endog[:, self.order-i:-i].T
        for i in range(1, self.order+1)
    ]).T)
    # Set calculation flags so cached results are recomputed.
    self._recalculate = True
示例13: _set_params
def _set_params(self, p):
    """Update kernel and likelihood parameters and refresh cached matrices.

    Splits the flat (transformed) parameter vector p between the kernel
    and the likelihood, rebuilds the covariance K and its factorizations,
    and recomputes dL_dK, the gradient of the log-likelihood w.r.t. K.
    """
    # First block of p belongs to the kernel, the remainder to the likelihood.
    new_kern_params = p[:self.kern.num_params_transformed()]
    new_likelihood_params = p[self.kern.num_params_transformed():]
    old_likelihood_params = self.likelihood._get_params()
    self.kern._set_params_transformed(new_kern_params)
    self.likelihood._set_params_transformed(new_likelihood_params)
    self.K = self.kern.K(self.X)
    #Re fit likelihood approximation (if it is an approx), as parameters have changed
    if isinstance(self.likelihood, Laplace):
        self.likelihood.fit_full(self.K)
    self.K += self.likelihood.covariance_matrix
    # Inverse, Cholesky factor, inverse factor and log-determinant of K.
    self.Ki, self.L, self.Li, self.K_logdet = pdinv(self.K)
    # the gradient of the likelihood wrt the covariance matrix
    if self.likelihood.YYT is None:
        # alpha = np.dot(self.Ki, self.likelihood.Y)
        # Solve K alpha = Y via the Cholesky factor instead of forming Ki @ Y.
        alpha, _ = dpotrs(self.L, self.likelihood.Y, lower=1)
        self.dL_dK = 0.5 * (tdot(alpha) - self.output_dim * self.Ki)
    else:
        # tmp = mdot(self.Ki, self.likelihood.YYT, self.Ki)
        # Two triangular solves compute Ki @ YYT @ Ki without explicit inverses;
        # dpotrs requires Fortran-ordered input, hence asfortranarray.
        tmp, _ = dpotrs(self.L, np.asfortranarray(self.likelihood.YYT), lower=1)
        tmp, _ = dpotrs(self.L, np.asfortranarray(tmp.T), lower=1)
        self.dL_dK = 0.5 * (tmp - self.output_dim * self.Ki)
    #Adding dZ_dK (0 for a non-approximate likelihood, compensates for
    #additional gradients of K when log-likelihood has non-zero Z term)
    self.dL_dK += self.likelihood.dZ_dK
示例14: test_conjGrad
def test_conjGrad():
    """Compare numpy.linalg.solve against spams.conjGrad on a random SPD system.

    Builds A = B^T B (symmetric positive semi-definite), solves A y = b
    twenty times with each solver, prints timings and mean residuals, and
    returns the maximum elementwise difference between the two solutions.
    """
    A = np.asfortranarray(np.random.normal(size=(5000, 500)))
    #* np.random.seed(0)
    #* A = np.asfortranarray(np.random.normal(size = (10,5)))
    # A^T A is symmetric PSD, as conjugate gradient requires.
    A = np.asfortranarray(np.dot(A.T, A), dtype=myfloat)
    # numpy accepts only 'C'/'F' order codes ("FORTRAN" was Python-2-era).
    b = np.ones((A.shape[1],), dtype=myfloat, order="F")
    x0 = b
    tol = 1e-4
    itermax = int(0.5 * len(b))

    # Reference solve with numpy (Python-3 print/range replace the original
    # Python-2-only `print` statements and `xrange`).
    tic = time.time()
    for i in range(0, 20):
        y1 = np.linalg.solve(A, b)
    tac = time.time()
    print(" Time (numpy): ", tac - tic)
    x1 = np.abs(b - np.dot(A, y1))
    print("Mean error on b : %f" % (x1.sum() / b.shape[0]))

    # Conjugate-gradient solve with spams.
    tic = time.time()
    for i in range(0, 20):
        y2 = spams.conjGrad(A, b, x0, tol, itermax)
        #* y2 = spams.conjGrad(A,b)
    tac = time.time()
    print(" Time (spams): ", tac - tic)
    x1 = np.dot(A, y2)
    x2 = np.abs(b - x1)
    print("Mean error on b : %f" % (x2.sum() / b.shape[0]))

    err = abs(y1 - y2)
    return err.max()
示例15: test_cd
def test_cd():
    """Compare spams.lasso (LARS) against spams.cd coordinate descent."""
    np.random.seed(0)

    def _unit_norm_gaussian(shape):
        # Random Gaussian matrix with each column scaled to unit l2 norm,
        # Fortran-ordered as spams requires.
        mat = np.asfortranarray(np.random.normal(size=shape))
        norms = np.tile(np.sqrt((mat * mat).sum(axis=0)), (mat.shape[0], 1))
        return np.asfortranarray(mat / norms, dtype=myfloat)

    X = _unit_norm_gaussian((64, 100))
    D = _unit_norm_gaussian((64, 100))

    # parameter of the optimization procedure are chosen
    lambda1 = 0.015
    mode = spams.PENALTY

    # LARS via spams.lasso.
    tic = time.time()
    alpha = spams.lasso(X, D, lambda1=lambda1, mode=mode, numThreads=4)
    t = time.time() - tic
    residual = X - D * alpha
    E = np.mean(0.5 * (residual * residual).sum(axis=0) + lambda1 * np.abs(alpha).sum(axis=0))
    print("%f signals processed per second for LARS" % (X.shape[1] / t))
    print('Objective function for LARS: %g' % E)

    # Coordinate descent via spams.cd, warm-started from an empty sparse matrix.
    tol = 0.001
    itermax = 1000
    tic = time.time()
    # A0 = ssp.csc_matrix(np.empty((alpha.shape[0],alpha.shape[1])))
    A0 = ssp.csc_matrix((alpha.shape[0], alpha.shape[1]), dtype=myfloat)
    alpha2 = spams.cd(X, D, A0, lambda1=lambda1, mode=mode, tol=tol, itermax=itermax, numThreads=4)
    t = time.time() - tic
    print("%f signals processed per second for CD" % (X.shape[1] / t))
    residual = X - D * alpha2
    E = np.mean(0.5 * (residual * residual).sum(axis=0) + lambda1 * np.abs(alpha).sum(axis=0))
    print('Objective function for CD: %g' % E)
    print('With Random Design, CD can be much faster than LARS')
    return None