本文整理汇总了Python中pycuda.gpuarray.dot函数的典型用法代码示例。如果您正苦于以下问题:Python dot函数的具体用法?Python dot怎么用?Python dot使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了dot函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Average_Alpha2
def Average_Alpha2( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """Imaginary part of the cross overlaps <Psi2|Psi3> + <Psi4|Psi1>,
    scaled by -2*dX*dY (grid-cell area).  Presumably an alpha-matrix
    expectation value of a 4-spinor -- TODO confirm against caller."""
    cross_32 = gpuarray.dot( Psi3_GPU, Psi2_GPU.conj() ).get().imag
    cross_14 = gpuarray.dot( Psi1_GPU, Psi4_GPU.conj() ).get().imag
    return -2. * self.dX * self.dY * (cross_32 + cross_14)
示例2: test_dot_allocator
def test_dot_allocator(self):
    """dot() must honor a custom ``allocator`` for its result array."""
    from pytest import skip
    skip("https://github.com/inducer/pycuda/issues/163")
    import pycuda.tools
    pool = pycuda.tools.DeviceMemoryPool()

    # Host-side operands and the CPU reference result.
    a_cpu = np.random.randint(low=512, high=1024, size=1024)
    b_cpu = np.random.randint(low=512, high=1024, size=1024)
    expected = np.dot(a_cpu, b_cpu)

    a_gpu = gpuarray.to_gpu(a_cpu)
    b_gpu = gpuarray.to_gpu(b_cpu)

    # Same reduction twice: default allocator vs. the memory pool.
    dot_default = gpuarray.dot(a_gpu, b_gpu)
    dot_pooled = gpuarray.dot(a_gpu, b_gpu, allocator=pool.allocate)

    # Both must agree with the CPU result ...
    assert expected == dot_default.get()
    assert expected == dot_pooled.get()
    # ... and each result array must carry the allocator it was made with.
    assert dot_default.allocator == a_gpu.allocator
    assert dot_pooled.allocator == pool.allocate
示例3: cnvinv_objfun
def cnvinv_objfun(self, z, sz, y_gpu, alpha=0., beta=0.):
    """
    Objective function value for the 'lbfgsb' mode of the deconv method.
    See deconv for details.
    """
    # The optimizer hands us a flat numpy vector; reshape and cast it.
    if z.__class__ == np.ndarray:
        z = np.array(np.reshape(z, sz)).astype(np.float32)
    z_gpu = cua.to_gpu(z)

    # Data term: 0.5 * || y - C(z) ||^2 accumulated in float64.
    self.res_gpu = y_gpu - self.cnv(z_gpu)
    obj = 0.5 * (cua.dot(self.res_gpu, self.res_gpu, dtype=np.float64))

    # Tikhonov regularization; the 'X' and 'F' cases differ because the
    # size of the corresponding z differs.
    # alpha > 0: Tikhonov on the gradient of z (via a Laplacian).
    if alpha > 0:
        if self.__id__ == 'X':
            self.lz_gpu = shock.laplace_stack_gpu(z_gpu, mode='same')
        elif self.__id__ == 'F':
            self.lz_gpu = gputools.laplace_gpu(z_gpu, mode='same')
        obj += 0.5 * alpha * (cua.dot(z_gpu, self.lz_gpu, dtype=np.float64))

    # beta > 0: plain Tikhonov on z itself.
    if beta > 0:
        obj += 0.5 * beta * (cua.dot(z_gpu, z_gpu, dtype=np.float64))

    # NOTE(review): indentation was lost in this source; reconstruction
    # assumes z_gpu is created unconditionally -- confirm against upstream.
    return obj.get()
示例4: Average_Beta
def Average_Beta( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """Squared norms of components 1+2 minus those of 3+4, times dX*dY.
    Presumably <Psi|beta|Psi> for a 4-spinor -- TODO confirm."""
    # Keep the original accumulation order (float reproducibility).
    total = gpuarray.dot( Psi1_GPU, Psi1_GPU.conj() ).get()
    total += gpuarray.dot( Psi2_GPU, Psi2_GPU.conj() ).get()
    total -= gpuarray.dot( Psi3_GPU, Psi3_GPU.conj() ).get()
    total -= gpuarray.dot( Psi4_GPU, Psi4_GPU.conj() ).get()
    return self.dX * self.dY * total
示例5: Average_Alpha2
def Average_Alpha2( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """Antisymmetric combination of cross overlaps, times 1j*dX*dY*dZ
    (3-D variant of Average_Alpha2)."""
    # Keep the original accumulation order (float reproducibility).
    total = -gpuarray.dot(Psi4_GPU, Psi1_GPU.conj()).get()
    total += gpuarray.dot(Psi3_GPU, Psi2_GPU.conj()).get()
    total -= gpuarray.dot(Psi2_GPU, Psi3_GPU.conj()).get()
    total += gpuarray.dot(Psi1_GPU, Psi4_GPU.conj()).get()
    return 1j * self.dX * self.dY * self.dZ * total
示例6: _Average_Px
def _Average_Px( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """Sum of dot(|psi|^2, Px_GPU) over the four components, scaled by
    dX*dY*dZ.  Presumably <Px> of a 4-spinor -- TODO confirm."""
    acc = gpuarray.dot(Psi1_GPU.__abs__()**2, self.Px_GPU).get()
    for psi in (Psi2_GPU, Psi3_GPU, Psi4_GPU):
        acc += gpuarray.dot(psi.__abs__()**2, self.Px_GPU).get()
    return self.dX * self.dY * self.dZ * acc
示例7: Average_Y
def Average_Y( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """Sum of dot(|psi|^2, Y_GPU) over the four components, scaled by
    dX*dY.  Presumably the position expectation <Y> -- TODO confirm."""
    acc = gpuarray.dot(Psi1_GPU.__abs__()**2, self.Y_GPU).get()
    for psi in (Psi2_GPU, Psi3_GPU, Psi4_GPU):
        acc += gpuarray.dot(psi.__abs__()**2, self.Y_GPU).get()
    return self.dX * self.dY * acc
示例8: Average_Py
def Average_Py( self, Psi1_GPU, Psi2_GPU, Psi3_GPU, Psi4_GPU):
    """Sum of dot(|psi|^2, Py_GPU) over the four components, scaled by
    the momentum-space cell dPx*dPy (inputs presumably live in momentum
    representation here -- TODO confirm)."""
    acc = gpuarray.dot(Psi1_GPU.__abs__()**2, self.Py_GPU).get()
    for psi in (Psi2_GPU, Psi3_GPU, Psi4_GPU):
        acc += gpuarray.dot(psi.__abs__()**2, self.Py_GPU).get()
    return self.dPx * self.dPy * acc
示例9: compute_obj
def compute_obj(self, w_gpu):
    """Quadratic objective: lamda-weighted data misfit, beta-weighted
    coupling to self.u_gpu, plus an optional eta-weighted Laplacian term."""
    # Residual between the (weighted) estimate and the observed data.
    self.dfs_gpu = 1. * (self.weight(w_gpu) - self.data_gpu)
    data_term = 0.5 * self.lamda * cua.dot(self.dfs_gpu, self.dfs_gpu)

    # Coupling term 0.5*beta*||w - u||^2; compute the difference once.
    diff_gpu = w_gpu - self.u_gpu
    reg_term = 0.5 * self.beta * cua.dot(diff_gpu, diff_gpu)

    # Optional smoothness term 0.5*eta*<w, L w>.
    if self.eta:
        reg_term += 0.5 * self.eta * cua.dot(w_gpu, laplace3d_gpu(w_gpu))
    return data_term + reg_term
示例10: check_termination
def check_termination(self):
    """
    Check the various termination criteria.

    Sets ``self.term_reason`` to a descriptive string as soon as one
    enabled criterion fires (and returns), or to 0 when none is met.
    """
    # Termination based on running time.
    if (self.options.time_limit):
        # BUG FIX: the original read `time.clock - self.time_start`,
        # subtracting from the *function object* (a TypeError at runtime).
        # NOTE(review): time.clock was removed in Python 3.8; on py3 use
        # time.perf_counter() instead.
        self.time = time.clock() - self.time_start
        if (self.time >= self.options.maxtime):
            self.term_reason = 'Exceeded time limit'
            return
    # Break on small relative change in x (ratio of L2 norms).
    if (self.options.use_tolx):
        if (np.sqrt(cua.dot(self.dx, self.dx).get()) /
            np.sqrt(cua.dot(self.oldx, self.oldx).get()) < self.options.tolx):
            self.term_reason = 'Relative change in x small enough'
            return
    # Break on small change in the objective value (after a warm-up of
    # two iterations, so self.oldobj is meaningful).
    if (self.options.use_tolo and self.iter > 2):
        delta = abs(self.obj - self.oldobj)
        if (delta < self.options.tolo):
            self.term_reason = 'Relative change in objvalue small enough'
            return
    # Check if change in x and gradient are small enough
    # -- intentionally disabled for now.
    # if (np.sqrt((cua.dot(self.dx,self.dx).get())) < self.options.tolx) \
    #    or (np.sqrt(cua.dot(self.dg,self.dg).get()) < self.options.tolg):
    #     self.term_reason = '|x_t+1 - x_t|=0 or |grad_t+1 - grad_t| < 1e-9'
    #     return
    # Plain old iteration cap.
    if (self.iter >= self.options.maxiter):
        self.term_reason = 'Maximum number of iterations reached'
        return
    # KKT violation: |x^T grad| small.
    if (self.options.use_kkt):
        # BUG FIX: the original referenced bare `options.tolk` (NameError);
        # the threshold lives on self.options like every other option.
        if np.abs(np.sqrt(cua.dot(self.x, self.grad).get())) <= self.options.tolk:
            self.term_reason = '|x^T * grad| < opt.pbb_gradient_norm'
            return
    # Gradient check: infinity norm of the gradient.
    if (self.options.use_tolg):
        nr = cua.max(cua.fabs(self.grad)).get()
        if (nr < self.options.tolg):
            self.term_reason = '|| grad ||_inf < opt.tolg'
            return
    # No condition met.
    self.term_reason = 0
示例11: __init__
def __init__(self, a, b, pagelocked_allocator):
    """Enqueue an asynchronous GPU dot product of *a* and *b* and record
    a CUDA event so completion can be polled later."""
    # Enqueue the reduction first, then record the event right behind it.
    self.gpu_result = gpuarray.dot(a, b)
    self.gpu_finished = False
    self.gpu_finished_evt = drv.Event()
    self.gpu_finished_evt.record()
    # Used later to allocate pagelocked host memory for the transfer.
    self.pagelocked_allocator = pagelocked_allocator
示例12: one_iteration
def one_iteration(self, compute_real_residual=False):
    # One preconditioned conjugate-gradient step, typed up from
    # J.R. Shewchuk, "An Introduction to the Conjugate Gradient Method
    # Without the Agonizing Pain", Edition 1 1/4 [8/1994], Appendix B3.
    # q = A d
    q = self.operator(self.d)
    # d^T A d  (curvature along the current search direction)
    myip = gpuarray.dot(self.d, q)
    # alpha = delta / d^T A d; guarded_div presumably protects against a
    # zero denominator -- TODO confirm its contract.
    alpha = self.guarded_div(self.delta, myip)
    # x <- x + alpha d   (lc2 is a linear-combination kernel)
    self.lc2(1, self.x, alpha, self.d, out=self.x)
    if compute_real_residual:
        # Periodically recompute the true residual r = b - A x
        # to cancel accumulated round-off in the recurrence.
        self.residual = self.lc2(
            1, self.rhs, -1, self.operator(self.x))
    else:
        # Cheap recurrence: r <- r - alpha q
        self.lc2(1, self.residual, -alpha, q, out=self.residual)
    # Preconditioning: s = M^{-1} r
    s = self.precon(self.residual)
    delta_old = self.delta
    # Start delta_new = r^T s asynchronously; only the GPU-side scalar
    # is needed for the next step, the host value is read later.
    delta = AsyncInnerProduct(self.residual, s,
        self.pagelocked_allocator)
    self.delta = delta.gpu_result
    # beta = delta_new / delta_old
    beta = self.guarded_div(self.delta, delta_old)
    # d <- s + beta d   (new conjugate search direction)
    self.lc2(1, s, beta, self.d, out=self.d)
    if compute_real_residual:
        # Keep the async product alive so its host value can be fetched.
        self.real_delta_queue.append(delta)
示例13: norm
def norm(self):
    """The L2-norm on the flattened vector."""
    state = self.state
    # Data only on the device: reduce on the GPU, fetch the scalar.
    if state is DeviceDataMixin.DEVICE:
        return np.sqrt(gpuarray.dot(self.array, self.array).get())
    # Data available on the host (or nothing allocated on the device yet).
    host_states = (DeviceDataMixin.DEVICE_UNALLOCATED,
                   DeviceDataMixin.HOST, DeviceDataMixin.BOTH)
    if state in host_states:
        return np.sqrt(np.dot(self.data_ro, self.data_ro))
    raise RuntimeError('Data neither on host nor device, oops!')
示例14: magnitude
def magnitude(vec, vec2):
    """
    Normalized dot product of two vectors, computed on the GPU.

    Returns dot(vec, vec2) / (|vec| * |vec2|) with float32 reductions —
    i.e. a cosine similarity.  Presumably that was the intent; the name
    'magnitude' is a leftover from the commented-out prototype --
    TODO confirm against callers.
    """
    gpu_arry = gpuarr.to_gpu_async(vec)
    gpu_arry2 = gpuarr.to_gpu_async(vec2)
    mag = cumath.sqrt(gpuarr.dot(gpu_arry, gpu_arry, dtype=np.float32))
    mag2 = cumath.sqrt(gpuarr.dot(gpu_arry2, gpu_arry2, dtype=np.float32))
    # BUG FIX: the original wrote `/ mag + mag2`, which parses as
    # `(dot / mag) + mag2` by operator precedence; the normalization
    # factor is the product of the two magnitudes.
    product = gpuarr.dot(gpu_arry, gpu_arry2, dtype=np.float32) / (mag * mag2)
    print(product)
    return product.get()
示例15: test_dot
def test_dot(self):
    """gpuarray.dot must match numpy.dot within 1e-4 relative error."""
    from pycuda.curandom import rand as curand
    # Two large random vectors generated directly on the device.
    a_gpu = curand((200000,))
    a = a_gpu.get()
    b_gpu = curand((200000,))
    b = b_gpu.get()
    # CPU reference vs. GPU reduction.
    expected = numpy.dot(a, b)
    actual = gpuarray.dot(a_gpu, b_gpu).get()
    assert abs(actual - expected) / abs(expected) < 1e-4