

Python pyopencl.Context Method Code Examples

This article collects typical code examples of the Python pyopencl.Context method. If you are wondering what pyopencl.Context is for, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the pyopencl package that this method belongs to.


The sections below present 14 code examples of the pyopencl.Context method, sorted by popularity by default.
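
Before diving into the collected examples, here is a minimal, self-contained sketch of the pattern that almost all of them follow: pick a platform and device, build a Context from it (or let create_some_context choose one), and attach a CommandQueue. The platform/device indices 0 are illustrative assumptions; adjust them for your hardware.

import pyopencl as cl

# Explicit route: choose a platform and a device by index (index 0 is only an assumption).
platform = cl.get_platforms()[0]
device = platform.get_devices()[0]
context = cl.Context(devices=[device])

# Convenience route: let pyopencl pick a device non-interactively.
# context = cl.create_some_context(interactive=False)

# A command queue on that context is needed to actually run kernels or copy data.
queue = cl.CommandQueue(context)
print("Using device:", context.devices[0].name)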

Example 1: _get_device

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def _get_device(self, gpuid):
        """Return GPU devices, context, and queue."""
        all_platforms = cl.get_platforms()
        platform = next((p for p in all_platforms if
                         p.get_devices(device_type=cl.device_type.GPU) != []),
                        None)
        if platform is None:
            raise RuntimeError('No OpenCL GPU device found.')
        my_gpu_devices = platform.get_devices(device_type=cl.device_type.GPU)
        context = cl.Context(devices=my_gpu_devices)
        if gpuid > len(my_gpu_devices)-1:
            raise RuntimeError(
                'No device with gpuid {0} (available device IDs: {1}).'.format(
                    gpuid, np.arange(len(my_gpu_devices))))
        queue = cl.CommandQueue(context, my_gpu_devices[gpuid])
        if self.settings['debug']:
            print("Selected Device: ", my_gpu_devices[gpuid].name)
        return my_gpu_devices, context, queue 
Developer: pwollstadt, Project: IDTxl, Lines: 20, Source: estimators_opencl.py

Example 2: __init__

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def __init__(self, device=0, platform=0, iterations=7, compiler_options=None):
        """Creates OpenCL device context and reads device properties

        :param device: The ID of the OpenCL device to use for benchmarking
        :type device: int

        :param iterations: The number of iterations to run the kernel during benchmarking, 7 by default.
        :type iterations: int
        """
        if not cl:
            raise ImportError("Error: pyopencl not installed, please install e.g. using 'pip install pyopencl'.")

        self.iterations = iterations
        #setup context and queue
        platforms = cl.get_platforms()
        self.ctx = cl.Context(devices=[platforms[platform].get_devices()[device]])

        self.queue = cl.CommandQueue(self.ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
        self.mf = cl.mem_flags
        #inspect device properties
        self.max_threads = self.ctx.devices[0].get_info(cl.device_info.MAX_WORK_GROUP_SIZE)
        self.compiler_options = compiler_options or []

        #collect environment information
        dev = self.ctx.devices[0]
        env = dict()
        env["platform_name"] = dev.platform.name
        env["platform_version"] = dev.platform.version
        env["device_name"] = dev.name
        env["device_version"] = dev.version
        env["opencl_c_version"] = dev.opencl_c_version
        env["driver_version"] = dev.driver_version
        env["iterations"] = self.iterations
        env["compiler_options"] = compiler_options
        self.env = env
        self.name = dev.name 
Developer: benvanwerkhoven, Project: kernel_tuner, Lines: 38, Source: opencl.py

Example 3: initialize_opencl

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def initialize_opencl(self, cl_platform_index=None, cl_device_index=None, ctx=None, queue=None):
        assert HAVE_PYOPENCL, 'PyOpenCL is not installed'

        global default_platform_index
        global default_device_index
        
        if ctx is None and queue is None:
            if cl_platform_index is None:
                if default_platform_index is not None and default_device_index is not None:
                    self.cl_platform_index = default_platform_index
                    self.cl_device_index = default_device_index
                    self.devices = [pyopencl.get_platforms()[self.cl_platform_index].get_devices()[self.cl_device_index]]
                    self.ctx = pyopencl.Context(self.devices)
                else:
                    self.ctx = pyopencl.create_some_context(interactive=False)
            else:
                self.cl_platform_index = cl_platform_index
                self.cl_device_index = cl_device_index
                self.devices = [pyopencl.get_platforms()[self.cl_platform_index].get_devices()[self.cl_device_index]]
                self.ctx = pyopencl.Context(self.devices)
            self.queue = pyopencl.CommandQueue(self.ctx)
        else:
            assert cl_platform_index is None and cl_device_index is None
            self.ctx = ctx
            self.queue = queue
        
        self.max_wg_size = self.ctx.devices[0].get_info(pyopencl.device_info.MAX_WORK_GROUP_SIZE) 
Developer: tridesclous, Project: tridesclous, Lines: 29, Source: cltools.py

Example 4: _get_device

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def _get_device(gpuid):
    """Return GPU devices, context, and queue."""
    platform = cl.get_platforms()
    platf_idx = find_nonempty(platform)
    my_gpu_devices = platform[platf_idx].get_devices(
                                                device_type=cl.device_type.GPU)
    context = cl.Context(devices=my_gpu_devices)
    queue = cl.CommandQueue(context, my_gpu_devices[gpuid])
    if VERBOSE:
        print(("Selected Device: ", my_gpu_devices[gpuid].name))
    return my_gpu_devices, context, queue 
Developer: pwollstadt, Project: IDTxl, Lines: 13, Source: neighbour_search_opencl.py

Example 5: test_gpu_vector_sum

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def test_gpu_vector_sum(a, b):
    #define the PyOpenCL Context
    platform = cl.get_platforms()[0]
    device = platform.get_devices()[0]
    context = cl.Context([device])
    queue = cl.CommandQueue(context, \
                            properties=cl.command_queue_properties.PROFILING_ENABLE)   
    #prepare the data structure
    a_buffer = cl.Buffer\
               (context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=a)
    b_buffer = cl.Buffer\
               (context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=b)
    c_buffer = cl.Buffer\
               (context, cl.mem_flags.WRITE_ONLY, b.nbytes)   
    program = cl.Program(context, """
    __kernel void sum(__global const float *a, __global const float *b, __global float *c)
    {
        int i = get_global_id(0);
        int j;
        for(j = 0; j < 10000; j++)
        {
            c[i] = a[i] + b[i];
        }
    }""").build()
    #start the gpu test
    gpu_start_time = time()   
    event = program.sum(queue, a.shape, None, a_buffer, b_buffer, c_buffer)   
    event.wait()   
    elapsed = 1e-9*(event.profile.end - event.profile.start)   
    print("GPU Kernel evaluation Time: {0} s".format(elapsed))   
    c_gpu = np.empty_like(a)  
    cl._enqueue_read_buffer(queue, c_buffer, c_gpu).wait()  
    gpu_end_time = time()  
    print("GPU Time: {0} s".format(gpu_end_time - gpu_start_time))   
    return c_gpu   

#start the test 
Developer: PacktPublishing, Project: Python-Parallel-Programming-Cookbook-Second-Edition, Lines: 39, Source: testApplicationPyopencl.py
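
A note on Example 5: cl._enqueue_read_buffer is a private pyopencl entry point and may not exist in current releases. On recent pyopencl versions the read-back step is normally written with cl.enqueue_copy instead; a minimal sketch, reusing the queue, c_buffer, and a from the example above, would be:

c_gpu = np.empty_like(a)
cl.enqueue_copy(queue, c_gpu, c_buffer).wait()  # copy the device buffer back into the host array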

Example 6: get_context

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def get_context():
    contexts = []
    platforms = cl.get_platforms()
    for platform in platforms:
        devices = platform.get_devices()
        for device in devices:
            try:
                context = cl.Context(devices=[device])
                contexts.append(context)
            except:
                print('Can NOT create context from P(%s)-D(%s)'%(platform, device))
                continue
    return contexts[0] if len(contexts) > 0 else None 
Developer: PyOCL, Project: OpenCLGA, Lines: 15, Source: main.py

Example 7: __create_context

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def __create_context(self):
        self.platform = cl.get_platforms()[self.platform_index]
        assert self.platform is not None
        self.device = self.platform.get_devices()[self.device_index]
        assert self.device is not None
        self.dev_type = self.device.get_info(di.TYPE)
        self.context = cl.Context(devices=[self.device])
        return self.context

    ## Create opencl context according to specific information. 
Developer: PyOCL, Project: OpenCLGA, Lines: 12, Source: ocl_ga_client.py

Example 8: context

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def context(self):
        if self.domain is not OpenCLMemory:
            return Context(self.domain)

        if self.vcl_sub_context:
            return Context(self.vcl_sub_context)
        else:
            return None 
Developer: viennacl, Project: pyviennacl-dev, Lines: 10, Source: backend.py

Example 9: __init__

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def __init__(self, batchSize, maxT, maxC, kernelVariant=1, enableGPUDebug=False):
		"specify size: number of batch elements, number of time-steps, number of characters. Set kernelVariant to either 1 or 2. Set enableGPUDebug to True to debug kernel via CodeXL."

		# force rebuild of program such that GPU debugger can attach to kernel
		self.enableGPUDebug = enableGPUDebug
		if enableGPUDebug:
			os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
			os.environ['PYOPENCL_NO_CACHE'] = '1'

		#consts
		self.batchSize = batchSize
		self.maxT = maxT
		self.maxC = maxC
		assert kernelVariant in [1, 2]
		self.kernelVariant = kernelVariant

		# platform, context, queue
		platforms = cl.get_platforms()
		assert platforms
		self.platform = platforms[0] # take first platform
		devices = self.platform.get_devices(cl.device_type.GPU) # get GPU devices
		assert devices
		self.device = devices[0] # take first GPU
		self.context = cl.Context([self.device]) # context contains the first GPU
		self.queue = cl.CommandQueue(self.context, self.device) # command queue to first GPU

		# buffer
		sizeOfFloat32 = 4
		batchBufSize = batchSize * maxC * maxT * sizeOfFloat32
		self.batchBuf = cl.Buffer(self.context, cl.mem_flags.READ_ONLY, size=batchBufSize, hostbuf=None)
		self.res = np.zeros([batchSize, maxT]).astype(np.int32)
		self.resBuf = cl.Buffer(self.context, cl.mem_flags.WRITE_ONLY, self.res.nbytes)
		self.tmpBuf = cl.Buffer(self.context, cl.mem_flags.WRITE_ONLY, self.res.nbytes)

		# compile program and use defines for program-constants to avoid passing private variables
		buildOptions = '-D STEP_BEGIN={} -D MAX_T={} -D MAX_C={}'.format(2 ** math.ceil(math.log2(maxT)), maxT, maxC)
		self.program = cl.Program(self.context, open('BestPathCL.cl').read()).build(buildOptions)

		# variant 1: single pass
		if kernelVariant == 1:
			self.kernel1 = cl.Kernel(self.program, 'bestPathAndCollapse')
			self.kernel1.set_arg(0, self.batchBuf)
			self.kernel1.set_arg(1, self.resBuf)

			# all time-steps must fit into a work-group
			assert maxT <= self.kernel1.get_work_group_info(cl.kernel_work_group_info.WORK_GROUP_SIZE, self.device)

		# variant 2: two passes
		else:
			# kernel1: calculate best path
			self.kernel1 = cl.Kernel(self.program, 'bestPath')
			self.kernel1.set_arg(0, self.batchBuf)
			self.kernel1.set_arg(1, self.tmpBuf)

			# kernel2: collapse best path
			self.kernel2 = cl.Kernel(self.program, 'collapsePath')
			self.kernel2.set_arg(0, self.tmpBuf)
			self.kernel2.set_arg(1, self.resBuf)

			# all chars must fit into a work-group
			assert maxC <= self.kernel1.get_work_group_info(cl.kernel_work_group_info.WORK_GROUP_SIZE, self.device) 
Developer: githubharald, Project: CTCDecoder, Lines: 63, Source: BestPathCL.py
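
Example 9 only builds the kernels and binds their arguments with set_arg; it does not show the launch itself. A kernel prepared this way is typically enqueued later with cl.enqueue_nd_range_kernel and the result read back with cl.enqueue_copy. The sketch below is a hypothetical launch of kernel variant 1, reusing the attributes from the example; the global/local work sizes are assumptions for illustration, not necessarily what BestPathCL.py actually uses.

# hypothetical launch of the pre-configured kernel1 (variant 1); work sizes are illustrative assumptions
global_size = (self.batchSize * self.maxT,)  # one work-item per (batch element, time-step) pair
local_size = (self.maxT,)                    # all time-steps of one batch element in a single work-group
event = cl.enqueue_nd_range_kernel(self.queue, self.kernel1, global_size, local_size)
event.wait()
cl.enqueue_copy(self.queue, self.res, self.resBuf).wait()  # read the decoded label sequences back into self.res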

Example 10: load_kernel

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def load_kernel(self):
		self.context = cl.Context([self.device], None, None)
		if (self.device.extensions.find('cl_amd_media_ops') != -1):
			self.defines += ' -DBITALIGN'
			if self.device_name in ['Cedar',
									'Redwood',
									'Juniper',
									'Cypress',
									'Hemlock',
									'Caicos',
									'Turks',
									'Barts',
									'Cayman',
									'Antilles',
									'Wrestler',
									'Zacate',
									'WinterPark',
									'BeaverCreek']:
				self.defines += ' -DBFI_INT'

		kernel_file = open('phatk.cl', 'r')
		kernel = kernel_file.read()
		kernel_file.close()
		m = md5(); m.update(''.join([self.device.platform.name, self.device.platform.version, self.device.name, self.defines, kernel]))
		cache_name = '%s.elf' % m.hexdigest()
		binary = None
		try:
			binary = open(cache_name, 'rb')
			self.program = cl.Program(self.context, [self.device], [binary.read()]).build(self.defines)
		except (IOError, cl.LogicError):
			self.program = cl.Program(self.context, kernel).build(self.defines)
			if (self.defines.find('-DBFI_INT') != -1):
				patchedBinary = patch(self.program.binaries[0])
				self.program = cl.Program(self.context, [self.device], [patchedBinary]).build(self.defines)
			binaryW = open(cache_name, 'wb')
			binaryW.write(self.program.binaries[0])
			binaryW.close()
		finally:
			if binary: binary.close()

		self.kernel = self.program.search

		if not self.worksize:
			self.worksize = self.kernel.get_work_group_info(cl.kernel_work_group_info.WORK_GROUP_SIZE, self.device) 
Developer: theRealTacoTime, Project: poclbm, Lines: 46, Source: OpenCLMiner.py

Example 11: init_cl

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def init_cl(self,
                id_platform=-1,
                id_device=-1,
                use_gpu=True,
                print_info=False,
                context_properties=None):

        platforms = pyopencl.get_platforms()
        if len(platforms)==0:
            raise Exception("Failed to find any OpenCL platforms.")

        device_types = [pyopencl.device_type.GPU, pyopencl.device_type.CPU]

        # get all platforms and devices
        all_platforms_devs = dict([((_ip, _id, t), d)
                                   for _ip, p in enumerate(platforms)
                                   for _it, t in enumerate(device_types)
                                   for _id, d in enumerate(p.get_devices(t))])

        if len(all_platforms_devs)==0:
            raise Exception("Failed to find any OpenCL platform or device.")

        device_type = pyopencl.device_type.GPU if use_gpu else pyopencl.device_type.CPU

        device = None

        # try to get the prefered platform...
        # otherwise choose the best one
        try:
            device = all_platforms_devs[(id_platform, id_device, device_type)]
        except KeyError:
            logger.warning("prefered platform/device (%s/%s) not available (device type = %s) \n"
                           "...choosing the best from the rest"%
                           (id_platform, id_device, device_type))
            # get the best available device
            device, _ = max([(d, t) for (_ip, _id, t), d in all_platforms_devs.items()],
                            key=OCLDevice.device_priority)

        if device is None:
            raise Exception("Failed to find a valid device")

        self.context = pyopencl.Context(devices=[device],
                                        properties=context_properties)

        self.device = device

        self.queue = pyopencl.CommandQueue(self.context,
                                           properties=pyopencl.command_queue_properties.PROFILING_ENABLE)

        self.imageformats = pyopencl.get_supported_image_formats(self.context,
                                                                 pyopencl.mem_flags.READ_WRITE,
                                                                 pyopencl.mem_object_type.IMAGE3D)

        logger.info("intialized, device: {}".format(self.device))
        if print_info:
            self.print_info() 
Developer: maweigert, Project: gputools, Lines: 58, Source: ocldevice.py

Example 12: gs_mod_gpu

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def gs_mod_gpu(idata,itera=10,osize=256):
    
    
    cut=osize//2
    
    pl=cl.get_platforms()[0]
    devices=pl.get_devices(device_type=cl.device_type.GPU)
    ctx = cl.Context(devices=[devices[0]])
    queue = cl.CommandQueue(ctx)

    plan = Plan(idata.shape, queue=queue,dtype=complex128) # does not work with "complex128"
    
    src = str(Template(KERNEL).render(
        double_support=all(
            has_double_support(dev) for dev in devices),
        amd_double_support=all(
            has_amd_double_support(dev) for dev in devices)
        ))
    prg = cl.Program(ctx,src).build() 
    

    idata_gpu=cl_array.to_device(queue, ifftshift(idata).astype("complex128"))
    fdata_gpu=cl_array.empty_like(idata_gpu)
    rdata_gpu=cl_array.empty_like(idata_gpu)
    plan.execute(idata_gpu.data,fdata_gpu.data)
    
    mask=exp(2.j*pi*random(idata.shape))
    mask[512-cut:512+cut,512-cut:512+cut]=0
    
    
    idata_gpu=cl_array.to_device(queue, ifftshift(idata+mask).astype("complex128"))
    fdata_gpu=cl_array.empty_like(idata_gpu)
    rdata_gpu=cl_array.empty_like(idata_gpu)
    error_gpu=cl_array.to_device(ctx, queue, zeros(idata_gpu.shape).astype("double"))
    plan.execute(idata_gpu.data,fdata_gpu.data)
    
    e=1000
    ea=1000
    for i in range (itera):
        prg.norm(queue, fdata_gpu.shape, None,fdata_gpu.data)
        plan.execute(fdata_gpu.data,rdata_gpu.data,inverse=True)
        #~ prg.norm1(queue, rdata_gpu.shape,None,rdata_gpu.data,idata_gpu.data,error_gpu.data, int32(cut))
        norm1=prg.norm1
        norm1.set_scalar_arg_dtypes([None, None, None, int32])
        norm1(queue, rdata_gpu.shape,None,rdata_gpu.data,idata_gpu.data,error_gpu.data, int32(cut))
        
        e= sqrt(cl_array.sum(error_gpu).get())/(2*cut)

        #~ if e>ea: 
           #~ 
            #~ break
        #~ ea=e
        plan.execute(rdata_gpu.data,fdata_gpu.data)
    
    fdata=fdata_gpu.get()
    fdata=ifftshift(fdata)
    fdata=exp(1.j*angle(fdata))
    return fdata 
Developer: cihologramas, Project: pyoptools, Lines: 60, Source: gs.py

Example 13: __init__

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def __init__(self, *args, **kwargs):
        """Do initialisation tasks common to all Leaf subclasses, then pass
        control onto the overridden ``_init_leaf`` function.

        Tasks include expression computation and configuration of data types
        and views.
        """
        self._context = None
        self.dtype = None

        args = list(args)
        for arg in args:
            REMOVE_ARG = False

            if isinstance(arg, list):
                for item in arg:
                    if isinstance(item, MagicMethods):
                        arg[arg.index(item)] = item.value

            #if isinstance(arg, number):
            #    args[args.index(arg)] = asscalar(arg)

            ARG_IS_NUMBER = False
            try:
                if issubclass(arg, number) or issubclass(arg, dtype):
                    ARG_IS_NUMBER = True
            except TypeError: pass
            if ARG_IS_NUMBER:
                self.dtype = np_result_type(arg)
                REMOVE_ARG = True

            ARG_IS_MEM_DOMAIN = False
            try:
                if issubclass(arg, backend.MemoryDomain):
                    ARG_IS_MEM_DOMAIN = True
            except TypeError: pass
            if ARG_IS_MEM_DOMAIN or isinstance(arg, backend.Context):
                self._context = backend.Context(arg)
                REMOVE_ARG = True
            elif WITH_OPENCL:
                if isinstance(arg, cl.Context):
                    self._context = backend.Context(arg)
                    REMOVE_ARG = True

            if REMOVE_ARG:
                args.remove(arg)

        if 'context' in kwargs.keys():
            self._context = backend.Context(kwargs['context'])
        elif self._context is None:
            self._context = backend.default_context

        if 'dtype' in kwargs.keys():    
            self.dtype = dtype(kwargs['dtype']) 

        if 'view_of' in kwargs.keys():
            self.view_of = kwargs['view_of']
        if 'view' in kwargs.keys():
            self.view = kwargs['view']

        self._init_leaf(args, kwargs) 
Developer: viennacl, Project: pyviennacl-dev, Lines: 63, Source: pycore.py

Example 14: __init__

# Required import: import pyopencl [as alias]
# Or: from pyopencl import Context [as alias]
def __init__(self, domain_or_context=DefaultMemory):
        create_vcl_context_from = None # set this later

        if domain_or_context is None:
            domain_or_context = DefaultMemory

        if isinstance(domain_or_context, Context):
            self.domain = domain_or_context.domain
            self.vcl_context = domain_or_context.vcl_context
            self.sub_context = domain_or_context.sub_context
            #if self.domain is OpenCLMemory:
            #    vcl.set_active_context(self)
            return

        if WITH_OPENCL:
            if isinstance(domain_or_context, cl.Context):
                self.domain = OpenCLMemory
                self.sub_context = domain_or_context
                create_vcl_context_from = vcl.get_viennacl_object(self.sub_context)

        try:
            domain_or_context_is_domain = issubclass(domain_or_context, MemoryDomain)
        except TypeError:
            domain_or_context_is_domain = False

        if domain_or_context_is_domain: # cf default arg
            self.domain = domain_or_context
            if domain_or_context is OpenCLMemory:
                self.sub_context = vcl.default_context
                create_vcl_context_from = vcl.get_viennacl_object(self.sub_context)
            else:
                create_vcl_context_from = self.domain.vcl_memory_type

        if create_vcl_context_from is None:
            raise TypeError("Cannot handle argument of type %s. Note: WITH_OPENCL is %s." % (type(domain_or_context), WITH_OPENCL))

        self.vcl_context = _v.context(create_vcl_context_from)
        if self.domain is OpenCLMemory:
            for device in self.devices:
                if not self.queues[device]:
                    self.add_queue(device)
            if not self.cache_path:
                new_path = appdirs.user_data_dir
                if not os.path.isdir(new_path):
                    try: os.makedirs(new_path)
                    except: pass
                try:
                    new_path = os.path.join(new_path, '')
                    open(os.path.join(new_path, 'permission_test'), 'a+')
                except OSError as e:
                    log.warning("Could not open cache path '%s' for writing, disabling kernel cache. Exception was: %s" % (new_path, e))
                    new_path = ''
                self.cache_path = new_path
        #if self.domain is OpenCLMemory:
        #    vcl.set_active_context(self) 
Developer: viennacl, Project: pyviennacl-dev, Lines: 57, Source: backend.py


Note: The pyopencl.Context method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.