This article collects typical usage examples of the pyopencl.get_platforms method in Python. If you have been wondering what pyopencl.get_platforms does, how to call it, or where to find working examples of it, the hand-picked code samples below should help. You can also explore further usage examples for the enclosing module, pyopencl.
The sections below present 15 code examples of the pyopencl.get_platforms method, sorted by popularity by default.
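For orientation, here is a minimal sketch (not taken from any of the projects below) of the basic call pattern: pyopencl.get_platforms() returns the installed OpenCL platforms, and each platform lists its devices via get_devices().

import pyopencl as cl

# Minimal sketch: enumerate every OpenCL platform and its devices.
for platform_index, platform in enumerate(cl.get_platforms()):
    print('Platform %d: %s (%s)' % (platform_index, platform.name, platform.vendor))
    for device_index, device in enumerate(platform.get_devices()):
        print('  Device %d: %s' % (device_index, device.name.strip()))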
Example 1: get_opencl_devices
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def get_opencl_devices():
    """Return a list of available OpenCL devices.

    Raises ImportError if OpenCL is not found.
    Raises IOError if no OpenCL devices are found.
    """
    device_strings = []
    platforms = pyopencl.get_platforms()  # @UndefinedVariable
    for i, platform in enumerate(platforms):
        devices = platform.get_devices()
        for j, device in enumerate(devices):
            # Label each device as "[platform-device] name", truncated to 25 characters.
            # merge_whitespace is a helper defined elsewhere in this module,
            # presumably collapsing runs of whitespace in the device name.
            device_strings.append('[%d-%d] %s' %
                                  (i, j, merge_whitespace(device.name)[:25]))
    if len(device_strings) == 0:
        raise IOError
    return device_strings

Example 2: _get_device
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def _get_device(self, gpuid):
    """Return GPU devices, context, and queue."""
    # Assumes numpy is imported at module level as np.
    all_platforms = cl.get_platforms()
    # Pick the first platform that exposes at least one GPU device.
    platform = next((p for p in all_platforms if
                     p.get_devices(device_type=cl.device_type.GPU) != []),
                    None)
    if platform is None:
        raise RuntimeError('No OpenCL GPU device found.')
    my_gpu_devices = platform.get_devices(device_type=cl.device_type.GPU)
    context = cl.Context(devices=my_gpu_devices)
    if gpuid > len(my_gpu_devices) - 1:
        raise RuntimeError(
            'No device with gpuid {0} (available device IDs: {1}).'.format(
                gpuid, np.arange(len(my_gpu_devices))))
    queue = cl.CommandQueue(context, my_gpu_devices[gpuid])
    if self.settings['debug']:
        print("Selected Device: ", my_gpu_devices[gpuid].name)
    return my_gpu_devices, context, queue

Example 3: __setstate__
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def __setstate__(self, state):
    self.atomics_flavor = state["atomics_flavor"]
    self.fortran_abi = state["fortran_abi"]
    self.pyopencl_module_name = state["pyopencl_module_name"]
    dev_id = state["device_id"]
    if dev_id is None:
        self.device = None
    else:
        import pyopencl as cl
        # Re-identify the pickled device by its model/version identifier.
        matches = [
            dev
            for plat in cl.get_platforms()
            for dev in plat.get_devices()
            if dev.hashable_model_and_version_identifier == dev_id]
        if matches:
            self.device = matches[0]
        else:
            # LoopyError is the project's exception class, imported elsewhere.
            raise LoopyError(
                "cannot unpickle device '%s': not found"
                % dev_id)

Example 4: query_devices
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def query_devices(c_p):
    import pyopencl as cl
    platforms = cl.get_platforms()
    devices = platforms[0].get_devices()
    data = []
    for pidx in range(len(platforms)):
        devices = platforms[pidx].get_devices()
        for didx in range(len(devices)):
            data.append((pidx, didx))
    # Send the (platform_index, device_index) pairs back over the connection.
    c_p.send(data)

## OpenCLGAWorker is a spawned process which is supposed to run OpenCLGA on a
#  target device which is decided by OpenCLGAClient.
#  @param platform_index Platform index which is queried and assigned by the client.
#  @param device_index Device index which is queried and assigned by the client.
#  @param ip The IP address of the server.
#  @param port The listening port of the server.
#  @var exit_evt An event waited on in method run(); it is set when an 'exit'
#       command is received or the worker is terminating.
#  @var uuid A unique ID for the UI to identify the worker.
#  @var running A variable shared by the client & worker processes indicating
#       whether the worker is running or not.
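The comment above describes the OpenCLGAWorker process, whose implementation is not reproduced in this excerpt. As a rough, hypothetical sketch of how such a worker might turn the assigned (platform_index, device_index) pair into an OpenCL context, one could write something like the following; the helper name resolve_assigned_device is illustrative and not part of OpenCLGA.

import pyopencl as cl

def resolve_assigned_device(platform_index, device_index):
    # Hypothetical helper: map the indices assigned by the client back to a
    # concrete pyopencl device and build a context and queue on it.
    device = cl.get_platforms()[platform_index].get_devices()[device_index]
    context = cl.Context(devices=[device])
    queue = cl.CommandQueue(context)
    return device, context, queue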
Example 5: __init__
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def __init__(self, device=0, platform=0, iterations=7, compiler_options=None):
    """Creates OpenCL device context and reads device properties

    :param device: The ID of the OpenCL device to use for benchmarking
    :type device: int

    :param iterations: The number of iterations to run the kernel during benchmarking, 7 by default.
    :type iterations: int
    """
    if not cl:
        raise ImportError("Error: pyopencl not installed, please install e.g. using 'pip install pyopencl'.")
    self.iterations = iterations

    # setup context and queue
    platforms = cl.get_platforms()
    self.ctx = cl.Context(devices=[platforms[platform].get_devices()[device]])
    self.queue = cl.CommandQueue(self.ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
    self.mf = cl.mem_flags

    # inspect device properties
    self.max_threads = self.ctx.devices[0].get_info(cl.device_info.MAX_WORK_GROUP_SIZE)
    self.compiler_options = compiler_options or []

    # collect environment information
    dev = self.ctx.devices[0]
    env = dict()
    env["platform_name"] = dev.platform.name
    env["platform_version"] = dev.platform.version
    env["device_name"] = dev.name
    env["device_version"] = dev.version
    env["opencl_c_version"] = dev.opencl_c_version
    env["driver_version"] = dev.driver_version
    env["iterations"] = self.iterations
    env["compiler_options"] = compiler_options
    self.env = env
    self.name = dev.name

Example 6: initialize_opencl
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def initialize_opencl(self, cl_platform_index=None, cl_device_index=None, ctx=None, queue=None):
    assert HAVE_PYOPENCL, 'PyOpenCL is not installed'
    global default_platform_index
    global default_device_index

    if ctx is None and queue is None:
        if cl_platform_index is None:
            if default_platform_index is not None and default_device_index is not None:
                # Use the module-level default platform/device indices.
                self.cl_platform_index = default_platform_index
                self.cl_device_index = default_device_index
                self.devices = [pyopencl.get_platforms()[self.cl_platform_index].get_devices()[self.cl_device_index]]
                self.ctx = pyopencl.Context(self.devices)
            else:
                self.ctx = pyopencl.create_some_context(interactive=False)
        else:
            self.cl_platform_index = cl_platform_index
            self.cl_device_index = cl_device_index
            self.devices = [pyopencl.get_platforms()[self.cl_platform_index].get_devices()[self.cl_device_index]]
            self.ctx = pyopencl.Context(self.devices)
        self.queue = pyopencl.CommandQueue(self.ctx)
    else:
        # Reuse an externally supplied context and queue.
        assert cl_platform_index is None and cl_device_index is None
        self.ctx = ctx
        self.queue = queue

    self.max_wg_size = self.ctx.devices[0].get_info(pyopencl.device_info.MAX_WORK_GROUP_SIZE)

Example 7: get_cl_device_list
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def get_cl_device_list():
    device_indexes = []
    for platform_index, platform in enumerate(pyopencl.get_platforms()):
        for device_index, device in enumerate(platform.get_devices()):
            device_indexes.append((device.name, platform_index, device_index))
    return device_indexes

Example 8: has_amd
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def has_amd():
    for platform in cl.get_platforms():
        if 'amd' in platform.name.lower():
            return True
    return False

Example 9: __init__
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def __init__(self, device_index, options):
    super(OpenCLMiner, self).__init__(device_index, options)
    self.output_size = 0x100

    self.device = cl.get_platforms()[options.platform].get_devices()[device_index]
    self.device_name = self.device.name.strip('\r\n \x00\t')
    self.frames = 30

    self.worksize = self.frameSleep = self.rate = self.estimated_rate = 0
    self.vectors = False

    self.adapterIndex = None
    # Only query ADL (AMD Display Library) adapter info for AMD GPU devices.
    if ADL and 'amd' in self.device.platform.name.lower() and self.device.type == cl.device_type.GPU:
        with adl_lock:
            self.adapterIndex = self.get_adapter_info()[self.device_index].iAdapterIndex

Example 10: _get_device
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def _get_device(gpuid):
    """Return GPU devices, context, and queue."""
    platforms = cl.get_platforms()
    # find_nonempty is defined elsewhere in this module; it presumably returns
    # the index of the first platform exposing usable devices.
    platf_idx = find_nonempty(platforms)
    my_gpu_devices = platforms[platf_idx].get_devices(
        device_type=cl.device_type.GPU)
    context = cl.Context(devices=my_gpu_devices)
    queue = cl.CommandQueue(context, my_gpu_devices[gpuid])
    if VERBOSE:
        print("Selected Device: ", my_gpu_devices[gpuid].name)
    return my_gpu_devices, context, queue

Example 11: get_all_info
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def get_all_info(self):
    platforms = pyopencl.get_platforms()
    s = "\n-------- available devices -----------\n"
    for p in platforms:
        s += "platform: \t%s\n" % p.name
        printNames = [["CPU", pyopencl.device_type.CPU],
                      ["GPU", pyopencl.device_type.GPU]]
        for name, identifier in printNames:
            s += "device type: \t%s\n" % name
            try:
                for d in p.get_devices(identifier):
                    s += "\t%s \n" % d.name
            except:
                s += "nothing found: \t%s\n" % name

    infoKeys = ['NAME', 'GLOBAL_MEM_SIZE', 'MAX_MEM_ALLOC_SIZE',
                'LOCAL_MEM_SIZE', 'IMAGE2D_MAX_WIDTH',
                'IMAGE2D_MAX_HEIGHT', 'IMAGE3D_MAX_WIDTH',
                'IMAGE3D_MAX_HEIGHT', 'IMAGE3D_MAX_DEPTH',
                'MAX_WORK_GROUP_SIZE', 'MAX_WORK_ITEM_SIZES']
    s += "\n-------- currently used device -------\n"
    for k in infoKeys:
        s += "%s: \t %s\n" % (k, self.get_info(k))
    return s

Example 12: printplatforms
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def printplatforms(self):
    for i, platform in enumerate(cl.get_platforms()):
        print('Platform %d - Name %s, Vendor %s' % (i, platform.name, platform.vendor))

Example 13: printfullinfo
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def printfullinfo(self):
    print('\n' + '=' * 60 + '\nOpenCL Platforms and Devices')
    for i, platform in enumerate(cl.get_platforms()):
        print('=' * 60)
        print('Platform %d - Name: ' % i + platform.name)
        print('Platform %d - Vendor: ' % i + platform.vendor)
        print('Platform %d - Version: ' % i + platform.version)
        print('Platform %d - Profile: ' % i + platform.profile)
        for device in platform.get_devices():
            print('    ' + '-' * 56)
            print('    Device - Name: ' + device.name)
            print('    Device - Type: ' + cl.device_type.to_string(device.type))
            print('    Device - Max Clock Speed: {0} MHz'.format(device.max_clock_frequency))
            print('    Device - Compute Units: {0}'.format(device.max_compute_units))
            print('    Device - Local Memory: {0:.0f} KB'.format(device.local_mem_size / 1024.0))
            print('    Device - Constant Memory: {0:.0f} KB'.format(device.max_constant_buffer_size / 1024.0))
            print('    Device - Global Memory: {0:.0f} GB'.format(device.global_mem_size / 1073741824.0))
            print('    Device - Max Buffer/Image Size: {0:.0f} MB'.format(device.max_mem_alloc_size / 1048576.0))
            print('    Device - Max Work Group Size: {0:.0f}'.format(device.max_work_group_size))
        print('\n')

Example 14: print_device_info
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def print_device_info():
    print('\n' + '=' * 60 + '\nOpenCL Platforms and Devices')
    for platform in cl.get_platforms():
        print('=' * 60)
        print('Platform - Name: ' + platform.name)
        print('Platform - Vendor: ' + platform.vendor)
        print('Platform - Version: ' + platform.version)
        print('Platform - Profile: ' + platform.profile)
        for device in platform.get_devices():
            print('    ' + '-' * 56)
            print('    Device - Name: ' + device.name)
            print('    Device - Type: ' + cl.device_type.to_string(device.type))
            print('    Device - Max Clock Speed: {0} MHz'.format(device.max_clock_frequency))
            print('    Device - Compute Units: {0}'.format(device.max_compute_units))
            print('    Device - Local Memory: {0:.0f} KB'.format(device.local_mem_size / 1024.0))
            print('    Device - Constant Memory: {0:.0f} KB'.format(device.max_constant_buffer_size / 1024.0))
            print('    Device - Global Memory: {0:.0f} GB'.format(device.global_mem_size / 1073741824.0))
            print('    Device - Max Buffer/Image Size: {0:.0f} MB'.format(device.max_mem_alloc_size / 1048576.0))
            print('    Device - Max Work Group Size: {0:.0f}'.format(device.max_work_group_size))
    print('\n')

Example 15: test_gpu_vector_sum
# Required import: import pyopencl [as alias]
# Or: from pyopencl import get_platforms [as alias]
def test_gpu_vector_sum(a, b):
    # Assumes module-level imports: import numpy as np; from time import time
    # define the PyOpenCL context and a profiling-enabled command queue
    platform = cl.get_platforms()[0]
    device = platform.get_devices()[0]
    context = cl.Context([device])
    queue = cl.CommandQueue(context,
                            properties=cl.command_queue_properties.PROFILING_ENABLE)
    # prepare the data structures (a and b are float32 numpy arrays)
    a_buffer = cl.Buffer(
        context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=a)
    b_buffer = cl.Buffer(
        context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=b)
    c_buffer = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, b.nbytes)
    program = cl.Program(context, """
    __kernel void sum(__global const float *a, __global const float *b, __global float *c)
    {
        int i = get_global_id(0);
        int j;
        for(j = 0; j < 10000; j++)
        {
            c[i] = a[i] + b[i];
        }
    }""").build()
    # start the GPU test
    gpu_start_time = time()
    event = program.sum(queue, a.shape, None, a_buffer, b_buffer, c_buffer)
    event.wait()
    elapsed = 1e-9 * (event.profile.end - event.profile.start)
    print("GPU Kernel evaluation Time: {0} s".format(elapsed))
    c_gpu = np.empty_like(a)
    # copy the result back to the host; enqueue_copy replaces the removed
    # private helper cl._enqueue_read_buffer used in the original recipe
    cl.enqueue_copy(queue, c_gpu, c_buffer).wait()
    gpu_end_time = time()
    print("GPU Time: {0} s".format(gpu_end_time - gpu_start_time))
    return c_gpu
#start the test
Author: PacktPublishing, Project: Python-Parallel-Programming-Cookbook-Second-Edition, Lines of code: 39, Source file: testApplicationPyopencl.py
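The "#start the test" comment marks a driver block that is not included in this excerpt. As a hypothetical usage sketch (the vector size and dtype are assumptions, not taken from the book), test_gpu_vector_sum could be exercised like this:

import numpy as np

if __name__ == '__main__':
    # Assumed driver: two random float32 vectors, summed on the first OpenCL device.
    size = 10000
    a = np.random.rand(size).astype(np.float32)
    b = np.random.rand(size).astype(np.float32)
    c = test_gpu_vector_sum(a, b)
    # Sanity check against the NumPy result.
    assert np.allclose(c, a + b)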