

Python pyopencl.create_some_context Function Code Examples

This article collects typical usage examples of the pyopencl.create_some_context function in Python. If you are wondering how create_some_context is used in practice, or what real-world calls to it look like, the curated code examples below should help.


A total of 15 code examples of create_some_context are shown below, ordered roughly by popularity.
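Before the individual examples, here is a minimal self-contained sketch of the pattern that most of the snippets below share: create a context with create_some_context, attach a CommandQueue, and run a kernel. The kernel and variable names are illustrative only; setting the PYOPENCL_CTX environment variable (for example to "0"), or passing interactive=False, skips the interactive device prompt.

import numpy as np
import pyopencl as cl

# Create a context; interactive=False (or a PYOPENCL_CTX setting) avoids the prompt.
ctx = cl.create_some_context(interactive=False)
queue = cl.CommandQueue(ctx)

# Illustrative data and kernel: double every element of a small array.
a = np.arange(16, dtype=np.float32)
mf = cl.mem_flags
a_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a)
out_buf = cl.Buffer(ctx, mf.WRITE_ONLY, a.nbytes)

prg = cl.Program(ctx, """
__kernel void twice(__global const float *a, __global float *out) {
    int gid = get_global_id(0);
    out[gid] = 2.0f * a[gid];
}
""").build()

prg.twice(queue, a.shape, None, a_buf, out_buf)
out = np.empty_like(a)
cl.enqueue_copy(queue, out, out_buf)
print(out)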

Example 1: __init__

    def __init__(self, profile=False, device=None, manual=False):
        """
        Initialize a device, a context and a queue.
        The preferred device is an NVIDIA GPU with the highest compute capability.

        @param profile : (optional) if True, enable profiling of the OpenCL events
        @param device : (optional) device in the format (0, 0)
        @param manual : (optional) if True, manually choose a device from the PyOpenCL prompt.
        """
        platforms = cl.get_platforms()

        if manual:
            self.ctx = cl.create_some_context()
            self.device = self.ctx.devices[0]

        elif device:
            self.device = platforms[device[0]].get_devices()[device[1]]
            self.ctx = cl.Context([self.device])

        else:
            # Try to choose a NVIDIA card with best compute capability
            cc_max = -1
            cc_argmax = (0, 0)
            for i_p, p in enumerate(platforms):
                for i_dev, dev in enumerate(p.get_devices()):
                    try:
                        cc = dev.compute_capability_major_nv + 0.1 * dev.compute_capability_minor_nv
                        if cc > cc_max:
                            cc_max = cc
                            cc_argmax = (i_p, i_dev)
                    except Exception:
                        # Not an NVIDIA device: the *_nv attributes are unavailable
                        pass
            if cc_max == -1:
                print("Warning: could not find a NVIDIA card. Please pick up manually the target device")
                self.ctx = cl.create_some_context()
                self.device = ctx.devices[0]
            else:
                self.device = platforms[cc_argmax[0]].get_devices()[cc_argmax[1]]
                self.ctx = cl.Context([self.device])
            # ------------
        self.devicename = self.device.name
        if profile:
            self.queue = cl.CommandQueue(self.ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
        else:
            self.queue = cl.CommandQueue(self.ctx)
        self.mf = cl.mem_flags
        self.path = []
        self.book = {}
Author: pierrepaleo, Project: opencl_intro, Lines: 48, Source file: oclutils.py
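A side note on the try/except block in this example: the compute_capability_major_nv / compute_capability_minor_nv attributes are only available on devices that expose NVIDIA's cl_nv_device_attribute_query extension. A more explicit (and purely illustrative) helper could check the device's extension string instead of swallowing every exception; this is a sketch, not part of the original project:

import pyopencl as cl

def nv_compute_capability(dev):
    # Hypothetical helper: return the compute capability as a float for NVIDIA
    # devices, or None when the NVIDIA attribute-query extension is absent.
    if "cl_nv_device_attribute_query" not in dev.extensions:
        return None
    return dev.compute_capability_major_nv + 0.1 * dev.compute_capability_minor_nv

for p in cl.get_platforms():
    for dev in p.get_devices():
        print(dev.name, nv_compute_capability(dev))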

Example 2: __init__

    def __init__(self, lmb, prompt=False, user_dev_selection=None, bindings=None):
        """
        """
        assert not (prompt and user_dev_selection), \
            "Can't ask for @prompt and provide @user_dev_selection at the same time"
        self.user_dev_selection = user_dev_selection

        if prompt:
            self.user_dev_selection = None if Py2OpenCL.only_one_device() \
                else self.init()

        self.ctx = cl.create_some_context(interactive=False, answers=self.user_dev_selection) \
            if self.user_dev_selection else cl.create_some_context()

        self.queue = cl.CommandQueue(self.ctx)
        self.bindings = bindings
        self.lmb = lmb
Author: hervold, Project: py2opencl, Lines: 16, Source file: driver.py
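For context on the answers argument used above: create_some_context() consumes the answers list in place of what a user would otherwise type at the interactive platform/device prompts. Assuming the desired target is platform 0, device 0 (the exact indices depend on the local OpenCL installation), a direct call might look like this rough sketch:

import pyopencl as cl

# Assumption: each string stands in for one answer to the selection prompts,
# e.g. platform 0 and, if a second prompt would appear, device 0 on it.
ctx = cl.create_some_context(interactive=False, answers=["0", "0"])
print(ctx.devices)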

Example 3: test_cl

def test_cl():
    ctx = cl.create_some_context()  # (interactive=False)

    # print 'ctx', ctx
    queue = cl.CommandQueue(ctx, properties=cl.command_queue_properties.PROFILING_ENABLE)
    f = open("part1.cl", "r")
    fstr = "".join(f.readlines())
    program = cl.Program(ctx, fstr).build()
    mf = cl.mem_flags

    cameraPos = np.array([0, 6, -1, 0])
    invView = la.inv(look_at((0, 6, -1), (0, 1, 1), (0, 1, 0)))
    invProj = la.inv(perspective(60, 1, 1, 1000))
    print "view", invView
    print "proj", invProj
    viewParamsData = (
        cameraPos.flatten().tolist()
        + np.transpose(invView).flatten().tolist()
        + np.transpose(invProj).flatten().tolist()
    )
    # print 'vpd', viewParamsData
    viewParams = struct.pack("4f16f16f", *viewParamsData)
    viewParams_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=viewParams)
    num_pixels = 1000 * 1000
    # setup opencl
    dest = np.ndarray((1000, 1000, 4), dtype=np.float32)
    dest_buf = cl.Buffer(ctx, mf.WRITE_ONLY, dest.nbytes)
    local_shape = (8, 8)
    # run kernel
    evt = program.part1(queue, (dest.shape[0], dest.shape[1]), None, viewParams_buf, dest_buf)
    # evt = program.part1(queue, dest.shape, None, dest_buf)
    cl.enqueue_read_buffer(queue, dest_buf, dest).wait()
    print "time", (evt.profile.end - evt.profile.start) * 0.000001, "ms"
    return dest
Author: jameszhao00, Project: lightwayrt, Lines: 34, Source file: main.py
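One caveat about the read-back in this example: cl.enqueue_read_buffer is a legacy call that has long been deprecated and is removed from recent PyOpenCL releases; cl.enqueue_copy is the current, direction-agnostic replacement. The equivalent line would be roughly:

# Modern equivalent of the deprecated enqueue_read_buffer() call above:
cl.enqueue_copy(queue, dest, dest_buf).wait()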

Example 4: test_bitonic_argsort

def test_bitonic_argsort(ctx_factory, size, dtype):
    ctx = cl.create_some_context()
    queue = cl.CommandQueue(ctx)

    dev = ctx.devices[0]
    if (dev.platform.name == "Apple" and dev.type & cl.device_type.CPU):
        pytest.xfail("Bitonic sort won't work on Apple CPU: no workgroup "
            "parallelism")
    if (dev.platform.name == "Portable Computing Language"
            and dtype == np.float64):
        pytest.xfail("Double precision bitonic sort doesn't work on POCL")

    import pyopencl.clrandom as clrandom
    from pyopencl.bitonic_sort import BitonicSort

    index = cl_array.arange(queue, 0, size, 1, dtype=np.int32)
    m = clrandom.rand(queue, (size,), dtype, luxury=None, a=0, b=239432234)

    sorterm = BitonicSort(ctx)

    ms, evt = sorterm(m.copy(), idx=index, axis=0)

    assert np.array_equal(np.sort(m.get()), ms.get())

    # may be False because of identical values in array
    # assert np.array_equal(np.argsort(m.get()), index.get())

    # Check values by indices
    assert np.array_equal(m.get()[np.argsort(m.get())], m.get()[index.get()])
Author: god1991, Project: pyopencl, Lines: 29, Source file: test_algorithm.py
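A smaller standalone illustration of the same sorter, as a sketch rather than part of the original test suite (it assumes a power-of-two array length, which the bitonic network expects):

import numpy as np
import pyopencl as cl
import pyopencl.array as cl_array
from pyopencl.bitonic_sort import BitonicSort

ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)

# Sort a copy so the original device array stays untouched.
a = cl_array.to_device(queue, np.random.rand(2 ** 12).astype(np.float32))
sorter = BitonicSort(ctx)
a_sorted, evt = sorter(a.copy(), axis=0)
evt.wait()
assert np.array_equal(np.sort(a.get()), a_sorted.get())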

Example 5: __init__

    def __init__(self, cl_mode = True, cl_device = None):
        """Initialize the class.
        """
        if cl_mode:
            import pyopencl as cl
            import pyopencl.array
            if cl_device == 'gpu':
                gpu_devices = []
                for platform in cl.get_platforms():
                    try: gpu_devices += platform.get_devices(device_type=cl.device_type.GPU)
                    except: pass
                self.ctx = cl.Context(gpu_devices)
            elif cl_device == 'cpu':
                cpu_devices = []
                for platform in cl.get_platforms():
                    try: cpu_devices += platform.get_devices(device_type=cl.device_type.CPU)
                    except: pass
                self.ctx = cl.Context([cpu_devices[0]])
            else:
                self.ctx = cl.create_some_context()

            self.queue = cl.CommandQueue(self.ctx)
            self.mf = cl.mem_flags
            self.device = self.ctx.get_info(cl.context_info.DEVICES)[0]
            self.device_type = self.device.type
            self.device_compute_units = self.device.max_compute_units

        self.cl_mode = cl_mode
        self.obs = []
        self.samples = {}
Author: tqian86, Project: MPBST, Lines: 30, Source file: predictor.py

Example 6: calc_range

def calc_range(start, num, perexec):
	"""Calculate the otp-md5 of the 64-bit numbers range(start, num),
	   with otp sequence of rounds."""

	assert(num % perexec == 0)

	# Boilerplate OpenCL stuff
	ctx = cl.create_some_context()
	queue = cl.CommandQueue(ctx)
	mf = cl.mem_flags

	# Read the program source and compile
	sourcecode = open("otpmd5.cl").read()
	prg = cl.Program(ctx, sourcecode).build()

	for i in range(num // perexec):
		offset = start + (perexec * i)

		host_input = numpy.arange(offset, offset+perexec, dtype=numpy.uint64)
		result = numpy.empty_like(host_input)
		dev_input = cl.Buffer(ctx, mf.READ_ONLY | mf.USE_HOST_PTR, hostbuf=host_input)
		dev_output = cl.Buffer(ctx, mf.READ_WRITE, size=result.size * result.itemsize)
		prg.get_otpmd5_64k_rounds(queue, host_input.shape, None, dev_input, dev_output).wait()
		cl.enqueue_copy(queue, result, dev_output).wait()
		send_output(host_input, result)
Author: therealmik, Project: otpbreak, Lines: 25, Source file: create_otp_blocks.py

Example 7: __init__

    def __init__(self, coords, values, base, wantCL=True, split=None, nnear=None, majority=True):
        self.coords = np.asarray(coords, dtype=np.int32)
        self.values = np.asarray(values, dtype=np.int32)
        self.base = np.asarray(base, dtype=np.int32)
        lencoords = self.coords.shape[0]
        lenvalues = self.values.shape[0]
        assert lencoords == lenvalues, "lencoords does not equal lenvalues"

        self.wantCL = wantCL
        if hasCL == True and self.wantCL == True:
            if split == None:
                self.split = CLIDT.OpenCLmaxsize
            else:
                self.split = split
            try:
                self.ctx = cl.create_some_context()
                self.queue = cl.CommandQueue(self.ctx)
                filestr = "".join(open("idt.cl", "r").readlines())
                self.program = cl.Program(self.ctx, filestr).build()
                self.coordindices = self.genindices(self.coords)
                self.baseindices = self.genindices(self.base)
                self.canCL = True
            # FIXME: specify an exception type
            except:
                print "warning: unable to use pyopencl, defaulting to Invdisttree"
                self.canCL = False
        else:
            self.canCL = False

        if nnear == None:
            self.nnear = np.int32(CLIDT.nnear)
        else:
            self.nnear = np.int32(nnear)

        self.usemajority = np.int32(1 if majority else 0)
Author: KermMartian, Project: TopoMC, Lines: 35, Source file: clidt.py

Example 8: __init__

    def __init__(self, network, dt=0.001, seed=None, model=None, context=None,
                 n_prealloc_probes=32, profiling=None, ocl_only=False):
        if context is None:
            print('No context argument was provided to sim_ocl.Simulator')
            print("Calling pyopencl.create_some_context() for you now:")
            context = cl.create_some_context()
        if profiling is None:
            profiling = int(os.getenv("NENGO_OCL_PROFILING", 0))
        self.context = context
        self.profiling = profiling
        if self.profiling:
            self.queue = cl.CommandQueue(context, properties=PROFILING_ENABLE)
        else:
            self.queue = cl.CommandQueue(context)

        self.n_prealloc_probes = n_prealloc_probes
        self.ocl_only = ocl_only
        self.cl_rng_state = None

        # -- allocate data
        sim_npy.Simulator.__init__(
            self, network=network, dt=dt, seed=seed, model=model)

        # -- create object to execute list of plans
        self._plans = Plans(self._plan, self.profiling)
Author: MarcoSaku, Project: Spiking-C3D, Lines: 25, Source file: sim_ocl.py

Example 9: gpu_array_sum

def gpu_array_sum(a, b):
    context = cl.create_some_context()  # Initialize the Context
    queue = cl.CommandQueue(context, properties=cl.command_queue_properties.PROFILING_ENABLE)  # Instantiate a Queue with profiling (timing) enabled
    a_buffer = cl.Buffer(context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=a)
    b_buffer = cl.Buffer(context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=b)
    c_buffer = cl.Buffer(context, cl.mem_flags.WRITE_ONLY, b.nbytes)  # Create three buffers (plans for areas of memory on the device)
    program = cl.Program(context, """
    __kernel void sum(__global const float *a, __global const float *b, __global float *c)
    {
        int i = get_global_id(0);
        int j;
        for(j = 0; j < 1000; j++)
        {
            c[i] = a[i] + b[i];
        }
    }""").build()  # Compile the device program
    gpu_start_time = time()  # Get the GPU start time
    event = program.sum(queue, a.shape, None, a_buffer, b_buffer, c_buffer)  # Enqueue the GPU sum program XXX
    event.wait()  # Wait until the event finishes XXX
    elapsed = 1e-9*(event.profile.end - event.profile.start)  # Calculate the time it took to execute the kernel
    print("GPU Kernel Time: {0} s".format(elapsed))  # Print the time it took to execute the kernel
    c_gpu = np.empty_like(a)  # Create an empty array the same size as array a
    cl.enqueue_read_buffer(queue, c_buffer, c_gpu).wait()  # Read back the data from GPU memory into array c_gpu
    gpu_end_time = time()  # Get the GPU end time
    print("GPU Time: {0} s".format(gpu_end_time - gpu_start_time))  # Print the time the GPU program took, including both memory copies
    return c_gpu  # Return the sum of the two arrays
Author: Rejzor, Project: Python_stuff, Lines: 26, Source file: 030_timing.py

Example 10: gpu_gradient

def gpu_gradient():
				
	if len(sys.argv) != 3:
		print "USAGE: " + sys.argv[0] + " <inputImageFile> <outputImageFile>"
		return 1
	
	# create context and command queue
	ctx = cl.create_some_context()
	queue = cl.CommandQueue(ctx)
	
	# load image
	im = Image.open(sys.argv[1])
	if im.mode != "RGBA":
		im = im.convert("RGBA")
	imgSize = im.size
	buffer = im.tostring() # len(buffer) = imgSize[0] * imgSize[1] * 4

	
	# Create output image object
	clImageFormat = cl.ImageFormat(cl.channel_order.RGBA, 
								cl.channel_type.UNSIGNED_INT8)
	input_image = cl.Image(ctx,
								cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,
								clImageFormat,
								imgSize,
								None,
								buffer)
	output_image = cl.Image(ctx,
							cl.mem_flags.WRITE_ONLY,
							clImageFormat,
							imgSize)

	# load the kernel source code
	kernelFile = open("gradient.cl", "r")
	kernelSrc = kernelFile.read()

	# Create OpenCL program
	program = cl.Program(ctx, kernelSrc).build()
	# Call the kernel directly
	globalWorkSize = ( imgSize[0],imgSize[1] ) 
	gpu_start_time = time()
	program.gradient(queue,
							globalWorkSize,
							None,
							input_image,
							output_image)
		
	# Read the output buffer back to the Host
	buffer = numpy.zeros(imgSize[0] * imgSize[1] * 4, numpy.uint8)
	origin = ( 0, 0, 0 )
	region = ( imgSize[0], imgSize[1], 1 )
	
	cl.enqueue_read_image(queue, output_image,
						origin, region, buffer).wait()
	
	# Save the image to disk
	gsim = Image.fromstring("RGBA", imgSize, buffer.tostring())
	gsim.save("GPU_"+sys.argv[2])
	gpu_end_time = time()
	print("GPU Time: {0} s".format(gpu_end_time - gpu_start_time))
Author: Rejzor, Project: Python_stuff, Lines: 60, Source file: gradient.py
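Note that this snippet targets the legacy PIL and PyOpenCL APIs: current Pillow releases have removed tostring/fromstring in favour of tobytes/frombytes, and enqueue_read_image has been superseded by enqueue_copy. A rough, untested sketch of the corresponding modern calls, reusing the names from the snippet above:

# Reading the source pixels (was: im.tostring()):
buffer = im.tobytes()

# Reading the result image back from the device (was: cl.enqueue_read_image(...)):
result = numpy.zeros(imgSize[0] * imgSize[1] * 4, numpy.uint8)
cl.enqueue_copy(queue, result, output_image,
                origin=(0, 0, 0), region=(imgSize[0], imgSize[1], 1))

# Rebuilding a PIL image from the raw bytes (was: Image.fromstring(...)):
gsim = Image.frombytes("RGBA", imgSize, result.tobytes())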

Example 11: __init__

  def __init__( self, im, fil, fil_1d=None, fil_2d=None, larger_buffer=True, sep=True, buffer_flip=False, type=numpy.float32 ):
    
    self.ctx = cl.create_some_context()
    self.queue = cl.CommandQueue( self.ctx )
    
    self.larger_buffer = larger_buffer
    self.sep = sep # whether or not the convolution is separated into 1D chunks
    self.type = type #TODO: type should just come from the input image, do a check to see if it matches the filter
    self.buffer_flip = buffer_flip # Optimization for separable convolutions where only the x direction is required
    if self.type == numpy.float32:
      self.ctype = 'float'
    elif self.type == numpy.float64:
      self.ctype = 'double'
    else:
      raise TypeError("Data type specified is not currently supported: " + str(self.type))

    # For special convolutions, if required
    self.fil_1d = fil_1d
    self.fil_1d_origin = 0
    self.fil_2d = fil_2d
    self.fil_2d_origin = ( 0, 0 ) # offset of the center of the filter
    self.max_2d_buffer = False # just set this to false for now, it might be used in the future
    
    if im is not None and fil is not None:
      self.set_params( im, fil )
Author: bjkomer, Project: pyratslam, Lines: 25, Source file: convolution.py

Example 12: compile_source

    def compile_source(self):
        self.context = pyopencl.create_some_context()
        self.queue = pyopencl.CommandQueue(self.context)
        self.mf = pyopencl.mem_flags

        opencl_source = load_file("geneticvehicle.cl") % {
                                "vertices_per_car" : self.number_of_vertices_per_car,
                                "number_of_cars" : self.number_of_cars,
                                "density" : self.density,
                                "number_of_wheels" : self.number_of_wheels_per_car,
                                "number_of_contact_points" : self.number_of_contact_points,
                                "island_start" : self.island.island_start,
                                "island_step" : self.island.island_step,
                                "island_end" : self.island.island_end,
                                "island_acceleration"  : int(self.island.island_acceleration),
                                "island_range" : self.island.range(),
                                "crossover_points" : self.crossover_points,
                                "point_mutations" : self.point_mutations}

        self.program = pyopencl.Program(self.context, opencl_source)

        try:
            self.program.build()
        except Exception as why:
            print(why)
            print(self.program.get_build_info(self.context.devices[0], pyopencl.program_build_info.LOG))
Author: anopheles, Project: geneticvehicle, Lines: 26, Source file: geneticvehicle.py

Example 13: test_opencl

def test_opencl():
    import numpy as np
    import pyopencl as cl

    a = np.random.rand(50000).astype(np.float32)
    b = np.random.rand(50000).astype(np.float32)

    context = cl.create_some_context()
    queue = cl.CommandQueue(context)

    mf = cl.mem_flags

    a_cl = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=a)
    b_cl = cl.Buffer(context, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=b)

    program = cl.Program(context, r'''
        __kernel void sum(__global const float * a, __global const float * b, __global float * out) {
            int gid = get_global_id(0);
            out[gid] = a[gid] + b[gid];
        }
    ''').build()

    out_cl = cl.Buffer(context, mf.WRITE_ONLY, a.nbytes)

    program.sum(queue, a.shape, None, a_cl, b_cl, out_cl)

    out = np.empty_like(a)
    cl.enqueue_copy(queue, out, out_cl)

    print(np.linalg.norm(out - (a + b)))
Author: bracket, Project: handsome, Lines: 30, Source file: tasks.py

Example 14: init_context_queue

    def init_context_queue(self):
        if self.ctx is None:
            if self.choose_best_device:
                self.ctx = ocl.create_context()
            else:
                self.ctx = cl.create_some_context()
        self.queue = cl.CommandQueue(self.ctx)
Author: dnaudet, Project: silx, Lines: 7, Source file: clfft.py

Example 15: __init__

    def __init__(self, seed=None):
        self.ctx = cl.create_some_context()
        self.queue = cl.CommandQueue(self.ctx)
        self.seed = seed

        numpy.random.seed(seed)
        self._compute_seed()
Author: jakogut, Project: clsimplex, Lines: 7, Source file: clsimplex.py


Note: the pyopencl.create_some_context examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code, and do not reproduce this page without permission.