

Python driver.Event method code examples

This article collects and summarizes typical usage examples of the pycuda.driver.Event method in Python. If you are unsure what driver.Event does, how to call it, or what working code using it looks like, the curated examples below should help. You can also explore further usage examples from the pycuda.driver module.


The following shows 5 code examples of the driver.Event method, ordered by popularity by default.
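Before the collected examples, here is a minimal self-contained sketch of the most common driver.Event pattern: timing GPU work with a pair of events. The array and its size are illustrative and not taken from the projects below.

import numpy as np
import pycuda.autoinit  # noqa: F401 -- creates a default CUDA context
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray

# Two events bracket the work to be timed.
start, end = drv.Event(), drv.Event()

a = gpuarray.to_gpu(np.random.rand(1 << 20).astype(np.float32))

start.record()       # enqueue the start marker on the default stream
b = (a * 2.0).get()  # some GPU work, followed by a copy back to the host
end.record()         # enqueue the end marker
end.synchronize()    # block the host until the end marker is reached
print("elapsed: %.3f ms" % start.time_till(end))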

Example 1: _get_events

# Required import: from pycuda import driver as drv
# Alternatively: from pycuda.driver import Event
def _get_events():
    return (drv.Event(), drv.Event()) 
Developer: NervanaSystems, Project: ngraph-python, Lines: 4, Source: util.py

Example 2: __init__

# Required import: from pycuda import driver as drv
# Alternatively: from pycuda.driver import Event
def __init__(self, transformer, comm, op):
        super(CudaAllReduceKernel, self).__init__(transformer)
        self.op = op
        self.tensor = op.tensor_description()
        self.device_id = int(transformer.device_id)
        self.device_ids = list(map(int, self.op.device_ids))
        self.event = drv.Event(flags=event_flags.INTERPROCESS | event_flags.DISABLE_TIMING)
        self.stream = drv.Stream()
        self.output_buff_dict = {}
        self.scratch_buff_dict = {}
        self.event_buff_dict = {}
        self.comm = comm
        self.init_buffers() 
Developer: NervanaSystems, Project: ngraph-python, Lines: 15, Source: tensor_ops.py
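The constructor above also relies on event_flags, which the import comment does not mention. Below is a minimal standalone sketch of creating a shareable event; the pycuda.autoinit import is an illustrative assumption (ngraph-python manages its CUDA contexts itself).

import pycuda.autoinit  # noqa: F401 -- illustrative only
import pycuda.driver as drv
from pycuda.driver import event_flags

# CUDA requires interprocess events to be created with timing disabled.
evt = drv.Event(flags=event_flags.INTERPROCESS | event_flags.DISABLE_TIMING)
handle = evt.ipc_handle()  # a serializable handle a peer process can open
# A peer process reconstructs the event via drv.Event.from_ipc_handle(handle).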

Example 3: init_buffers

# Required import: from pycuda import driver as drv
# Alternatively: from pycuda.driver import Event
def init_buffers(self):
        shape = self.op.args[0].tensor_description().shape
        dtype = self.op.args[0].tensor_description().dtype

        n_devs = len(self.op.device_ids)
        size = self.op.args[0].tensor_description().axes.size
        segment_size = calculate_segment_size(size, n_devs)

        # Allocate output and scratch buffers
        self.output_buff = gpuarray.zeros(shape, dtype)
        self.scratch_buff = gpuarray.zeros(segment_size * n_devs, dtype)

        self.output_buff_dict[self.device_id] = self.output_buff.gpudata
        self.scratch_buff_dict[self.device_id] = self.scratch_buff.gpudata

        # Allocate IPC handles
        output_ipc_hdl = drv.mem_get_ipc_handle(self.output_buff.gpudata)
        scratch_ipc_hdl = drv.mem_get_ipc_handle(self.scratch_buff.gpudata)
        event_ipc_hdl = self.event.ipc_handle()

        # Broadcast handles to others
        msg = (self.device_id, output_ipc_hdl, scratch_ipc_hdl, event_ipc_hdl)
        for i in self.device_ids:
            if i == self.device_id:
                self.comm.bcast(msg, root=i)
            else:
                (peer_id,
                 output_ipc_hdl,
                 scratch_ipc_hdl,
                 event_ipc_hdl) = self.comm.bcast(None, root=i)

                output_hdl = drv.IPCMemoryHandle(output_ipc_hdl)
                scratch_hdl = drv.IPCMemoryHandle(scratch_ipc_hdl)
                event_hdl = drv.Event.from_ipc_handle(event_ipc_hdl)
                self.output_buff_dict[peer_id] = output_hdl
                self.scratch_buff_dict[peer_id] = scratch_hdl
                self.event_buff_dict[peer_id] = event_hdl 
Developer: NervanaSystems, Project: ngraph-python, Lines: 39, Source: tensor_ops.py
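Once every process holds its peers' buffer and event handles, the events can order cross-device reads and writes. The method below is only a hedged sketch of such a handshake, not code from tensor_ops.py; it uses nothing beyond the attributes set up in the examples above.

def _signal_and_wait(self):
    # Announce on our stream that this device's buffers are ready.
    self.event.record(self.stream)
    # Make our stream wait on every peer's event without blocking the host.
    for peer_event in self.event_buff_dict.values():
        self.stream.wait_for_event(peer_event)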

Example 4: init_mark

# Required import: from pycuda import driver as drv
# Alternatively: from pycuda.driver import Event
def init_mark(self):
        """
        Generate a timing mark object.

        Returns:
            timing mark (pycuda driver event)
        """
        return drv.Event() 
Developer: NervanaSystems, Project: neon, Lines: 10, Source: nervanagpu.py
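The returned mark is a plain pycuda event, so it can be driven directly through the pycuda Event API. A short usage sketch follows; be is assumed to be a NervanaGPU backend instance and the workload is illustrative.

start, end = be.init_mark(), be.init_mark()

start.record()
# ... enqueue some GPU work here ...
end.record()
end.synchronize()                        # wait until the work has finished
print("%.3f ms" % start.time_till(end))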

Example 5: _get_events

# Required import: from pycuda import driver as drv
# Alternatively: from pycuda.driver import Event
def _get_events():
    return (drv.Event(), drv.Event())

# debugging tool
# import re
# import traceback as tb

# nrv_re = re.compile(r'nervanagpu\.py$')
# def print_trace():
#     caller = None
#     for frame in tb.extract_stack():
#         if nrv_re.search(frame[0]):
#             break
#         caller = (frame[0],frame[1])
#     print(caller)
Developer: NervanaSystems, Project: neon, Lines: 17, Source: nervanagpu.py


Note: The pycuda.driver.Event method examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and the copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.