

Python driver.Event Method Code Examples

This article collects typical usage examples of the pycuda.driver.Event method in Python. If you are struggling with questions such as how exactly driver.Event is used, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of pycuda.driver, the module that contains this method.


Five code examples of the driver.Event method are shown below, sorted by popularity by default.
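Before diving into the examples, here is a minimal sketch of the most common use of driver.Event: timing GPU work. It assumes pycuda is installed and a CUDA device is available; the array size and the elementwise operation are arbitrary placeholders.

import numpy as np
import pycuda.autoinit            # creates a context on the default device
import pycuda.driver as drv
import pycuda.gpuarray as gpuarray

start, end = drv.Event(), drv.Event()

a = gpuarray.to_gpu(np.random.randn(1024, 1024).astype(np.float32))

start.record()                    # enqueue the start marker on the default stream
b = (a * 2.0).get()               # the GPU work being timed
end.record()                      # enqueue the end marker
end.synchronize()                 # block until all work before `end` has finished

print("elapsed: %.3f ms" % start.time_till(end))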

Example 1: _get_events

# Module to import: from pycuda import driver [as drv]
# Alternatively: from pycuda.driver import Event [as an alias]
def _get_events():
    return (drv.Event(), drv.Event()) 
Developer: NervanaSystems, Project: ngraph-python, Lines of code: 4, Source: util.py
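The helper above simply allocates a (start, end) event pair. A typical way to consume such a pair is to wrap it in a small context manager; the sketch below is hypothetical (it is not part of ngraph-python) and assumes a CUDA context already exists, e.g. via pycuda.autoinit.

import contextlib
import pycuda.driver as drv

def _get_events():
    return (drv.Event(), drv.Event())

@contextlib.contextmanager
def gpu_timer(results):
    start, end = _get_events()
    start.record()
    yield
    end.record()
    end.synchronize()
    results.append(start.time_till(end))   # elapsed time in milliseconds

# usage:
# times = []
# with gpu_timer(times):
#     ...   # any GPU work enqueued on the default stream
# print(times[-1], "ms")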

Example 2: __init__

# Module to import: from pycuda import driver [as drv]
# Alternatively: from pycuda.driver import Event [as an alias]
def __init__(self, transformer, comm, op):
        super(CudaAllReduceKernel, self).__init__(transformer)
        self.op = op
        self.tensor = op.tensor_description()
        self.device_id = int(transformer.device_id)
        self.device_ids = list(map(int, self.op.device_ids))
        self.event = drv.Event(flags=event_flags.INTERPROCESS | event_flags.DISABLE_TIMING)
        self.stream = drv.Stream()
        self.output_buff_dict = {}
        self.scratch_buff_dict = {}
        self.event_buff_dict = {}
        self.comm = comm
        self.init_buffers() 
Developer: NervanaSystems, Project: ngraph-python, Lines of code: 15, Source: tensor_ops.py
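Note the flags on the event: CUDA requires an event intended for inter-process sharing to be created with INTERPROCESS, and INTERPROCESS in turn requires DISABLE_TIMING. A minimal, self-contained sketch of creating such an event and exporting its IPC handle (the communicator/broadcast machinery from the example is omitted):

import pycuda.autoinit
import pycuda.driver as drv
from pycuda.driver import event_flags

event = drv.Event(flags=event_flags.INTERPROCESS | event_flags.DISABLE_TIMING)
handle = event.ipc_handle()        # opaque handle that can be sent to a peer process

# In the peer process (a different CUDA context), the event is reopened with:
# peer_event = drv.Event.from_ipc_handle(handle)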

Example 3: init_buffers

# Module to import: from pycuda import driver [as drv]
# Alternatively: from pycuda.driver import Event [as an alias]
def init_buffers(self):
        shape = self.op.args[0].tensor_description().shape
        dtype = self.op.args[0].tensor_description().dtype

        n_devs = len(self.op.device_ids)
        size = self.op.args[0].tensor_description().axes.size
        segment_size = calculate_segment_size(size, n_devs)

        # Allocate output and scratch buffers
        self.output_buff = gpuarray.zeros(shape, dtype)
        self.scratch_buff = gpuarray.zeros(segment_size * n_devs, dtype)

        self.output_buff_dict[self.device_id] = self.output_buff.gpudata
        self.scratch_buff_dict[self.device_id] = self.scratch_buff.gpudata

        # Allocate IPC handles
        output_ipc_hdl = drv.mem_get_ipc_handle(self.output_buff.gpudata)
        scratch_ipc_hdl = drv.mem_get_ipc_handle(self.scratch_buff.gpudata)
        event_ipc_hdl = self.event.ipc_handle()

        # Broadcast handles to others
        msg = (self.device_id, output_ipc_hdl, scratch_ipc_hdl, event_ipc_hdl)
        for i in self.device_ids:
            if i == self.device_id:
                self.comm.bcast(msg, root=i)
            else:
                (peer_id,
                 output_ipc_hdl,
                 scratch_ipc_hdl,
                 event_ipc_hdl) = self.comm.bcast(None, root=i)

                output_hdl = drv.IPCMemoryHandle(output_ipc_hdl)
                scratch_hdl = drv.IPCMemoryHandle(scratch_ipc_hdl)
                event_hdl = drv.Event.from_ipc_handle(event_ipc_hdl)
                self.output_buff_dict[peer_id] = output_hdl
                self.scratch_buff_dict[peer_id] = scratch_hdl
                self.event_buff_dict[peer_id] = event_hdl 
Developer: NervanaSystems, Project: ngraph-python, Lines of code: 39, Source: tensor_ops.py
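init_buffers exports IPC handles for its output buffer, scratch buffer, and event, broadcasts them to every peer, and opens each peer's handles locally, so every device ends up with pointer and event maps keyed by device id. The helper calculate_segment_size is defined elsewhere in ngraph-python; a plausible minimal version (an assumption, not the project's actual code) simply splits the flat tensor into n_devs roughly equal chunks:

def calculate_segment_size(size, n_devs):
    # ceil-divide so that n_devs * segment_size covers every element
    return (size + n_devs - 1) // n_devs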

Example 4: init_mark

# Module to import: from pycuda import driver [as drv]
# Alternatively: from pycuda.driver import Event [as an alias]
def init_mark(self):
        """
        Generate a timing mark object.

        Returns:
            timing mark (pycuda driver event)
        """
        return drv.Event() 
Developer: NervanaSystems, Project: neon, Lines of code: 10, Source: nervanagpu.py
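The "timing mark" returned here is an ordinary pycuda event, so measuring a region amounts to recording two marks and asking for the time between them. A minimal sketch using pycuda's API directly (not necessarily how neon's backend wraps it):

import pycuda.autoinit
import pycuda.driver as drv

start_mark = drv.Event()   # what init_mark() returns
end_mark = drv.Event()

start_mark.record()
# ... enqueue the GPU work to be measured ...
end_mark.record()
end_mark.synchronize()

print("region took %.3f ms" % end_mark.time_since(start_mark))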

Example 5: _get_events

# Module to import: from pycuda import driver [as drv]
# Alternatively: from pycuda.driver import Event [as an alias]
def _get_events():
    return (drv.Event(), drv.Event())

# debugging tool
# import re
# import traceback as tb

# nrv_re = re.compile(r'nervanagpu\.py$')
# def print_trace():
#     caller = None
#     for frame in tb.extract_stack():
#         if GPUTensor.nrv_re.search(frame[0]):
#             break
#         caller = (frame[0],frame[1])
#     print caller 
Developer: NervanaSystems, Project: neon, Lines of code: 17, Source: nervanagpu.py


Note: The pycuda.driver.Event examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please follow the corresponding project's license when distributing or using them, and do not reproduce this article without permission.