

Python mpi4py.MPI Class Code Examples

This article collects typical usage examples of the mpi4py.MPI class in Python. If you are wondering what the MPI class does, how to use it, or what real-world code that uses it looks like, the selected examples below may help.


The following presents 15 code examples of the MPI class, sorted by popularity by default.

Example 1: testHandleValue

 def testHandleValue(self):
     typemap = {ctypes.sizeof(ctypes.c_uint32): ctypes.c_uint32,
                ctypes.sizeof(ctypes.c_uint64): ctypes.c_uint64}
     for obj in self.objects:
         uintptr_t = typemap[MPI._sizeof(obj)]
         handle = uintptr_t.from_address(MPI._addressof(obj))
         self.assertEqual(handle.value, MPI._handleof(obj))
Developer: benkirk, Project: mpi_playground, Lines of code: 7, Source file: test_ctypes.py
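
The same pattern works outside a test class. Below is a minimal standalone sketch (not taken from mpi_playground) that reads the raw handle of MPI.COMM_WORLD through ctypes and compares it with MPI._handleof(); it assumes the handle is either 4 or 8 bytes wide, which covers MPICH-style integer handles and Open MPI's pointer handles.

import ctypes
from mpi4py import MPI

comm = MPI.COMM_WORLD
# MPI._sizeof() reports the size of the underlying C handle and selects
# the matching unsigned integer type for reading it back.
uint_t = {4: ctypes.c_uint32, 8: ctypes.c_uint64}[MPI._sizeof(comm)]
handle = uint_t.from_address(MPI._addressof(comm))
assert handle.value == MPI._handleof(comm)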

Example 2: testAHandleOf

 def testAHandleOf(self):
     for obj in self.objects:
         if isinstance(obj, MPI.Status):
             hdl = lambda: MPI._handleof(obj)
             self.assertRaises(NotImplementedError, hdl)
             continue
         hdl = MPI._handleof(obj)
Developer: benkirk, Project: mpi_playground, Lines of code: 7, Source file: test_objmodel.py

Example 3: testHandleValue

 def testHandleValue(self):
     ffi = cffi.FFI()
     typemap = {ffi.sizeof('uint32_t'): 'uint32_t',
                ffi.sizeof('uint64_t'): 'uint64_t',}
     for obj in self.objects:
         uintptr_t = typemap[MPI._sizeof(obj)]
         handle = ffi.cast(uintptr_t+'*', MPI._addressof(obj))[0]
         self.assertEqual(handle, MPI._handleof(obj))
Developer: benkirk, Project: mpi_playground, Lines of code: 8, Source file: test_cffi.py

Example 4: ncmpi_open

def ncmpi_open(name):
    comm_ptr = MPI._addressof(MPI.COMM_WORLD)
    comm_val = MPI_Comm.from_address(comm_ptr)
    info_ptr = MPI._addressof(MPI.INFO_NULL)
    info_val = MPI_Info.from_address(info_ptr)
    ncid = c_int()
    retval = _ncmpi_open(comm_val, name, NC_NOWRITE, info_val, byref(ncid))
    errcheck(retval)
    return ncid.value
Developer: abhinavvishnu, Project: matex, Lines of code: 9, Source file: pnetcdf.py
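
Example 4 (and Example 7 below) assumes ctypes aliases MPI_Comm and MPI_Info that are defined elsewhere in pnetcdf.py. Here is a hedged sketch of how such aliases are commonly defined, following the size-matching approach used in the mpi4py demos rather than the project's exact code:

import ctypes
from mpi4py import MPI

# MPICH-style builds use int-sized handles, Open MPI uses pointer-sized ones;
# matching MPI._sizeof() keeps the foreign-function ABI correct either way.
if MPI._sizeof(MPI.Comm) == ctypes.sizeof(ctypes.c_int):
    MPI_Comm = ctypes.c_int
else:
    MPI_Comm = ctypes.c_void_p

if MPI._sizeof(MPI.Info) == ctypes.sizeof(ctypes.c_int):
    MPI_Info = ctypes.c_int
else:
    MPI_Info = ctypes.c_void_p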

Example 5: testHandleAdress

 def testHandleAdress(self):
     typemap = {ctypes.sizeof(ctypes.c_int): ctypes.c_int,
                ctypes.sizeof(ctypes.c_void_p): ctypes.c_void_p}
     for obj in self.objects:
         handle_t = typemap[MPI._sizeof(obj)]
         oldobj = obj
         newobj = type(obj)()
         handle_old = handle_t.from_address(MPI._addressof(oldobj))
         handle_new = handle_t.from_address(MPI._addressof(newobj))
         handle_new.value = handle_old.value
         self.assertEqual(obj, newobj)
Developer: benkirk, Project: mpi_playground, Lines of code: 11, Source file: test_ctypes.py

Example 6: check_mpi

def check_mpi():
    mpiexec_path, _ = os.path.split(distutils.spawn.find_executable("mpiexec"))
    for executable, path in mpi4py.get_config().items():
        if executable not in ['mpicc', 'mpicxx', 'mpif77', 'mpif90', 'mpifort']:
            continue
        if mpiexec_path not in path:
            raise ImportError("mpi4py may not be configured against the same version of 'mpiexec' that you are using. The 'mpiexec' path is {mpiexec_path} and mpi4py.get_config() returns:\n{mpi4py_config}\n".format(mpiexec_path=mpiexec_path, mpi4py_config=mpi4py.get_config()))
    if 'Open MPI' not in MPI.get_vendor():
        raise ImportError("mpi4py must have been installed against Open MPI in order for StructOpt to function correctly.")
    vendor_number = ".".join([str(x) for x in MPI.get_vendor()[1]])
    if vendor_number not in mpiexec_path:
        raise ImportError("The MPI version that mpi4py was compiled against does not match the version of 'mpiexec'. mpi4py's version number is {}, and mpiexec's path is {}".format(MPI.get_vendor(), mpiexec_path))
Developer: jjmaldonis, Project: mpi-parallelization, Lines of code: 12, Source file: check_mpi.py
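
check_mpi() builds on MPI.get_vendor(), which returns a (name, version) pair describing the MPI library that mpi4py was built against. A small illustration; the printed values are examples only and depend on the installation:

from mpi4py import MPI

name, version = MPI.get_vendor()
# e.g. name == 'Open MPI' and version == (4, 1, 2)
print(name, ".".join(str(x) for x in version))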

Example 7: ncmpi_open

def ncmpi_open(name):
    if sys.version_info >= (3,0,0):
        name = bytes(name, 'utf-8')
    comm_ptr = MPI._addressof(MPI.COMM_WORLD)
    comm_val = MPI_Comm.from_address(comm_ptr)
    info_ptr = MPI._addressof(MPI.INFO_NULL)
    info_val = MPI_Info.from_address(info_ptr)
    ncid = c_int()
    retval = _ncmpi_open(comm_val, name, NC_NOWRITE, info_val, byref(ncid))
    # print("TEST")
    errcheck(retval)
    # print("TEST")
    return ncid.value
Developer: abhinavvishnu, Project: matex, Lines of code: 13, Source file: pnetcdf.py

Example 8: __init__

 def __init__(self, comm=None):
     if comm is None:
         # Should only end up here upon unpickling
         comm = MPI.COMM_WORLD
     comm_ptr = MPI._addressof(comm)
     comm_val = self.dtype.from_address(comm_ptr)
     self.value = comm_val
Developer: opesci, Project: devito, Lines of code: 7, Source file: distributed.py

Example 9: main

def main(split_into=2, nloops=3):
    world = MPI.COMM_WORLD
    rank = world.Get_rank()
    size = world.Get_size()
    if size < split_into:
        raise ValueError("The number of cores passed to 'mpiexec' must be greater than the number of desired communicators.")
    cores_per_comm = size // split_into

    # Create fake data for input for each of the different processes we will spawn
    multipliers = [i+1 for i in range(split_into)]
    if 'Open MPI' not in MPI.get_vendor():
        colors = [(i+1)//split_into for i in range(split_into)]
        data_by_process = [(str(multipliers[i]), str(colors[i])) for i in range(split_into)]
    else:
        data_by_process = [(str(multipliers[i]),) for i in range(split_into)]


    if rank == 0:
        print("At each iteration we will spawn {} workers with {} cores each out of a total of {} cores.".format(split_into, cores_per_comm, size))
        print("Those {} split communicators will get the following as input:".format(split_into))
        for i in range(split_into):
            print("    Communicator {}: {}".format(i, data_by_process[i]))

        for i in range(nloops):
            print("Iteration {}...".format(i))
            spawn_multiple(split_into, cores_per_comm, data_by_process)
Developer: jjmaldonis, Project: mpi-parallelization, Lines of code: 26, Source file: spawn_multiple_loop.py
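
main() delegates to a spawn_multiple() helper that is not shown on this page. Purely as an illustration of what such a helper could look like, here is a hypothetical sketch built on MPI.COMM_SELF.Spawn_multiple(); the worker script name and argument layout are assumptions, not taken from the original project:

import sys
from mpi4py import MPI

def spawn_multiple(split_into, cores_per_comm, data_by_process):
    # One spawn entry per sub-communicator: same interpreter, its own argv.
    commands = [sys.executable] * split_into
    args = [["worker.py"] + list(data) for data in data_by_process]  # worker.py is hypothetical
    maxprocs = [cores_per_comm] * split_into
    intercomm = MPI.COMM_SELF.Spawn_multiple(commands, args=args, maxprocs=maxprocs)
    intercomm.Disconnect()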

Example 10: getlibraryinfo

def getlibraryinfo():
    from mpi4py import MPI
    info = "MPI %d.%d" % MPI.Get_version()
    name, version = MPI.get_vendor()
    if name != "unknown":
        info += (" (%s %s)" % (name, '%d.%d.%d' % version))
    return info
Developer: erdc-cm, Project: mpi4py, Lines of code: 7, Source file: runtests.py
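
A usage note for the snippet above; the exact string depends on the installed MPI library:

print(getlibraryinfo())   # e.g. "MPI 3.1 (Open MPI 4.1.2)"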

Example 11: ensure_mpd_is_running

def ensure_mpd_is_running():
    if not is_mpd_running():
        name_of_the_vendor, version = MPI.get_vendor()
        if name_of_the_vendor == "MPICH2":
            try:
                process = subprocess.Popen(["nohup", "mpd"], close_fds=True)
            except OSError as ex:
                pass
Developer: vdhelm, Project: amuse, Lines of code: 8, Source file: background_test.py

Example 12: setup_md

 def setup_md(self, icomm_grid, xyzL, xyz_orig):
     """
     setup_md(self, dt, icomm_grid, xyzL, xyz_orig)
     Keyword arguments:
     real -- the real part (default 0.0)
     imag -- the imaginary part (default 0.0)
     """
     self.py_setup_md(MPI._handleof(icomm_grid), xyzL, xyz_orig)
Developer: Crompulence, Project: cpl-library, Lines of code: 8, Source file: cplpy.py
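
A hypothetical usage sketch (the cpl object and the domain values are assumptions, not from cpl-library): setup_md() takes an mpi4py communicator, and MPI._handleof() converts it into the raw handle expected by the wrapped C/Fortran library. A Cartesian grid communicator is a natural fit; the 2x2x1 grid below needs at least 4 MPI ranks.

from mpi4py import MPI

comm = MPI.COMM_WORLD
# Cartesian sub-communicator describing a 2 x 2 x 1 MD processor grid.
icomm_grid = comm.Create_cart(dims=[2, 2, 1], periods=[False, False, False])
# cpl.setup_md(icomm_grid, xyzL=[1.0, 1.0, 1.0], xyz_orig=[0.0, 0.0, 0.0])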

Example 13: is_mpd_running

def is_mpd_running():
    name_of_the_vendor, version = MPI.get_vendor()
    if name_of_the_vendor == 'MPICH2':
        process = subprocess.Popen(['mpdtrace'], stdout = subprocess.PIPE, stderr = subprocess.PIPE)
        (output_string, error_string) = process.communicate()
        return not (process.returncode == 255)
    else:
        return True
Developer: Ingwar, Project: amuse, Lines of code: 8, Source file: background_test.py

Example 14: _buffer_from_gpuarray

    def _buffer_from_gpuarray(self, array):
        data = array.gpudata
        # data might be an `int` or `DeviceAllocation`

        if isinstance(data, cuda.DeviceAllocation):
            return data.as_buffer(array.nbytes)
        else:
            # construct the buffer
            return MPI.make_buffer(array.gpudata, array.nbytes)
Developer: shwina, Project: gpuDA, Lines of code: 9, Source file: gpuda.py

Example 15: send

def send(data, data_package, dest=None, gpu_direct=True):
	global s_requests
	tag = 52
	dp = data_package
	# send data_package
	send_data_package(dp, dest=dest, tag=tag)

	bytes = dp.data_bytes
	memory_type = dp.memory_type
	
	if log_type in ['time','all']: st = MPI.Wtime()  # use the same clock as the Wtime() delta below

	flag = False
	request = None
	if memory_type == 'devptr': # data in the GPU
		if gpu_direct: # want to use GPU direct
			devptr = data
			buf = MPI.make_buffer(devptr.__int__(), bytes)
			ctx.synchronize()
			request = comm.Isend([buf, MPI.BYTE], dest=dest, tag=57)
			if VIVALDI_BLOCKING: MPI.Request.Wait(request)
			s_requests.append((request, buf, devptr))
			flag = True
		else:# not want to use GPU direct
		
			# copy to CPU
			shape = dp.data_memory_shape
			dtype = dp.data_contents_memory_dtype
			buf = numpy.empty(shape, dtype=dtype)
			cuda.memcpy_dtoh_async(buf, data, stream=stream_list[1])

			request = comm.Isend(buf, dest=dest, tag=57)
			if VIVALDI_BLOCKING: MPI.Request.Wait(request)
			s_requests.append((request, buf, None))
			
	else: # data in the CPU
		# want to use GPU direct, not exist case
		# not want to use GPU direct
		if dp.data_dtype == numpy.ndarray: 
			request = comm.Isend(data, dest=dest, tag=57)
			if VIVALDI_BLOCKING: MPI.Request.Wait(request)
			s_requests.append((request, data, None))
			
	if log_type in ['time','all']:
		u = dp.unique_id
		bytes = dp.data_bytes
		t = MPI.Wtime()-st
		ms = 1000*t
		bw = bytes/GIGA/t
	
		if flag:
			log("rank%d, \"%s\", u=%d, from rank%d to rank%d GPU direct send, Bytes: %dMB, time: %.3f ms, speed: %.3f GByte/sec"%(rank, name, u, rank, dest, bytes/MEGA, ms, bw),'time', log_type)
		else:
			log("rank%d, \"%s\", u=%d, from rank%d to rank%d MPI data transfer, Bytes: %dMB, time: %.3f ms, speed: %.3f GByte/sec"%(rank, name, u, rank, dest, bytes/MEGA, ms, bw),'time', log_type)
	
	return request
Developer: Anukura, Project: Vivaldi, Lines of code: 56, Source file: GPU_unit.py


Note: The mpi4py.MPI class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's License. Do not reproduce without permission.