This page collects typical usage examples of the get_comm_rank_root function from Python's pyfr.mpiutil module. If you are unsure what get_comm_rank_root does, how to call it, or where it is used in practice, the curated examples below should help.
The following presents 15 code examples of the get_comm_rank_root function, sorted by popularity.
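All of them begin the same way: get_comm_rank_root returns the global MPI communicator, the calling rank, and the root rank as a triple. A minimal sketch of the idiom (the reduced quantity here is purely illustrative):

import numpy as np
from pyfr.mpiutil import get_comm_rank_root, get_mpi

comm, rank, root = get_comm_rank_root()

# Each rank computes some local quantity...
local = np.array([float(rank + 1)])

# ...which is summed in-place across all ranks
comm.Allreduce(get_mpi('in_place'), local, op=get_mpi('sum'))

# Only the root rank reports the global result
if rank == root:
    print('global sum:', local[0])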
Example 1: __init__
def __init__(self, *args, **kwargs):
super(FileWriter, self).__init__(*args, **kwargs)
# MPI info
comm, rank, root = get_comm_rank_root()
# Get the type and shape of each element in the partition
etypes, shapes = self._system.ele_types, self._system.ele_shapes
# Gather this information onto the root rank
    eleinfo = comm.gather(list(zip(etypes, shapes)), root=root)
if rank == root:
self._mpi_rbufs = mpi_rbufs = []
self._mpi_rreqs = mpi_rreqs = []
self._mpi_names = mpi_names = []
self._loc_names = loc_names = []
for mrank, meleinfo in enumerate(eleinfo):
prank = self._rallocs.mprankmap[mrank]
for tag, (etype, dims) in enumerate(meleinfo):
name = self._get_name_for_soln(etype, prank)
if mrank == root:
loc_names.append(name)
else:
rbuf = np.empty(dims, dtype=self._backend.fpdtype)
rreq = comm.Recv_init(rbuf, mrank, tag)
mpi_rbufs.append(rbuf)
mpi_rreqs.append(rreq)
mpi_names.append(name)
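Example 1 pre-posts persistent receive requests with Recv_init so the same buffers can be reused for every write. A minimal sketch of the persistent-request pattern in plain mpi4py (buffer size and tag are illustrative):

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, root = comm.rank, 0

if rank == root:
    # One persistent receive per non-root rank, reusable across writes
    rbufs = [np.empty(4) for _ in range(comm.size - 1)]
    rreqs = [comm.Recv_init(buf, source=src, tag=0)
             for src, buf in enumerate(rbufs, start=1)]
    # Start and complete the requests (repeatable for later writes)
    MPI.Prequest.Startall(rreqs)
    MPI.Request.Waitall(rreqs)
else:
    comm.Send(np.full(4, float(rank)), dest=root, tag=0)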
Example 2: _resid
def _resid(self, dtau, x):
comm, rank, root = get_comm_rank_root()
# Get an errest kern to compute the square of the maximum residual
errest = self._get_errest_kerns()
# Prepare and run the kernel
self._prepare_reg_banks(x, x, x)
self._queue % errest(dtau, 0.0)
# L2 norm
if self._pseudo_norm == 'l2':
# Reduce locally (element types) and globally (MPI ranks)
res = np.array([sum(ev) for ev in zip(*errest.retval)])
comm.Allreduce(get_mpi('in_place'), res, op=get_mpi('sum'))
# Normalise and return
return np.sqrt(res / self._gndofs)
# L^∞ norm
else:
# Reduce locally (element types) and globally (MPI ranks)
res = np.array([max(ev) for ev in zip(*errest.retval)])
comm.Allreduce(get_mpi('in_place'), res, op=get_mpi('max'))
# Normalise and return
return np.sqrt(res)
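Both branches above reduce a per-variable vector of partial results across all ranks in place. The same norms can be assembled in plain mpi4py; here res and the global DOF count are placeholders:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

# Per-rank partial sums of squared residuals, one entry per variable
res = np.random.rand(4)

# L2: sum the squares over all ranks, normalise, then take the root
l2 = res.copy()
comm.Allreduce(MPI.IN_PLACE, l2, op=MPI.SUM)
l2 = np.sqrt(l2 / 1000)            # 1000 stands in for gndofs

# L^inf: take the global maximum instead
linf = res.copy()
comm.Allreduce(MPI.IN_PLACE, linf, op=MPI.MAX)
linf = np.sqrt(linf)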
Example 3: __init__
def __init__(self, mesh, cfg):
self.cfg = cfg
comm, rank, root = get_comm_rank_root()
# Have the root rank determine the connectivity of the mesh
if rank == root:
prankconn = self._get_mesh_connectivity(mesh)
nparts = len(prankconn)
if nparts != comm.size:
raise RuntimeError(
"Mesh has {0} partitions but running with " "{1} MPI ranks".format(nparts, comm.size)
)
else:
prankconn = None
    # Get subclass-dependent info about each rank (e.g., hostname)
rinfo = comm.gather(self._get_rank_info(), root=root)
# If we are the root rank then perform the rank allocation
if rank == root:
mprankmap = self._get_mprankmap(prankconn, rinfo)
else:
mprankmap = None
# Broadcast the connectivity and rank mappings to all other ranks
self.prankconn = prankconn = comm.bcast(prankconn, root=root)
self.mprankmap = mprankmap = comm.bcast(mprankmap, root=root)
# Invert the mapping to obtain the physical-to-MPI rank mapping
self.pmrankmap = sorted(range(comm.size), key=mprankmap.__getitem__)
# Compute our physical rank
self.prank = mprankmap[rank]
Example 4: __init__
def __init__(self, mesh, cfg):
self.cfg = cfg
comm, rank, root = get_comm_rank_root()
if rank == root:
# Determine the (physical) connectivity of the mesh
prankconn = self._get_mesh_connectivity(mesh)
nparts = len(prankconn) or 1
if nparts != comm.size:
raise RuntimeError('Mesh has %d partitions but running with '
'%d MPI ranks' % (nparts, comm.size))
else:
prankconn = None
    # Get subclass-dependent info about each rank (e.g., hostname)
rinfo = comm.gather(self._get_rank_info(), root=root)
if rank == root:
# Use this info to construct a mapping from MPI ranks to
# physical mesh ranks
mprankmap = self._get_mprankmap(prankconn, rinfo)
else:
mprankmap = None
    # Broadcast the connectivity and physical rank mappings to each MPI rank
self.prankconn = comm.bcast(prankconn, root=root)
self.mprankmap = comm.bcast(mprankmap, root=root)
# Invert this mapping
self.pmrankmap = {v: k for k, v in self.mprankmap.items()}
# Compute the physical rank of ourself
self.prank = self.mprankmap[rank]
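Examples 3 and 4 are two revisions of the same choreography: gather per-rank information onto the root, let the root make a global decision, and broadcast the decision back so every rank agrees. A stripped-down sketch of this gather/decide/bcast pattern:

import socket
from mpi4py import MPI

comm, root = MPI.COMM_WORLD, 0

# Every rank contributes its local information
rinfo = comm.gather(socket.gethostname(), root=root)

# Only the root holds the gathered data and computes the mapping
# (an identity mapping here, purely for illustration)
mprankmap = list(range(comm.size)) if comm.rank == root else None

# Broadcast so that every rank ends up with the same mapping
mprankmap = comm.bcast(mprankmap, root=root)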
Example 5: _write_parallel
def _write_parallel(self, path, solnmap, metadata):
comm, rank, root = get_comm_rank_root()
with h5py.File(path, 'w', driver='mpio', comm=comm) as h5file:
smap = {}
for name, shape in self.sollist:
smap[name] = h5file.create_dataset(
name, shape, dtype=self.backend.fpdtype
)
for e, sol in solnmap.items():
s = self._get_name_for_soln(e, self.rallocs.prank)
smap[s][:] = sol
        # The metadata must be known to every rank; dataset creation is collective
if rank == root:
mmap = [(k, len(v.encode()))
for k, v in metadata.items()]
else:
mmap = None
for name, size in comm.bcast(mmap, root=root):
d = h5file.create_dataset(name, (), dtype='S{}'.format(size))
if rank == root:
d.write_direct(np.array(metadata[name], dtype='S'))
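With h5py's mpio driver (which requires a parallel build of HDF5), create_dataset is a collective operation: every rank must create the same datasets with the same shapes, which is why the metadata names and sizes are broadcast before creation even though only the root writes the contents. A reduced sketch of the idiom, with made-up metadata and file name:

import h5py
import numpy as np
from mpi4py import MPI

comm, root = MPI.COMM_WORLD, 0

meta = {'config': '[example]'} if comm.rank == root else None
meta = comm.bcast(meta, root=root)

with h5py.File('meta.h5', 'w', driver='mpio', comm=comm) as h5file:
    for name, val in meta.items():
        # Collective: all ranks create the dataset identically...
        d = h5file.create_dataset(
            name, (), dtype='S{}'.format(len(val.encode()))
        )
        # ...but only the root performs the (independent) write
        if comm.rank == root:
            d.write_direct(np.array(val, dtype='S'))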
Example 6: _write_parallel
def _write_parallel(self, path, data, metadata):
comm, rank, root = get_comm_rank_root()
with h5py.File(path, 'w', driver='mpio', comm=comm) as h5file:
dmap = {}
for name, shape in self._global_shape_list:
dmap[name] = h5file.create_dataset(
name, shape, dtype=self.fpdtype
)
# Write out our data sets using 2 GiB chunks
for name, dat in zip(self._loc_names, data):
nrows = len(dat)
rowsz = dat.nbytes // nrows
rstep = 2*1024**3 // rowsz
if rstep == 0:
raise RuntimeError('Array is too large for parallel I/O')
for ix in range(0, nrows, rstep):
dmap[name][ix:ix + rstep] = dat[ix:ix + rstep]
        # The metadata must be known to every rank; dataset creation is collective
if rank == root:
mmap = [(k, len(v.encode()))
for k, v in metadata.items()]
else:
mmap = None
for name, size in comm.bcast(mmap, root=root):
d = h5file.create_dataset(name, (), dtype='S{}'.format(size))
if rank == root:
d.write_direct(np.array(metadata[name], dtype='S'))
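The 2 GiB ceiling on each write stems from MPI's use of a signed 32-bit integer for counts, which caps a single I/O call at 2**31 bytes. The chunking arithmetic is straightforward; for example, with 4 KiB rows:

rowsz = 4096                    # bytes per row (illustrative)
rstep = 2*1024**3 // rowsz      # = 524288 rows per <= 2 GiB write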
Example 7: __init__
def __init__(self, intg, cfgsect, suffix):
super().__init__(intg, cfgsect, suffix)
comm, rank, root = get_comm_rank_root()
# Output frequency
self.nsteps = self.cfg.getint(cfgsect, "nsteps")
# The root rank needs to open the output file
if rank == root:
# Determine the file path
fname = self.cfg.get(cfgsect, "file")
# Append the '.csv' extension
if not fname.endswith(".csv"):
fname += ".csv"
# Open for appending
self.outf = open(fname, "a")
# Output a header if required
if os.path.getsize(fname) == 0 and self.cfg.getbool(cfgsect, "header", True):
# Conservative variable list
convars = intg.system.elementscls.convarmap[self.ndims]
print(",".join(["t"] + convars), file=self.outf)
Example 8: _get_gndofs
def _get_gndofs(self):
comm, rank, root = get_comm_rank_root()
# Get the number of degrees of freedom in this partition
ndofs = sum(self.system.ele_ndofs)
# Sum to get the global number over all partitions
return comm.allreduce(ndofs, op=get_mpi('sum'))
Example 9: main
def main():
ap = ArgumentParser(prog='pyfr-sim', description='Runs a PyFR simulation')
ap.add_argument('--verbose', '-v', action='count')
ap.add_argument('--backend', '-b', default='cuda', help='Backend to use')
ap.add_argument('--progress', '-p', action='store_true',
help='show a progress bar')
ap.add_argument('--nansweep', '-n', metavar='N', type=int,
help='check for NaNs every N steps')
sp = ap.add_subparsers(help='sub-command help')
ap_run = sp.add_parser('run', help='run --help')
ap_run.add_argument('mesh', help='mesh file')
ap_run.add_argument('cfg', type=FileType('r'), help='config file')
ap_run.set_defaults(process=process_run)
ap_restart = sp.add_parser('restart', help='restart --help')
ap_restart.add_argument('mesh', help='mesh file')
ap_restart.add_argument('soln', help='solution file')
ap_restart.add_argument('cfg', nargs='?', type=FileType('r'),
help='new config file')
ap_restart.set_defaults(process=process_restart)
# Parse the arguments
args = ap.parse_args()
mesh, soln, cfg = args.process(args)
# Create a backend
backend = get_backend(args.backend, cfg)
# Bring up MPI (this must be done after we have created a backend)
mpiutil.init()
# Get the mapping from physical ranks to MPI ranks
rallocs = get_rank_allocation(mesh, cfg)
# Construct the solver
solver = get_solver(backend, rallocs, mesh, soln, cfg)
# If we are running interactively then create a progress bar
if args.progress and mpiutil.get_comm_rank_root()[1] == 0:
pb = ProgressBar(solver.tstart, solver.tcurr, solver.tend)
# Register a callback to update the bar after each step
callb = lambda intg: pb.advance_to(intg.tcurr)
solver.completed_step_handlers.append(callb)
# NaN sweeping
if args.nansweep:
def nansweep(intg):
if intg.nsteps % args.nansweep == 0:
if any(np.isnan(np.sum(s)) for s in intg.soln):
raise RuntimeError('NaNs detected at t = {}'
.format(intg.tcurr))
solver.completed_step_handlers.append(nansweep)
# Execute!
solver.run()
Example 10: __call__
def __call__(self, intg):
# Return if no output is due
if intg.nsteps % self.nsteps:
return
# MPI info
comm, rank, root = get_comm_rank_root()
# Solution matrices indexed by element type
solns = dict(zip(intg.system.ele_types, intg.soln))
# Force vector
f = np.zeros(self.ndims)
for etype, fidx in self._m0:
# Get the interpolation operator
m0 = self._m0[etype, fidx]
nfpts, nupts = m0.shape
# Extract the relevant elements from the solution
uupts = solns[etype][..., self._eidxs[etype, fidx]]
# Interpolate to the face
ufpts = np.dot(m0, uupts.reshape(nupts, -1))
ufpts = ufpts.reshape(nfpts, self.nvars, -1)
ufpts = ufpts.swapaxes(0, 1)
# Compute the pressure
p = self.elementscls.conv_to_pri(ufpts, self.cfg)[-1]
# Get the quadrature weights and normal vectors
qwts = self._qwts[etype, fidx]
norms = self._norms[etype, fidx]
# Do the quadrature
f += np.einsum('i...,ij,jik', qwts, p, norms)
# Reduce and output if we're the root rank
if rank != root:
comm.Reduce(f, None, op=get_mpi('sum'), root=root)
else:
comm.Reduce(get_mpi('in_place'), f, op=get_mpi('sum'), root=root)
# Build the row
row = [intg.tcurr] + f.tolist()
# Write
print(','.join(str(r) for r in row), file=self.outf)
# Flush to disk
self.outf.flush()
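The Reduce call above is deliberately asymmetric: non-root ranks pass their force vector as the send buffer and None as the receive buffer, while the root reduces into its own array in place. The equivalent in plain mpi4py, with an illustrative force vector:

import numpy as np
from mpi4py import MPI

comm, root = MPI.COMM_WORLD, 0

f = np.ones(3)  # this rank's contribution (illustrative)

if comm.rank != root:
    comm.Reduce(f, None, op=MPI.SUM, root=root)
else:
    comm.Reduce(MPI.IN_PLACE, f, op=MPI.SUM, root=root)
    print('total force:', f)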
Example 11: __init__
def __init__(self, intg, cfgsect, suffix):
super().__init__(intg, cfgsect, suffix)
comm, rank, root = get_comm_rank_root()
# Output frequency
self.nsteps = self.cfg.getint(cfgsect, 'nsteps')
# The root rank needs to open the output file
if rank == root:
header = ['t'] + intg.system.elementscls.convarmap[self.ndims]
# Open
self.outf = init_csv(self.cfg, cfgsect, ','.join(header))
Example 12: __init__
def __init__(self, intg, cfgsect, suffix):
super().__init__(intg, cfgsect, suffix)
# Underlying elements class
self.elementscls = intg.system.elementscls
# Output frequency
self.nsteps = self.cfg.getint(cfgsect, 'nsteps')
# List of points to be sampled and format
self.pts = ast.literal_eval(self.cfg.get(cfgsect, 'samp-pts'))
self.fmt = self.cfg.get(cfgsect, 'format', 'primitive')
# MPI info
comm, rank, root = get_comm_rank_root()
# MPI rank responsible for each point and rank-indexed info
self._ptsrank = ptsrank = []
self._ptsinfo = ptsinfo = [[] for i in range(comm.size)]
# Physical location of the solution points
plocs = [p.swapaxes(1, 2) for p in intg.system.ele_ploc_upts]
for p in self.pts:
# Find the nearest point in our partition
cp = _closest_upt(intg.system.ele_types, plocs, p)
# Reduce over all partitions
mcp, mrank = comm.allreduce(cp, op=get_mpi('minloc'))
# Store the rank responsible along with the info
ptsrank.append(mrank)
ptsinfo[mrank].append(mcp[1:])
# If we're the root rank then open the output file
if rank == root:
# Determine the file path
fname = self.cfg.get(cfgsect, 'file')
# Append the '.csv' extension
if not fname.endswith('.csv'):
fname += '.csv'
# Open for appending
self.outf = open(fname, 'a')
# Output a header if required
if (os.path.getsize(fname) == 0 and
self.cfg.getbool(cfgsect, 'header', True)):
print(self._header, file=self.outf)
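The allreduce with a minloc op does two jobs at once: it selects the globally closest point (tuples compare by their first element, the distance) and reports which rank contributed it. A sketch with made-up distances and payloads:

import random
from mpi4py import MPI

comm = MPI.COMM_WORLD

# (distance, payload) computed against this rank's partition
cp = (random.random(), 'point-info-from-rank-{}'.format(comm.rank))

# mcp is the winning tuple; mrank is the rank that supplied it
mcp, mrank = comm.allreduce(cp, op=MPI.MINLOC)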
Example 13: __init__
def __init__(self, backend, systemcls, rallocs, mesh, initsoln, cfg):
self.backend = backend
self.rallocs = rallocs
self.cfg = cfg
# Sanity checks
if self._controller_needs_errest and not self._stepper_has_errest:
raise TypeError('Incompatible stepper/controller combination')
# Start time
self.tstart = cfg.getfloat('solver-time-integrator', 't0', 0.0)
# Output times
self.tout = sorted(range_eval(cfg.get('soln-output', 'times')))
self.tend = self.tout[-1]
# Current time; defaults to tstart unless resuming a simulation
if initsoln is None or 'stats' not in initsoln:
self.tcurr = self.tstart
else:
stats = Inifile(initsoln['stats'])
self.tcurr = stats.getfloat('solver-time-integrator', 'tcurr')
# Cull already written output times
self.tout = [t for t in self.tout if t > self.tcurr]
# Ensure no time steps are in the past
if self.tout[0] < self.tcurr:
raise ValueError('Output times must be in the future')
    # Determine the amount of temp storage required by this method
nreg = self._stepper_nregs
# Construct the relevant mesh partition
self.system = systemcls(backend, rallocs, mesh, initsoln, nreg, cfg)
# Extract the UUID of the mesh (to be saved with solutions)
self._mesh_uuid = mesh['mesh_uuid']
# Get a queue for subclasses to use
self._queue = backend.queue()
# Get the number of degrees of freedom in this partition
ndofs = sum(self.system.ele_ndofs)
comm, rank, root = get_comm_rank_root()
# Sum to get the global number over all partitions
self._gndofs = comm.allreduce(ndofs, op=get_mpi('sum'))
Example 14: __init__
def __init__(self, intg, cfgsect, prefix):
super().__init__(intg, cfgsect, prefix)
self.flushsteps = self.cfg.getint(self.cfgsect, 'flushsteps', 500)
self.count = 0
self.stats = []
self.tprev = intg.tcurr
# MPI info
comm, rank, root = get_comm_rank_root()
# The root rank needs to open the output file
if rank == root:
self.outf = init_csv(self.cfg, cfgsect, 'n,t,dt,action,error')
Example 15: _invoke_postaction
def _invoke_postaction(self, **kwargs):
comm, rank, root = get_comm_rank_root()
# If we have a post-action and are the root rank then fire it
if rank == root and self.postact:
# If a post-action is currently running then wait for it
if self.postactaid is not None:
prefork.wait(self.postactaid)
# Prepare the command line
cmdline = shlex.split(self.postact.format(**kwargs))
# Invoke
if self.postactmode == "blocking":
prefork.call(cmdline)
else:
self.postactaid = prefork.call_async(cmdline)
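prefork here is PyFR's pre-forked process helper, used so that commands are spawned from a child forked before MPI and the backend come up. The root-gated fire-and-forget idea itself can be sketched with the standard library (the command line is illustrative):

import shlex
import subprocess
from pyfr.mpiutil import get_comm_rank_root

comm, rank, root = get_comm_rank_root()

if rank == root:
    cmdline = shlex.split('echo wrote {soln}'.format(soln='out.pyfrs'))
    # Blocking variant; subprocess.Popen gives the async analogue
    subprocess.call(cmdline)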