This article collects typical usage examples of the MPI.MAX attribute from the Python package mpi4py. If you are wondering what MPI.MAX does, how to use it, or would like to see it in real code, the curated examples below may help. You can also read more about its containing class, mpi4py.MPI.
Below are 15 code examples of the MPI.MAX attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
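MPI.MAX is one of mpi4py's predefined reduction operations: passed as the op argument of reduce/allreduce calls, it combines the contributions of all ranks by taking the element-wise maximum, which is exactly how the examples below use it. A minimal, self-contained sketch of that pattern (the values are arbitrary and only for illustration):

from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# every rank contributes one number; allreduce with MPI.MAX
# hands the largest of them back to all ranks
local_value = float(rank)
global_max = comm.allreduce(local_value, op=MPI.MAX)
print(f"rank {rank}: global max = {global_max}")

Run under mpiexec (e.g. mpiexec -n 4 python script.py), every rank prints the same maximum.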
Example 1: _get_minmax_coordinates_mesh
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def _get_minmax_coordinates_mesh(self, axis=0):
    """ Return the minimum and maximum coordinates along axis

    parameter:
    ----------
        axis:
            axis

    returns:
    -------
        tuple: minV, maxV
    """
    maxVal = np.zeros((1))
    minVal = np.zeros((1))
    maxVal[0] = self.Model.mesh.data[:, axis].max()
    minVal[0] = self.Model.mesh.data[:, axis].min()

    comm.Barrier()
    comm.Allreduce(_MPI.IN_PLACE, maxVal, op=_MPI.MAX)
    comm.Allreduce(_MPI.IN_PLACE, minVal, op=_MPI.MIN)
    comm.Barrier()

    return minVal, maxVal
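Example 1 uses the buffer-based Allreduce together with MPI.IN_PLACE, so the local NumPy array is overwritten with the global result instead of needing a separate receive buffer. A short stand-alone sketch of that pattern outside of any model class (the buffer contents are arbitrary):

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD

# one-element buffers holding the local extrema
max_val = np.array([float(comm.Get_rank())])
min_val = np.array([float(comm.Get_rank())])

# IN_PLACE: the buffer acts as both send and receive buffer, so after
# the call every rank holds the global maximum / minimum
comm.Allreduce(MPI.IN_PLACE, max_val, op=MPI.MAX)
comm.Allreduce(MPI.IN_PLACE, min_val, op=MPI.MIN)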
Example 2: mpi_statistics_scalar
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def mpi_statistics_scalar(x, with_min_and_max=False):
    """
    Get mean/std and optional min/max of scalar x across MPI processes.

    Args:
        x: An array containing samples of the scalar to produce statistics
            for.
        with_min_and_max (bool): If true, return min and max of x in
            addition to mean and std.
    """
    x = np.array(x, dtype=np.float32)
    global_sum, global_n = mpi_sum([np.sum(x), len(x)])
    mean = global_sum / global_n

    global_sum_sq = mpi_sum(np.sum((x - mean)**2))
    std = np.sqrt(global_sum_sq / global_n)  # compute global std

    if with_min_and_max:
        global_min = mpi_op(np.min(x) if len(x) > 0 else np.inf, op=MPI.MIN)
        global_max = mpi_op(np.max(x) if len(x) > 0 else -np.inf, op=MPI.MAX)
        return mean, std, global_min, global_max
    return mean, std
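The function above relies on two helpers, mpi_sum and mpi_op, that the snippet does not show. A plausible minimal sketch of such helpers (these definitions are an assumption for illustration, not necessarily the original project's code):

import numpy as np
from mpi4py import MPI

def mpi_op(x, op):
    # hypothetical helper: allreduce a scalar or small array with the given op
    x, scalar = ([x], True) if np.isscalar(x) else (x, False)
    x = np.asarray(x, dtype=np.float32)
    buff = np.zeros_like(x)
    MPI.COMM_WORLD.Allreduce(x, buff, op=op)
    return buff[0] if scalar else buff

def mpi_sum(x):
    # hypothetical helper: element-wise sum across all MPI processes
    return mpi_op(x, MPI.SUM)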
Example 3: __abs__
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def __abs__(self):
    """
    Overloading the abs operator for mesh types

    Returns:
        float: absolute maximum of all mesh values
    """
    # take absolute values of the mesh values
    local_absval = np.amax(abs(self.values))

    comm = self.comm
    if comm is not None:
        if comm.Get_size() > 1:
            global_absval = comm.allreduce(sendobj=local_absval, op=MPI.MAX)
        else:
            global_absval = local_absval
    else:
        global_absval = local_absval

    return global_absval
Example 4: __abs__
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def __abs__(self):
    """
    Overloading the abs operator for mesh types

    Returns:
        float: absolute maximum of all mesh values
    """
    # take absolute values of the mesh values
    local_absval = np.amax(abs(self.values))

    comm = self.comm
    if comm is not None:
        if comm.Get_size() > 1:
            global_absval = comm.allreduce(sendobj=local_absval, op=MPI.MAX)
        else:
            global_absval = local_absval
    else:
        global_absval = local_absval

    return global_absval
Example 5: mpi_statistics_scalar
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def mpi_statistics_scalar(x, with_min_and_max=False):
    """
    Get mean/std and optional min/max of scalar x across MPI processes.

    Args:
        x: An array containing samples of the scalar to produce statistics
            for.
        with_min_and_max (bool): If true, return min and max of x in
            addition to mean and std.
    """
    x = np.array(x, dtype=np.float32)
    global_sum, global_n = mpi_sum([np.sum(x), len(x)])
    mean = global_sum / global_n

    global_sum_sq = mpi_sum(np.sum((x - mean) ** 2))
    std = np.sqrt(global_sum_sq / global_n)  # compute global std

    if with_min_and_max:
        global_min = mpi_op(np.min(x) if len(x) > 0 else np.inf, op=MPI.MIN)
        global_max = mpi_op(np.max(x) if len(x) > 0 else -np.inf, op=MPI.MAX)
        return mean, std, global_min, global_max
    return mean, std
Example 6: _get_minmax_velocity_wall
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def _get_minmax_velocity_wall(self, wall, axis=0):
    """ Return the minimum and maximum velocity component on the wall

    parameters:
    -----------
        wall: (indexSet)
            The wall.
        axis:
            axis (velocity component).
    """
    # Initialise value to max and min sys values
    maxV = np.ones((1)) * sys.float_info.min
    minV = np.ones((1)) * sys.float_info.max

    # if local domain has wall, get velocities
    if wall.data.size > 0:
        velocities = self.Model.velocityField.data[wall.data, axis]
        # get local min and max
        maxV[0] = velocities.max()
        minV[0] = velocities.min()

    # reduce operation
    comm.Barrier()
    comm.Allreduce(_MPI.IN_PLACE, maxV, op=_MPI.MAX)
    comm.Allreduce(_MPI.IN_PLACE, minV, op=_MPI.MIN)
    comm.Barrier()

    return minV, maxV
Example 7: mpi_max
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def mpi_max(value):
    global_max = np.zeros(1, dtype='float64')
    local_max = np.max(value).astype('float64')
    MPI.COMM_WORLD.Reduce(local_max, global_max, op=MPI.MAX)
    return global_max[0]
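Note that Reduce without an explicit root argument delivers the result to rank 0 only; on the other ranks global_max keeps its initial value of 0. If every rank needs the maximum, an Allreduce-based variant could look like this (a sketch, not the original project's code):

import numpy as np
from mpi4py import MPI

def mpi_max_all(value):
    # every rank receives the global maximum
    local_max = np.array([np.max(value)], dtype='float64')
    global_max = np.zeros(1, dtype='float64')
    MPI.COMM_WORLD.Allreduce(local_max, global_max, op=MPI.MAX)
    return global_max[0]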
Example 8: __init__
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def __init__(self, source, fsky, cosmo, bins=None, redshift='Redshift', weight=None):

    # input columns need to be there
    for col in [redshift, weight]:
        if col is not None and col not in source:
            raise ValueError("'%s' column missing from input source in RedshiftHistogram" % col)

    self.comm = source.comm

    # using Scott's rule for binning
    if bins is None:
        h, bins = scotts_bin_width(source.compute(source[redshift]), self.comm)
        if self.comm.rank == 0:
            self.logger.info("using Scott's rule to determine optimal binning; h = %.2e, N_bins = %d" % (h, len(bins)-1))

    # equally spaced bins from min to max val
    elif numpy.isscalar(bins):
        if self.comm.rank == 0:
            self.logger.info("computing %d equally spaced bins" % bins)
        z = source.compute(source[redshift])
        maxval = self.comm.allreduce(z.max(), op=MPI.MAX)
        minval = self.comm.allreduce(z.min(), op=MPI.MIN)
        bins = linspace(minval, maxval, bins + 1, endpoint=True)

    self.source = source
    self.cosmo = cosmo

    self.attrs = {}
    self.attrs['edges'] = bins
    self.attrs['fsky'] = fsky
    self.attrs['redshift'] = redshift
    self.attrs['weight'] = weight
    self.attrs['cosmo'] = dict(cosmo)

    # and run
    self.run()
Example 9: global_max
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def global_max(vs, arr, axis=None):
    from mpi4py import MPI
    return _reduce(vs, arr, MPI.MAX, axis=axis)
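The _reduce helper is project-internal and not shown here; the call suggests it reduces arr across the participating ranks with the given MPI operation. A rough, hypothetical stand-in that ignores the axis handling and the vs object might look like this (an assumption for illustration only):

import numpy as np
from mpi4py import MPI

def _reduce(vs, arr, op, axis=None):
    # hypothetical stand-in: element-wise allreduce of the local array;
    # the real helper would also use `vs` and honour the `axis` argument
    sendbuf = np.ascontiguousarray(arr, dtype='float64')
    recvbuf = np.empty_like(sendbuf)
    MPI.COMM_WORLD.Allreduce(sendbuf, recvbuf, op=op)
    return recvbuf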
Example 10: compute_residual
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def compute_residual(self):
    """
    Computation of the residual using the collocation matrix Q
    """

    # get current level and problem description
    L = self.level

    # check if there are new values (e.g. from a sweep)
    # assert L.status.updated

    # compute the residual for each node

    # build QF(u)
    res = self.integrate()
    res += L.u[0] - L.u[self.rank + 1]
    # add tau if associated
    if L.tau[self.rank] is not None:
        res += L.tau[self.rank]
    # use abs function from data type here
    res_norm = abs(res)

    # find maximal residual over the nodes
    L.status.residual = self.params.comm.allreduce(res_norm, op=MPI.MAX)

    # indicate that the residual has seen the new values
    L.status.updated = False

    return None
Example 11: set_msk_laplacian
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def set_msk_laplacian(self, prevlev=0, mskf=0):
    """ set the good mask and the laplacian using previous level """
    if prevlev == 0:
        self.msk = mskf
        self.compute_A_atfinest()
    else:
        mskf = prevlev.msk*1.0
        coef = ones((self.mv, self.nv))
        finetocoarse(prevlev, self, mskf, coef)
        self.msk[coef <= 0.5] = 0

        np = self.np
        mp = self.mp
        myrank = self.myrank
        iloc = myrank % np
        jloc = (myrank//np) % mp
        nh = self.nh

        self.compute_A_atcoarser(prevlev, self.msk)

    # # coefficient for the Jacobi iteration, A[:,:,4] is the main diagonal
    # val = abs(self.A[:,:,4]).max()
    # self.coef = MPI.COMM_WORLD.allreduce(val, op=MPI.MAX)
    # if self.coef != 0.:
    #     self.coef = 1./self.coef
    # else:
    #     if self.myrank == 0:
    #         print('matrix diagonal is zero')
    #         print('fix something!')
    #     exit()

    # buffer for 'smoothertwice' the Fortran subroutine
    self.yo = zeros((3, self.nv))

# ----------------------------------------
Example 12: norm
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def norm(self, x):
    """ norm = sum(x*x) """
    nbduplicates = (self.np0*self.mp0) / (self.np*self.mp)

    # computenorm is done in Fortran
    self.typenorm = 'l2'

    t0 = time()
    # MPI.COMM_WORLD.Barrier()
    if self.typenorm == 'l2':
        local_sum = computenorm(self.msk, x, self.nh)
        t1 = time()
        self.time['norm'] += t1-t0
        self.ncalls['norm'] += 1
        z = MPI.COMM_WORLD.allreduce(local_sum, op=MPI.SUM) / nbduplicates
        z = sqrt(z)
        t0 = time()
        self.time['reduce'] += t0-t1
        self.ncalls['reduce'] += 1

    if self.typenorm == 'inf':
        local_z = computemax(self.msk, x, self.nh)
        t1 = time()
        self.time['norm'] += t1-t0
        self.ncalls['norm'] += 1
        z = MPI.COMM_WORLD.allreduce(local_z, op=MPI.MAX)
        t0 = time()
        self.time['reduce'] += t0-t1
        self.ncalls['reduce'] += 1

    return z
Example 13: test_op_to_mpi
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def test_op_to_mpi(self):
    reload(util)
    assert util.op_to_mpi('+') == MPI.SUM
    assert util.op_to_mpi("sum") == MPI.SUM
    assert util.op_to_mpi("add") == MPI.SUM
    assert util.op_to_mpi('*') == MPI.PROD
    assert util.op_to_mpi("prod") == MPI.PROD
    assert util.op_to_mpi("product") == MPI.PROD
    assert util.op_to_mpi("mul") == MPI.PROD
    assert util.op_to_mpi("max") == MPI.MAX
    assert util.op_to_mpi("maximum") == MPI.MAX
    assert util.op_to_mpi("min") == MPI.MIN
    assert util.op_to_mpi("minimum") == MPI.MIN
Example 14: scotts_bin_width
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def scotts_bin_width(data, comm):
    r"""
    Return the optimal histogram bin width using Scott's rule,
    defined as:

    .. math::

        h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}

    .. note::
        This is a collective operation

    Parameters
    ----------
    data : array_like
        the array that we are histograming
    comm :
        the MPI communicator

    Returns
    -------
    dx : float
        the bin spacing
    edges : array_like
        the array holding the bin edges
    """
    # compute the mean
    csum = comm.allreduce(data.sum())
    csize = comm.allreduce(data.size)
    cmean = csum / csize

    # std dev
    rsum = comm.allreduce((abs(data - cmean)**2).sum())
    sigma = (rsum / csize)**0.5

    dx = sigma * (24. * numpy.sqrt(numpy.pi) / csize) ** (1. / 3)

    maxval = comm.allreduce(data.max(), op=MPI.MAX)
    minval = comm.allreduce(data.min(), op=MPI.MIN)

    Nbins = numpy.ceil((maxval - minval) * 1. / dx)
    Nbins = max(1, Nbins)
    edges = minval + dx * numpy.arange(Nbins + 1)
    return dx, edges
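A brief usage sketch for scotts_bin_width: each rank passes its local chunk of the sample plus a communicator, and the collective allreduce calls ensure all ranks end up with the same spacing and edges (the data below are synthetic and purely illustrative):

import numpy
from mpi4py import MPI

comm = MPI.COMM_WORLD
# each rank holds its own chunk of the (conceptually global) sample
local_data = numpy.random.normal(loc=0.5, scale=0.1, size=10000)

dx, edges = scotts_bin_width(local_data, comm)
if comm.rank == 0:
    print(f"bin width {dx:.4f}, {len(edges) - 1} bins")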
Example 15: main
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import MAX [as alias]
def main(nprocs_space=None):
    # print(MPI.Query_thread(), MPI.THREAD_MULTIPLE)

    # set MPI communicator
    comm = MPI.COMM_WORLD

    world_rank = comm.Get_rank()
    world_size = comm.Get_size()

    # split world communicator to create space-communicators
    color = int(world_rank / nprocs_space)
    space_comm = comm.Split(color=color)
    space_rank = space_comm.Get_rank()

    # split world communicator to create time-communicators
    color = int(world_rank % nprocs_space)
    time_comm = comm.Split(color=color)
    time_rank = time_comm.Get_rank()
    time_size = time_comm.Get_size()

    th = [None]

    comm.Barrier()
    t0 = time.time()

    if time_rank < time_size - 1:
        send_stuff(th, space_rank, time_rank, time_comm)

    if time_rank > 0:
        recv_stuff(space_rank, time_rank, time_comm)

    if time_rank < time_size - 1:
        sleep(100000000)
        th[0].join()

    t1 = time.time()

    comm.Barrier()

    maxtime = space_comm.allreduce(t1 - t0, MPI.MAX)

    if space_rank == 0:
        print(f'Time-Rank: {time_rank} -- Time: {maxtime}')
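One possible way to drive this function (the process counts below are assumptions, not taken from the original project): guard it with a main block and launch with enough MPI processes that the world size is a multiple of nprocs_space, e.g.

if __name__ == "__main__":
    # e.g. mpiexec -n 8 python this_script.py  ->  space communicators of
    # size 4 and time communicators of size 2
    main(nprocs_space=4)

The final allreduce with MPI.MAX over space_comm then reports, per time rank, the wall-clock time of its slowest spatial rank.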