This article compiles typical usage examples of the Python attribute mpi4py.MPI.COMM_WORLD. If you have been wondering what MPI.COMM_WORLD does and how to use it in practice, the curated examples below should help. You can also explore other uses of mpi4py.MPI, the module in which this attribute lives.
The following presents 15 code examples of MPI.COMM_WORLD, sorted by popularity by default.
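Before the examples, here is a minimal, self-contained sketch of the attribute itself (a hypothetical demo script, not taken from any of the projects below): MPI.COMM_WORLD is the communicator spanning every process launched together, and nearly every example starts by asking it for a rank and a size.

# demo.py -- run with: mpiexec -n 4 python demo.py
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()   # this process's id, in 0..size-1
size = comm.Get_size()   # total number of processes

# Rank 0 fills a buffer; Bcast copies it into every other rank's buffer in place.
data = np.arange(4, dtype='float64') if rank == 0 else np.empty(4, dtype='float64')
comm.Bcast(data, root=0)
print("rank %d of %d sees %s" % (rank, size, data))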
Example 1: _helper_runningmeanstd
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def _helper_runningmeanstd():
    # Intended to run with 3 MPI workers: rank r holds triple[r], and the
    # moments pooled across ranks by mpi_moments must match the moments of the
    # concatenated array (mpi_moments and zipsame are helpers from the
    # surrounding module; np is numpy).
    comm = MPI.COMM_WORLD
    np.random.seed(0)
    for (triple, axis) in [
            ((np.random.randn(3), np.random.randn(4), np.random.randn(5)), 0),
            ((np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2)), 0),
            ((np.random.randn(2, 3), np.random.randn(2, 4), np.random.randn(2, 4)), 1),
            ]:
        x = np.concatenate(triple, axis=axis)
        ms1 = [x.mean(axis=axis), x.std(axis=axis), x.shape[axis]]
        ms2 = mpi_moments(triple[comm.Get_rank()], axis=axis)
        for (a1, a2) in zipsame(ms1, ms2):
            print(a1, a2)
            assert np.allclose(a1, a2)
        print("ok!")
Example 2: read_eom_amplitudes
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def read_eom_amplitudes(vec_shape, filename="reom_amplitudes.hdf5", vec=None):
    task_list = generate_max_task_list(vec_shape)
    read_success = False
    # Restarting is currently disabled: everything below this early return is
    # unreachable until the TODO is resolved.
    return False, None  # TODO: find a way to keep the amplitudes consistent
                        # with the signs of the eris/t-amplitudes when restarting
    print("attempting to read in eom amplitudes from file ", filename)
    if os.path.isfile(filename):
        print("reading eom amplitudes from file. shape=", vec_shape)
        feri = h5py.File(filename, 'r', driver='mpio', comm=MPI.COMM_WORLD)
        saved_v = feri['v']
        if vec is None:
            vec = np.empty(vec_shape, dtype=saved_v.dtype)
        assert saved_v.shape == vec_shape
        task_list = generate_max_task_list(vec.shape)
        for block in task_list:
            which_slice = [slice(*x) for x in block]
            vec[tuple(which_slice)] = saved_v[tuple(which_slice)]
        feri.close()
        read_success = True
    if vec is not None and vec_shape[-1] == 1:
        vec = vec.reshape(vec_shape[:-1])
    return read_success, vec
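The read goes through h5py's MPI-IO driver, which requires h5py built against parallel HDF5. A hypothetical writer counterpart, sketched under that assumption (write_eom_amplitudes and its blocking scheme are illustrative, not part of the original module):

def write_eom_amplitudes(vec, filename="reom_amplitudes.hdf5"):
    # Every rank must open the file collectively with identical arguments.
    feri = h5py.File(filename, 'w', driver='mpio', comm=MPI.COMM_WORLD)
    saved_v = feri.create_dataset('v', vec.shape, dtype=vec.dtype)
    # Mirroring the reader, every rank writes every block; a real writer
    # would partition the task list across ranks instead.
    for block in generate_max_task_list(vec.shape):
        which_slice = tuple(slice(*x) for x in block)
        saved_v[which_slice] = vec[which_slice]
    feri.close()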
Example 3: profile
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def profile(filename=None, comm=MPI.COMM_WORLD):
def prof_decorator(f):
def wrap_f(*args, **kwargs):
pr = cProfile.Profile()
pr.enable()
result = f(*args, **kwargs)
pr.disable()
if filename is None:
pr.print_stats()
else:
filename_r = filename # + ".{}".format(comm.rank)
pr.dump_stats(filename_r)
return result
return wrap_f
return prof_decorator
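Usage follows the standard decorator-factory pattern. Note that the per-rank filename suffix is commented out above, so under MPI every rank dumps to the same path. A small sketch (the decorated function and filename are made up):

@profile(filename="worker.prof")
def heavy_step(n):
    return sum(i * i for i in range(n))

heavy_step(10 ** 6)

# Inspect the dumped stats afterwards with the standard library:
import pstats
pstats.Stats("worker.prof").sort_stats("cumulative").print_stats(5)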
Example 4: sync_from_root
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def sync_from_root(sess, variables, comm=None):
"""
Send the root node's parameters to every worker.
Arguments:
sess: the TensorFlow session.
variables: all parameter variables including optimizer's
"""
if comm is None: comm = MPI.COMM_WORLD
rank = comm.Get_rank()
for var in variables:
if rank == 0:
comm.Bcast(sess.run(var))
else:
import tensorflow as tf
returned_var = np.empty(var.shape, dtype='float32')
comm.Bcast(returned_var)
sess.run(tf.assign(var, returned_var))
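A typical call site, sketched for TF1-style code (the session setup here is illustrative): every rank builds and initializes the same graph, then adopts rank 0's values.

import tensorflow as tf

sess = tf.Session()
sess.run(tf.global_variables_initializer())     # each rank gets its own random init
sync_from_root(sess, tf.trainable_variables())  # then all ranks adopt rank 0's weights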
Example 5: __init__
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def __init__(self):
# Initialize communicator and obtain standard MPI variables
comm = MPI.COMM_WORLD
self.comm = comm
self.rank = comm.Get_rank()
self.num_procs = comm.Get_size()
self.name = MPI.Get_processor_name()
# Define master rank
self.master_rank = 0
# Define message tags for task, result, and announce
self.task_tag = 10
self.result_tag = 20
self.announce_tag = 30
        # Create an empty message buffer (stored on self so it outlives __init__)
        self.messages = []
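The tags above define a small message protocol; a minimal master/worker round-trip using them could look like this (a hypothetical run method, not part of the original class):

    def run(self):
        if self.rank == self.master_rank:
            # Master: send one task to each worker, then collect every result.
            for worker in range(1, self.num_procs):
                self.comm.send({"job": worker}, dest=worker, tag=self.task_tag)
            for _ in range(1, self.num_procs):
                status = MPI.Status()
                result = self.comm.recv(source=MPI.ANY_SOURCE, tag=self.result_tag, status=status)
                print("rank", status.Get_source(), "returned", result)
        else:
            # Worker: receive a task, do the work, send the result back.
            task = self.comm.recv(source=self.master_rank, tag=self.task_tag)
            self.comm.send({"done": task["job"]}, dest=self.master_rank, tag=self.result_tag)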
Example 6: mpi_mean
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def mpi_mean(arr, axis=0, comm=None, keepdims=False):
"""
calculates the mean of an array, using MPI
:param arr: (np.ndarray)
:param axis: (int or tuple or list) the axis to run the means over
    :param comm: (MPI communicator) if None, defaults to MPI.COMM_WORLD
    :param keepdims: (bool) keep the other dimensions intact
    :return: ((np.ndarray or Number), float) the pooled mean and the total element count along `axis` across all workers
"""
arr = np.asarray(arr)
assert arr.ndim > 0
if comm is None:
comm = MPI.COMM_WORLD
xsum = arr.sum(axis=axis, keepdims=keepdims)
size = xsum.size
localsum = np.zeros(size + 1, arr.dtype)
localsum[:size] = xsum.ravel()
localsum[size] = arr.shape[axis]
globalsum = np.zeros_like(localsum)
comm.Allreduce(localsum, globalsum, op=MPI.SUM)
return globalsum[:size].reshape(xsum.shape) / globalsum[size], globalsum[size]
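Called with different data on each rank, it returns the mean over every worker's elements together with the pooled count. A quick sketch (run under mpiexec with any number of processes):

comm = MPI.COMM_WORLD
local = np.arange(3, dtype='float64') + 3 * comm.Get_rank()  # rank r holds [3r, 3r+1, 3r+2]
mean, count = mpi_mean(local, axis=0)
# Across n ranks the pooled data is 0..3n-1, so mean == (3n - 1) / 2 and count == 3n.
print(mean, count)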
Example 7: mpi_moments
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def mpi_moments(arr, axis=0, comm=None, keepdims=False):
"""
calculates the mean and std of an array, using MPI
:param arr: (np.ndarray)
:param axis: (int or tuple or list) the axis to run the moments over
    :param comm: (MPI communicator) if None, defaults to MPI.COMM_WORLD
    :param keepdims: (bool) keep the other dimensions intact
    :return: ((np.ndarray or Number), (np.ndarray or Number), float) the mean, the std, and the pooled element count across all workers
"""
arr = np.asarray(arr)
assert arr.ndim > 0
mean, count = mpi_mean(arr, axis=axis, comm=comm, keepdims=True)
sqdiffs = np.square(arr - mean)
meansqdiff, count1 = mpi_mean(sqdiffs, axis=axis, comm=comm, keepdims=True)
assert count1 == count
std = np.sqrt(meansqdiff)
if not keepdims:
newshape = mean.shape[:axis] + mean.shape[axis + 1:]
mean = mean.reshape(newshape)
std = std.reshape(newshape)
return mean, std, count
Example 8: _helper_runningmeanstd
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def _helper_runningmeanstd():
comm = MPI.COMM_WORLD
np.random.seed(0)
for (triple, axis) in [
((np.random.randn(3), np.random.randn(4), np.random.randn(5)), 0),
((np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2)), 0),
((np.random.randn(2, 3), np.random.randn(2, 4), np.random.randn(2, 4)), 1)]:
arr = np.concatenate(triple, axis=axis)
ms1 = [arr.mean(axis=axis), arr.std(axis=axis), arr.shape[axis]]
ms2 = mpi_moments(triple[comm.Get_rank()], axis=axis)
for (res_1, res_2) in zipsame(ms1, ms2):
print(res_1, res_2)
assert np.allclose(res_1, res_2)
print("ok!")
Example 9: mpi_moments
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def mpi_moments(x, axis=0):
    x = np.asarray(x, dtype='float64')
    newshape = list(x.shape)
    newshape.pop(axis)
    n = np.prod(newshape, dtype=int)
    # Pack sum, sum of squares, and count into one buffer so a single
    # Allreduce pools all three statistics across workers.
    totalvec = np.zeros(n * 2 + 1, 'float64')
    addvec = np.concatenate([x.sum(axis=axis).ravel(),
                             np.square(x).sum(axis=axis).ravel(),
                             np.array([x.shape[axis]], dtype='float64')])
    MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
    sum = totalvec[:n]
    sumsq = totalvec[n:2 * n]
    count = totalvec[2 * n]
    if count == 0:
        mean = np.empty(newshape)
        mean[:] = np.nan
        std = np.empty(newshape)
        std[:] = np.nan
    else:
        mean = sum / count
        # One-pass variance: Var[x] = E[x^2] - (E[x])^2, clamped at 0 for safety.
        std = np.sqrt(np.maximum(sumsq / count - np.square(mean), 0))
    return mean, std, count
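The else-branch relies on the one-pass identity Var[x] = E[x^2] - (E[x])^2, with the max(..., 0) guarding against tiny negative values from floating-point cancellation. A single-process numpy check of the identity (illustrative only):

import numpy as np

x = np.random.randn(1000)
one_pass = np.sqrt(np.maximum(np.square(x).mean() - np.square(x.mean()), 0))
assert np.allclose(one_pass, x.std())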
Example 10: test_runningmeanstd
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def test_runningmeanstd():
    # Same check as Example 1, packaged as a test: run with 3 MPI workers so
    # that rank r contributes triple[r].
    comm = MPI.COMM_WORLD
    np.random.seed(0)
    for (triple, axis) in [
            ((np.random.randn(3), np.random.randn(4), np.random.randn(5)), 0),
            ((np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2)), 0),
            ((np.random.randn(2, 3), np.random.randn(2, 4), np.random.randn(2, 4)), 1),
            ]:
        x = np.concatenate(triple, axis=axis)
        ms1 = [x.mean(axis=axis), x.std(axis=axis), x.shape[axis]]
        ms2 = mpi_moments(triple[comm.Get_rank()], axis=axis)
        for (a1, a2) in zipsame(ms1, ms2):
            print(a1, a2)
            assert np.allclose(a1, a2)
        print("ok!")
Example 11: mpi_mean
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def mpi_mean(x, axis=0, comm=None, keepdims=False):
x = np.asarray(x)
assert x.ndim > 0
if comm is None: comm = MPI.COMM_WORLD
xsum = x.sum(axis=axis, keepdims=keepdims)
n = xsum.size
localsum = np.zeros(n+1, x.dtype)
localsum[:n] = xsum.ravel()
localsum[n] = x.shape[axis]
globalsum = np.zeros_like(localsum)
comm.Allreduce(localsum, globalsum, op=MPI.SUM)
return globalsum[:n].reshape(xsum.shape) / globalsum[n], globalsum[n]
Example 12: update
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def update(self, x):
    x = x.astype('float64')
    n = int(np.prod(self.shape))
    totalvec = np.zeros(n * 2 + 1, 'float64')
    # Pool the batch sum, sum of squares, and batch size across all workers,
    # then fold them into the running filter parameters.
    addvec = np.concatenate([x.sum(axis=0).ravel(),
                             np.square(x).sum(axis=0).ravel(),
                             np.array([len(x)], dtype='float64')])
    MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
    self.incfiltparams(totalvec[0:n].reshape(self.shape),
                       totalvec[n:2 * n].reshape(self.shape),
                       totalvec[2 * n])
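Here incfiltparams belongs to the surrounding running-mean-std class. A plain-numpy stand-in showing what such state updates typically look like (hypothetical, not the original TensorFlow-backed implementation):

import numpy as np

class SimpleRunningMeanStd:
    """Numpy-only sketch tracking count, mean, and std from pooled sums."""
    def __init__(self, shape, epsilon=1e-2):
        self.shape = shape
        self.count = epsilon
        self.sum = np.zeros(shape, 'float64')
        self.sumsq = np.full(shape, epsilon, 'float64')

    def incfiltparams(self, newsum, newsumsq, newcount):
        # Fold pooled per-batch statistics into the running totals.
        self.count += newcount
        self.sum += newsum
        self.sumsq += newsumsq
        self.mean = self.sum / self.count
        self.std = np.sqrt(np.maximum(self.sumsq / self.count - np.square(self.mean), 1e-2))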
Example 13: test_dist
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def test_dist():
    # Requires exactly two MPI workers (run with `mpiexec -n 2`): rank 0 feeds
    # the p-chunks, rank 1 the q-chunks, and the pooled running statistics
    # must match those of the concatenated data.
    np.random.seed(0)
    p1, p2, p3 = (np.random.randn(3, 1), np.random.randn(4, 1), np.random.randn(5, 1))
    q1, q2, q3 = (np.random.randn(6, 1), np.random.randn(7, 1), np.random.randn(8, 1))
    # p1, p2, p3 = (np.random.randn(3), np.random.randn(4), np.random.randn(5))
    # q1, q2, q3 = (np.random.randn(6), np.random.randn(7), np.random.randn(8))
    comm = MPI.COMM_WORLD
    assert comm.Get_size() == 2
    if comm.Get_rank() == 0:
        x1, x2, x3 = p1, p2, p3
    elif comm.Get_rank() == 1:
        x1, x2, x3 = q1, q2, q3
    else:
        assert False
    rms = RunningMeanStd(epsilon=0.0, shape=(1,))
    U.initialize()
    rms.update(x1)
    rms.update(x2)
    rms.update(x3)
    bigvec = np.concatenate([p1, p2, p3, q1, q2, q3])

    def checkallclose(x, y):
        print(x, y)
        return np.allclose(x, y)

    assert checkallclose(
        bigvec.mean(axis=0),
        rms.mean.eval(),
    )
    assert checkallclose(
        bigvec.std(axis=0),
        rms.std.eval(),
    )
Example 14: __init__
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def __init__(self, var_list, *, beta1=0.9, beta2=0.999, epsilon=1e-08, scale_grad_by_procs=True, comm=None):
self.var_list = var_list
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.scale_grad_by_procs = scale_grad_by_procs
size = sum(U.numel(v) for v in var_list)
self.m = np.zeros(size, 'float32')
self.v = np.zeros(size, 'float32')
self.t = 0
self.setfromflat = U.SetFromFlat(var_list)
self.getflat = U.GetFlat(var_list)
self.comm = MPI.COMM_WORLD if comm is None else comm
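For context, the matching update step in Baselines-style MpiAdam pools the flat gradients with Allreduce and then applies a bias-corrected Adam step. The sketch below is reconstructed from that pattern and may differ in detail from the exact source:

    def update(self, localg, stepsize):
        localg = localg.astype('float32')
        globalg = np.zeros_like(localg)
        self.comm.Allreduce(localg, globalg, op=MPI.SUM)  # pool gradients over workers
        if self.scale_grad_by_procs:
            globalg /= self.comm.Get_size()
        self.t += 1
        # Bias-corrected step size, then standard Adam moment updates on the flat gradient.
        a = stepsize * np.sqrt(1 - self.beta2 ** self.t) / (1 - self.beta1 ** self.t)
        self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
        self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
        step = (-a) * self.m / (np.sqrt(self.v) + self.epsilon)
        self.setfromflat(self.getflat() + step)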
Example 15: setup_mpi_gpus
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import COMM_WORLD [as alias]
def setup_mpi_gpus():
"""
Set CUDA_VISIBLE_DEVICES using MPI.
"""
num_gpus = gpu_count()
if num_gpus == 0:
return
local_rank, _ = get_local_rank_size(MPI.COMM_WORLD)
os.environ['CUDA_VISIBLE_DEVICES'] = str(local_rank % num_gpus)
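gpu_count and get_local_rank_size are sibling helpers from the same module. One plausible implementation of the latter, sketched after the Baselines version (the details here are an assumption):

import platform
from collections import defaultdict

def get_local_rank_size(comm):
    """Return (local_rank, local_size): this rank's index among, and the total
    number of, the ranks running on the same physical node."""
    this_node = platform.node()
    ranks_nodes = comm.allgather((comm.Get_rank(), this_node))
    node_counts = defaultdict(int)
    local_rank = None
    for rank, node in ranks_nodes:
        if rank == comm.Get_rank():
            local_rank = node_counts[node]
        node_counts[node] += 1
    assert local_rank is not None
    return local_rank, node_counts[this_node]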