This article collects typical usage examples of the mpi4py.MPI.DOUBLE attribute in Python. If you are unsure what MPI.DOUBLE is for, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore the containing class, mpi4py.MPI, for further usage examples.
The following shows 15 code examples of MPI.DOUBLE, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
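Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: pairing a float64 NumPy buffer with MPI.DOUBLE in mpi4py's buffer-based (uppercase) communication calls. All names and shapes below are illustrative and are not taken from any of the examples.

# Minimal sketch: broadcast and reduce a float64 array with MPI.DOUBLE.
# Run with e.g. `mpiexec -n 4 python script.py` (names are illustrative).
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# float64 arrays map to MPI.DOUBLE in buffer-based calls.
data = np.zeros(8, dtype=np.float64)
if rank == 0:
    data[:] = np.arange(8, dtype=np.float64)

# Broadcast the root's array to every rank.
comm.Bcast([data, MPI.DOUBLE], root=0)

# Sum each rank's contribution into a result array on all ranks.
result = np.empty_like(data)
comm.Allreduce([data * (rank + 1), MPI.DOUBLE], [result, MPI.DOUBLE], op=MPI.SUM)

if rank == 0:
    print(result)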
Example 1: get_array_buffer
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def get_array_buffer(vs, arr):
    from mpi4py import MPI

    MPI_TYPE_MAP = {
        'int8': MPI.CHAR,
        'int16': MPI.SHORT,
        'int32': MPI.INT,
        'int64': MPI.LONG,
        'int128': MPI.LONG_LONG,
        'float32': MPI.FLOAT,
        'float64': MPI.DOUBLE,
        'bool': MPI.BOOL,
    }

    if rs.backend == 'bohrium':
        if np.check(arr):
            buf = np.interop_numpy.get_array(arr)
        else:
            buf = arr
    else:
        buf = arr

    return [buf, arr.size, MPI_TYPE_MAP[str(arr.dtype)]]
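The returned list is the [buffer, count, datatype] triple that mpi4py's uppercase communication methods accept. A hypothetical caller might use it as sketched below; the communicator, the array, and the `vs` state object are assumptions for illustration, not part of the snippet above.

# Hypothetical caller: sum `arr` across ranks using the buffer spec built above
# (assumes an initialized state object `vs` and MPI.COMM_WORLD).
comm = MPI.COMM_WORLD
arr = np.zeros(16, dtype='float64')
recv = np.empty_like(arr)
comm.Allreduce(get_array_buffer(vs, arr), [recv, arr.size, MPI.DOUBLE], op=MPI.SUM)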
Example 2: _eval_models
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def _eval_models(self, models, it):
    n = models.shape[0]
    if self._mpi:
        starttime_parallel = MPI.Wtime()
        fit = np.zeros(n)
        fit_mpi = np.zeros_like(fit)
        self._mpi_comm.Barrier()
        self._mpi_comm.Bcast([ models, MPI.DOUBLE ], root = 0)
        for i in np.arange(self._mpi_rank, n, self._mpi_size):
            fit_mpi[i] = self._func(self._unstandardize(models[i]))
        self._mpi_comm.Barrier()
        self._mpi_comm.Allreduce([ fit_mpi, MPI.DOUBLE ], [ fit, MPI.DOUBLE ],
                                 op = MPI.SUM)
        self._time_parallel[it-1] = MPI.Wtime() - starttime_parallel
    else:
        fit = np.array([ self._func(self._unstandardize(models[i])) for i in range(n) ])
    self._n_eval += n
    return fit
Example 3: collect_all_XY
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def collect_all_XY(self, root=0):
    if self.mpi_comm is None:
        XY = [self.obslayer.Y.copy()]
        for l in self.layers: XY.append(l.X.copy())
        return XY
    else:
        from mpi4py import MPI
        from GPy.core.parameterization.variational import NormalPosterior
        N, D = self.Y.shape
        N_list = np.array(self.mpi_comm.allgather(N))
        N_all = np.sum(N_list)
        Y_all = np.empty((N_all, D)) if self.mpi_comm.rank == root else None
        self.mpi_comm.Gatherv([self.Y, MPI.DOUBLE], [Y_all, (N_list*D, None), MPI.DOUBLE], root=root)
        if self.mpi_comm.rank == root:
            XY = [Y_all]
        for l in self.layers:
            Q = l.X.shape[1]
            X_mean_all = np.empty((N_all, Q)) if self.mpi_comm.rank == root else None
            self.mpi_comm.Gatherv([l.X.mean.values, MPI.DOUBLE], [X_mean_all, (N_list*Q, None), MPI.DOUBLE], root=root)
            X_var_all = np.empty((N_all, Q)) if self.mpi_comm.rank == root else None
            self.mpi_comm.Gatherv([l.X.variance.values, MPI.DOUBLE], [X_var_all, (N_list*Q, None), MPI.DOUBLE], root=root)
            if self.mpi_comm.rank == root:
                XY.append(NormalPosterior(X_mean_all, X_var_all))
        if self.mpi_comm.rank == root:
            return XY
        else:
            return None
Example 4: async_fetch_weights_async
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def async_fetch_weights_async(self):
    # deprecated
    request_layers = []
    layers_to_update = []
    for layer_idx, layer in enumerate(self.model_recv_buf.recv_buf):
        if self.model_recv_buf.layer_cur_step[layer_idx] < self.cur_step:
            layers_to_update.append(layer_idx)
            req = self.comm.Irecv([self.model_recv_buf.recv_buf[layer_idx], MPI.DOUBLE], source=0, tag=11+layer_idx)
            request_layers.append(req)
    assert (len(layers_to_update) == len(request_layers))
    weights_to_update = []
    for req_idx, req_l in enumerate(request_layers):
        req_l.wait()
        weights = self.model_recv_buf.recv_buf[req_idx]
        weights_to_update.append(weights)
        # we also need to update the layer cur step here:
        self.model_recv_buf.layer_cur_step[req_idx] = self.cur_step
    self.model_update(weights_to_update)
Example 5: _send_grads
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def _send_grads(self):
    req_send_check = []
    encode_time_counter_ = 0
    for p_index, p in enumerate(self.network.parameters()):
        if self._device.type == "cuda":
            grad = p.grad.to(torch.device("cpu")).detach().numpy().astype(np.float32)
        else:
            grad = p.grad.detach().numpy().astype(np.float32)
        # wait until grad of last layer shipped to PS
        if len(req_send_check) != 0:
            req_send_check[-1].wait()
        if self._compress_grad == "compress":
            _compressed_grad = g_compress(grad)
            req_isend = self.comm.isend(_compressed_grad, dest=0, tag=88+p_index)
            req_send_check.append(req_isend)
        else:
            # note: `grad` was cast to float32 above, but the buffer is tagged
            # MPI.DOUBLE here; the receiving side must interpret it consistently
            req_isend = self.comm.Isend([grad, MPI.DOUBLE], dest=0, tag=88+p_index)
            req_send_check.append(req_isend)
    req_send_check[-1].wait()
Example 6: runParallel
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def runParallel():
    from Florence.FiniteElements.Assembly._LowLevelAssembly_ import _LowLevelAssemblyExplicit_Par_
    from Florence import DisplacementFormulation, DisplacementPotentialFormulation

    comm = MPI.Comm.Get_parent()
    size = comm.Get_size()
    rank = comm.Get_rank()

    T_all_size = np.empty(3,'i')
    comm.Bcast(T_all_size, root=0)
    funcs = None
    funcs = comm.bcast(funcs, root=0)

    nnode = T_all_size[0]
    ndim = T_all_size[1]
    nvar = T_all_size[2]

    T_all = np.zeros((nnode,nvar),np.float64)
    Eulerx = np.zeros((nnode,ndim),np.float64)
    comm.Bcast([Eulerx, MPI.DOUBLE], root=0)
    Eulerp = np.zeros((nnode),np.float64)
    comm.Bcast([Eulerp, MPI.DOUBLE], root=0)

    for proc in range(size):
        if proc == rank:
            functor = funcs[proc]
            pnodes = funcs[proc].pnodes
            # tt = time()
            T = _LowLevelAssemblyExplicit_Par_(functor.formulation.function_spaces[0],
                functor.formulation, functor.mesh, functor.material, Eulerx[pnodes,:], Eulerp[pnodes])
            T_all[pnodes,:] += T.reshape(pnodes.shape[0],nvar)
            # print(time()-tt)

    comm.Reduce([T_all, MPI.DOUBLE], None, root=0)
    comm.Disconnect()
Example 7: numpy_to_MPI_typemap
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def numpy_to_MPI_typemap(np_type):
    from mpi4py import MPI
    typemap = {
        np.dtype(np.float64) : MPI.DOUBLE,
        np.dtype(np.float32) : MPI.FLOAT,
        np.dtype(np.int)     : MPI.INT,
        np.dtype(np.int8)    : MPI.CHAR,
        np.dtype(np.uint8)   : MPI.UNSIGNED_CHAR,
        np.dtype(np.int32)   : MPI.INT,
        np.dtype(np.uint32)  : MPI.UNSIGNED_INT,
    }
    return typemap[np_type]
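A typical use of such a typemap is to pick the MPI datatype that matches an array's dtype when building a buffer spec. The short sketch below is hypothetical; the communicator and the array are illustrative and not taken from the repository this snippet comes from.

# Hypothetical usage: look up the MPI datatype matching an array's dtype
# and use it in a buffer spec for a collective call.
comm = MPI.COMM_WORLD
arr = np.ones(16, dtype=np.float64)
comm.Bcast([arr, arr.size, numpy_to_MPI_typemap(arr.dtype)], root=0)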
Example 8: synchronize_locations
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def synchronize_locations(self, start_loc_local, end_loc_local, Debug=False):
    """
    Gathers the scores from all the updated locations, and propagates them across the processes.
    """
    base = int((len(self.scores)/self.scores_per_location) / self.mpi.size)
    leftover = int((len(self.scores)/self.scores_per_location) % self.mpi.size)
    if Debug:
        print("Sync Locs:", self.mpi.rank, base, leftover, len(self.scores), file=sys.stderr)

    sizes = np.ones(self.mpi.size, dtype='i')*base
    sizes[:leftover] += 1
    sizes *= self.scores_per_location
    offsets = np.zeros(self.mpi.size, dtype='i')
    offsets[1:] = np.cumsum(sizes)[:-1]
    assert np.sum(sizes) == len(self.scores)
    assert offsets[-1] + sizes[-1] == len(self.scores)

    # Populate scores array
    scores_start = int(offsets[self.mpi.rank])
    local_scores_size = int(sizes[self.mpi.rank])
    local_scores = self.scores[scores_start:scores_start+local_scores_size].copy()
    if Debug and self.mpi.rank == 0:
        print("start of synchronize_locations MPI call.", file=sys.stderr)
    #print(self.mpi.rank, local_scores, self.scores, sizes, offsets)
    self.mpi.comm.Allgatherv(local_scores, [self.scores, sizes, offsets, MPI.DOUBLE])
    if Debug and self.mpi.rank == 0:
        print("end of synchronize_locations", file=sys.stderr)
Example 9: gather
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def gather(self, x, y):
    # if self.myrank==0:
    #     print("gather",x.shape,self.sbuff.shape,self.rbuff.shape,self.np,self.mp,self.n1,self.m1)
    for k in range(self.nbtimes):
        self.localcomm.Allgatherv(x.ravel(),
                                  [self.rbuff, self.sizes, self.offsets, MPI.DOUBLE])
        b = self.rbuff.reshape((self.mp, self.np, self.m, self.n))
        buffertodomain(b, y, self.nh, self.m1, self.n1)
Example 10: paired_update
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def paired_update(comm, previous_encounters_s, Count_sz_local, Count_sz_pair, \
        Count_sz_others, P_local, P_pair):
    rank = comm.rank
    comm.isend(rank, dest=MASTER, tag=Msg.PAIRME.value)
    pair_id = comm.recv(source=MASTER, tag=Msg.PAIRED.value)

    if pair_id == rank:  # Paired with self, do nothing
        return False
    elif pair_id < rank:
        comm.Recv([Count_sz_pair, MPI.INT], source=pair_id)
        comm.Recv([P_pair, MPI.DOUBLE], source=pair_id)
        comm.Send([Count_sz_local, MPI.INT], dest=pair_id)
        comm.Send([P_local, MPI.DOUBLE], dest=pair_id)
    else:
        comm.Send([Count_sz_local, MPI.INT], dest=pair_id)
        comm.Send([P_local, MPI.DOUBLE], dest=pair_id)
        comm.Recv([Count_sz_pair, MPI.INT], source=pair_id)
        comm.Recv([P_pair, MPI.DOUBLE], source=pair_id)

    # Update Counts
    # [:] is to avoid copies of arrays. Make sure we dont lose anything
    N_til_s = previous_encounters_s[pair_id]
    Count_sz_others[:] = Count_sz_others + Count_sz_pair - N_til_s
    N_til_s[:] = Count_sz_pair
    P_local[:] = (P_local + P_pair) / 2.0
    return True
Example 11: receive_workload
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def receive_workload(comm):
    sizes = np.zeros(6, dtype='i')
    comm.Recv([sizes, MPI.INT], source=MASTER)

    num_lines = sizes[0]
    nz = sizes[1]
    nh = sizes[2]
    ns = sizes[3]
    n_residency_priors = sizes[4]
    mem_size = sizes[5]

    Count_zh = np.zeros(shape=(nz, nh), dtype='i4')
    Count_sz = np.zeros(shape=(ns, nz), dtype='i4')
    count_h = np.zeros(shape=(nh, ), dtype='i4')
    count_z = np.zeros(shape=(nz, ), dtype='i4')

    Dts = np.zeros(shape=(num_lines, mem_size), dtype='f8')
    Trace = np.zeros(shape=(num_lines, 2 + (mem_size + 1)), dtype='i4')
    comm.Recv([Dts, MPI.DOUBLE], source=MASTER)
    comm.Recv([Trace, MPI.INT], source=MASTER)

    priors = np.zeros(2 + n_residency_priors, dtype='f8')
    comm.Recv([priors, MPI.DOUBLE], source=MASTER)
    alpha_zh = priors[0]
    beta_zs = priors[1]
    residency_priors = priors[2:]

    kernel_class = comm.recv(source=MASTER)
    P = np.zeros(shape=(nz, n_residency_priors), dtype='f8')
    comm.Recv([P, MPI.DOUBLE], source=MASTER)

    kernel = kernel_class()
    kernel.build(Trace.shape[0], Count_zh.shape[0], residency_priors)
    if n_residency_priors > 0:
        kernel.update_state(P)

    return Dts, Trace, Count_zh, Count_sz, \
        count_h, count_z, alpha_zh, beta_zs, kernel
Example 12: work
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def work():
    comm = MPI.COMM_WORLD
    rank = comm.rank

    # pr = cProfile.Profile()
    # pr.enable()
    while True:
        status = MPI.Status()
        msg = comm.recv(source=MASTER, tag=MPI.ANY_TAG, status=status)
        event = status.Get_tag()

        if event == Msg.LEARN.value:
            comm.isend(rank, dest=MASTER, tag=Msg.STARTED.value)
            num_iter = msg

            Dts, Trace, Count_zh, Count_sz, count_h, count_z, \
                alpha_zh, beta_zs, kernel = receive_workload(comm)
            fast_populate(Trace, Count_zh, Count_sz, count_h, count_z)
            sample(Dts, Trace, Count_zh, Count_sz, count_h, count_z,
                   alpha_zh, beta_zs, kernel, num_iter, comm)

            comm.isend(rank, dest=MASTER, tag=Msg.FINISHED.value)
        elif event == Msg.SENDRESULTS.value:
            comm.Send([np.array(Trace[:, -1], order='C'), MPI.INT], dest=MASTER)
            comm.Send([Count_zh, MPI.INT], dest=MASTER)
            comm.Send([Count_sz, MPI.INT], dest=MASTER)
            comm.Send([count_h, MPI.INT], dest=MASTER)
            comm.Send([count_z, MPI.INT], dest=MASTER)
            comm.Send([kernel.get_state(), MPI.DOUBLE], dest=MASTER)
        elif event == Msg.STOP.value:
            break
        else:
            print('Unknown message received', msg, event, Msg(event))

    # pr.disable()
    # pr.dump_stats('worker-%d.pstats' % rank)
Example 13: dispatch_jobs
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def dispatch_jobs(Dts, Trace, Count_zh, Count_sz, count_h, \
        count_z, alpha_zh, beta_zs, kernel, residency_priors, \
        workloads, num_workers, comm):
    for worker_id in range(1, num_workers + 1):
        idx = workloads[worker_id - 1]

        sizes = np.zeros(6, dtype='i')
        sizes[0] = Trace[idx].shape[0]
        sizes[1] = Count_zh.shape[0]
        sizes[2] = Count_zh.shape[1]
        sizes[3] = Count_sz.shape[0]
        sizes[4] = residency_priors.shape[0]
        sizes[5] = Dts.shape[1]
        comm.Send([sizes, MPI.INT], dest=worker_id)

        # Dts is a float64 array (the matching Recv in receive_workload uses
        # MPI.DOUBLE), so send it as MPI.DOUBLE as well
        comm.Send([Dts[idx], MPI.DOUBLE], dest=worker_id)
        comm.Send([Trace[idx], MPI.INT], dest=worker_id)

        priors = np.zeros(2 + residency_priors.shape[0], dtype='f8')
        priors[0] = alpha_zh
        priors[1] = beta_zs
        priors[2:] = residency_priors
        comm.Send([priors, MPI.DOUBLE], dest=worker_id)

        comm.send(kernel.__class__, dest=worker_id)
        comm.Send([kernel.get_state(), MPI.DOUBLE], dest=worker_id)
Example 14: async_fetch_gradient_start
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def async_fetch_gradient_start(self):
    '''make gradient fetch requests and return the request list'''
    # `gradient_fetch_requests` should have length #fc_layer * num_grad_to_collect
    gradient_fetch_requests = []
    for layer_idx, layer in enumerate(self.network.parameters()):
        for k in range(self._num_grad_to_collect):
            if self._compress_grad == 'compress':
                req = self.comm.irecv(self.grad_accumulator.gradient_aggregator[layer_idx][k], source=k+1, tag=88+layer_idx)
            else:
                req = self.comm.Irecv([self.grad_accumulator.gradient_aggregator[layer_idx][k], MPI.DOUBLE], source=k+1, tag=88+layer_idx)
            gradient_fetch_requests.append(req)
    return gradient_fetch_requests
Example 15: test_dtype_to_mpi
# Required import: from mpi4py import MPI [as alias]
# Or: from mpi4py.MPI import DOUBLE [as alias]
def test_dtype_to_mpi(self):
    reload(util)
    assert util.dtype_to_mpi(np.dtype('bool')) == MPI.C_BOOL
    assert util.dtype_to_mpi(np.dtype('int8')) == MPI.INT8_T
    assert util.dtype_to_mpi(np.dtype('uint8')) == MPI.UINT8_T
    assert util.dtype_to_mpi(np.dtype('int16')) == MPI.INT16_T
    assert util.dtype_to_mpi(np.dtype('uint16')) == MPI.UINT16_T
    assert util.dtype_to_mpi(np.dtype('int32')) == MPI.INT32_T
    assert util.dtype_to_mpi(np.dtype('uint32')) == MPI.UINT32_T
    assert util.dtype_to_mpi(np.dtype('int64')) == MPI.INT64_T
    assert util.dtype_to_mpi(np.dtype('uint64')) == MPI.UINT64_T
    assert util.dtype_to_mpi(np.dtype('float32')) == MPI.FLOAT
    assert util.dtype_to_mpi(np.dtype('float64')) == MPI.DOUBLE
    assert util.dtype_to_mpi(np.dtype('complex64')) == MPI.C_FLOAT_COMPLEX
    assert util.dtype_to_mpi(np.dtype('complex128')) == MPI.C_DOUBLE_COMPLEX