This page collects typical usage examples of the pypar.receive function in Python. If you have been wondering what exactly pypar.receive does, how to use it, or where to find working examples, the curated snippets below should help.
Fifteen code examples of the receive function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
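All fifteen examples build on the same blocking point-to-point calls, so here is a minimal sketch of that pattern first. It assumes pypar is installed and the script is launched under MPI (e.g. mpirun -np 2 python demo.py); the ranks and payload are purely illustrative.

import pypar

myid = pypar.rank()                                   # rank of this process
if myid == 0:
    pypar.send({'msg': 'hello'}, destination=1)       # blocking send to rank 1
elif myid == 1:
    data = pypar.receive(0)                           # blocking receive from rank 0
    print('rank 1 got %s' % data)
pypar.finalize()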
Example 1: get_enquiry_depths
def get_enquiry_depths(self):
    # Should be called from all processors associated with operator

    enq0 = None
    enq1 = None

    get0 = "self.inlets[0].get_enquiry_depth()"
    get1 = "self.inlets[1].get_enquiry_depth()"

    if self.myid == self.master_proc:
        # Master either evaluates its own inlet or receives the value
        if self.myid == self.enquiry_proc[0]:
            enq0 = eval(get0)
        else:
            enq0 = pypar.receive(self.enquiry_proc[0])

        if self.myid == self.enquiry_proc[1]:
            enq1 = eval(get1)
        else:
            enq1 = pypar.receive(self.enquiry_proc[1])
    else:
        # Enquiry processors evaluate and ship their value to the master
        if self.myid == self.enquiry_proc[0]:
            enq0 = eval(get0)
            pypar.send(enq0, self.master_proc)

        if self.myid == self.enquiry_proc[1]:
            enq1 = eval(get1)
            pypar.send(enq1, self.master_proc)

    return [enq0, enq1]
Example 2: get_enquiry_water_depths
def get_enquiry_water_depths(self):
    enq0 = None
    enq1 = None

    get0 = 'self.inlets[0].get_enquiry_water_depth()'
    get1 = 'self.inlets[1].get_enquiry_water_depth()'

    if self.myid == self.master_proc:
        if self.myid == self.enquiry_proc[0]:
            enq0 = eval(get0)
        else:
            enq0 = pypar.receive(self.enquiry_proc[0])

        if self.myid == self.enquiry_proc[1]:
            enq1 = eval(get1)
        else:
            enq1 = pypar.receive(self.enquiry_proc[1])
    else:
        if self.myid == self.enquiry_proc[0]:
            enq0 = eval(get0)
            pypar.send(enq0, self.master_proc)

        if self.myid == self.enquiry_proc[1]:
            enq1 = eval(get1)
            pypar.send(enq1, self.master_proc)

    return [enq0, enq1]
Example 3: scatter_dict
def scatter_dict(whole):
    """
    Scatter a dictionary whose values are 1d arrays, chunking the arrays
    across the workers. Only rank 0 needs the whole dictionary.

    :param whole: The dictionary of 1d arrays to split up.
    :returns: (chunk of the dictionary of 1d arrays, indexes into the whole arrays)
    """
    if not STATE.is_parallel:
        array_len = len(whole[whole.keys()[0]])
        return whole, numpy.array(range(0, array_len))
    else:
        import pypar  # pylint: disable=W0404
        if STATE.rank == 0:
            array_len = len(whole[whole.keys()[0]])
            for pro in range(0, STATE.size):
                # Round-robin assignment: rank pro gets rows pro, pro+size, ...
                temp_indexes = numpy.array(range(pro, array_len, STATE.size))
                temp_subdict = {}
                for key in whole.keys():
                    temp_subdict[key] = whole[key][temp_indexes]
                if pro == 0:
                    indexes = temp_indexes
                    subdict = temp_subdict
                else:
                    pypar.send(temp_indexes, pro)
                    pypar.send(temp_subdict, pro)
        else:
            indexes = pypar.receive(0)
            subdict = pypar.receive(0)
        return subdict, indexes
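A hypothetical usage sketch for scatter_dict follows; numpy, STATE and the call itself are taken from the example above, while the keys and the per-chunk work are made up:

import numpy

# Collective call: rank 0 splits and sends, the other ranks receive.
# Only rank 0's `whole` is actually used.
whole = {'x': numpy.arange(10.0), 'y': numpy.arange(10.0) * 2}
subdict, indexes = scatter_dict(whole)
partial = subdict['x'] + subdict['y']   # each rank works on its own rows only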
Example 4: get_enquiry_velocitys
def get_enquiry_velocitys(self):
    enq0 = None
    enq1 = None

    get0 = "self.inlets[0].get_enquiry_velocity()"
    get1 = "self.inlets[1].get_enquiry_velocity()"

    if self.myid == self.master_proc:
        if self.myid == self.enquiry_proc[0]:
            enq0 = eval(get0)
        else:
            enq0 = pypar.receive(self.enquiry_proc[0])

        if self.myid == self.enquiry_proc[1]:
            enq1 = eval(get1)
        else:
            enq1 = pypar.receive(self.enquiry_proc[1])
    else:
        if self.myid == self.enquiry_proc[0]:
            enq0 = eval(get0)
            pypar.send(enq0, self.master_proc)

        if self.myid == self.enquiry_proc[1]:
            enq1 = eval(get1)
            pypar.send(enq1, self.master_proc)

    return [enq0, enq1]
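Examples 1, 2 and 4 are the same pattern applied to different enquiry quantities. A distilled, hypothetical version of that pattern (the function and parameter names here are illustrative, not part of the original class):

def gather_value_to_master(myid, master_proc, owner_proc, compute):
    # One rank owns a value; the master either computes it locally or
    # receives it, and a non-master owner computes and sends.
    value = None
    if myid == master_proc:
        if myid == owner_proc:
            value = compute()                   # master owns the value itself
        else:
            value = pypar.receive(owner_proc)   # master waits for the owner
    elif myid == owner_proc:
        value = compute()
        pypar.send(value, master_proc)          # owner ships the value over
    return value                                # None on uninvolved ranks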
Example 5: balanceArrays
def balanceArrays(self, arrayFragment):
    '''Redistributes the elements in a set of arrays equally across the nodes'''
    if Environment.isParallel:
        import pypar
        if self.isRoot():
            # Gather all fragments into one array
            completeArray = arrayFragment
            for i in range(1, pypar.size()):
                fragment = pypar.receive(i)
                completeArray.extend(fragment)
            # Divide it up
            divisions = self._divideArray(completeArray)
            # Send the fragments
            for i in range(1, pypar.size()):
                start, end = divisions[i]
                pypar.send(completeArray[start:end], i)
            self.output('[ENV] Rebalanced array divisions %s' % divisions)
            # Assign root fragment
            start, end = divisions[0]
            arrayFragment = completeArray[start:end]
        else:
            # Send the fragment
            pypar.send(arrayFragment, 0)
            # Retrieve the rebalanced fragment
            arrayFragment = pypar.receive(0)
    else:
        completeArray = arrayFragment
    return arrayFragment
Example 6: master
def master(self):
    self.numCompleted = 0
    self.mapList = list()
    logging.info('[MASTER]: started processor %d of %d on node %s: number of works: %d'
                 % (self.MPI_myid, self.MPI_numproc, self.MPI_node, self.numWorks))

    # Start the slaves by distributing the first work slots
    rounder = 0
    if self.MPI_numproc <= self.numWorks:
        rounder = 1
    for i in range(min(self.MPI_numproc, self.numWorks) - rounder):
        work = self.works[i]
        pypar.send(work, destination=i + 1, tag=self.WORKTAG)
        logging.debug('[MASTER]: sent work "%s" to node %d' % (work, i + 1))

    # Dispatch the remaining work slots with a dynamic load-balancing policy:
    # the quicker a slave finishes a job, the more jobs it takes
    for work in self.works[self.MPI_numproc - 1:]:
        result, status = pypar.receive(source=pypar.any_source, tag=self.WORKTAG,
                                       return_status=True)
        logging.debug('[MASTER]: received result "%s" from node %d' % (result, status.source))
        self.mapList.append(result)
        self.numCompleted += 1
        logging.debug('[MASTER]: done : %d' % self.numCompleted)
        pypar.send(work, destination=status.source, tag=self.WORKTAG)
        logging.debug('[MASTER]: sent work "%s" to node %d' % (work, status.source))

    # All works have been dispatched
    logging.debug('[MASTER]: toDo : %d' % self.numWorks)
    logging.debug('[MASTER]: done : %d' % self.numCompleted)

    # Collect the remaining completions
    while self.numCompleted < self.numWorks:
        result, status = pypar.receive(source=pypar.any_source, tag=self.WORKTAG,
                                       return_status=True)
        logging.debug('[MASTER]: received (final) result "%s" from node %d' % (result, status.source))
        self.mapList.append(result)
        self.numCompleted += 1
        logging.debug('[MASTER]: %d completed' % self.numCompleted)

    logging.debug('[MASTER]: about to terminate slaves')
    # Tell the slaves to stop working
    for i in range(1, self.MPI_numproc):
        pypar.send('#', destination=i, tag=self.DIETAG)
        logging.debug('[MASTER]: sent termination signal to node %d' % i)

    # Call the reduce function
    logging.info('[MASTER]: about to run reduce')
    res = self.reduceFunction(self.mapList)
    return res
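The matching slave side is not shown in this example. A minimal hypothetical counterpart would loop on receive until a DIETAG message arrives; self.mapFunction is an assumed per-item worker, while WORKTAG and DIETAG mirror the names used above:

def slave(self):
    # Keep taking work from the master until a DIETAG message arrives
    while True:
        work, status = pypar.receive(source=0, tag=pypar.any_tag,
                                     return_status=True)
        if status.tag == self.DIETAG:
            break                                    # master says we are done
        result = self.mapFunction(work)              # assumed worker function
        pypar.send(result, destination=0, tag=self.WORKTAG)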
Example 7: splitArray
def splitArray(self, array):
    '''Splits array between all the processes in the environment.
    Each process will be returned a different section of the array to work on'''
    if Environment.isParallel:
        import pypar
        # Split the array into sections and return the section for this processor
        divisions = []
        if self.isRoot():
            # Root does the splitting - we send each processor the start and end index
            # NOTE: pypar broadcast won't work even when setting vanilla -
            # it always returns a "message truncated" error.
            divisions = self._divideArray(array)
            for i in range(1, pypar.size()):
                pypar.send(divisions[i], i)
            start = divisions[0][0]
            end = divisions[0][1]
        else:
            indexes = pypar.receive(0)
            start = indexes[0]
            end = indexes[1]
        return array[start:end]
    else:
        return array
Example 8: get_global_average_elevation
def get_global_average_elevation(self):
    # GLOBAL: master processor gathers elevations from all child processors
    # and returns the area-weighted average.
    # WARNING: requires synchronization - must be called by all procs
    # associated with this inlet.
    import pypar

    # num is assumed to be the module's numpy alias
    local_elevation = num.sum(self.get_elevations() * self.get_areas())
    global_area = self.get_global_area()
    global_elevation = local_elevation

    if self.myid == self.master_proc:
        for i in self.procs:
            if i == self.master_proc:
                continue
            val = pypar.receive(i)
            global_elevation = global_elevation + val
    else:
        pypar.send(local_elevation, self.master_proc)

    if global_area > 0.0:
        return global_elevation / global_area
    else:
        return 0.0
Example 9: distributed_generator
import pypar as pp

def distributed_generator(iterable):
    """
    Distribute the values from a generator across workers.
    """
    RUN, DIE = range(2)
    P = pp.size()
    if P == 1:
        for el in iterable:
            yield el
    else:
        if pp.rank() == 0:
            # Rank 0 deals values round-robin: one for itself, one per worker
            it = iter(iterable)
            while True:
                first = None
                have_first = False
                try:
                    first = next(it)
                    have_first = True
                    for p in range(1, P):
                        pp.send(next(it), p, tag=RUN)
                    yield first
                except StopIteration:
                    # Source exhausted: tell every worker to shut down
                    # (the payload is ignored; only the DIE tag matters)
                    for p in range(1, P):
                        pp.send(666, p, tag=DIE)
                    if have_first:
                        yield first  # don't drop the last, partial round's value
                    break
        else:
            while True:
                el, status = pp.receive(0, tag=pp.any_tag, return_status=True)
                if status.tag == DIE:
                    break
                yield el
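A hypothetical usage of distributed_generator, launched under MPI (e.g. mpirun -np 4); process() is a made-up stand-in for whatever per-item work each rank does:

def process(item):
    print('rank %d handling %r' % (pp.rank(), item))   # stand-in for real work

for item in distributed_generator(range(100)):
    process(item)   # every rank sees only its own share of the stream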
Example 10: scatter
def scatter(vec):
    # p is pypar; root() is assumed to return True on rank 0
    if root():
        # Root keeps element 0 and sends one element to each other rank
        for i in xrange(p.size() - 1):
            p.send(vec[i + 1], i + 1)
        return vec[0]
    else:
        return p.receive(0)
Example 11: test_longint_array
def test_longint_array(self):
    # Ring test: rank 0 sends a long-integer array around the ring and
    # checks it arrives back unchanged (np is numpy, pp is pypar)
    myid, ncpu = pp.rank(), pp.size()
    N = 17  # Number of elements

    if myid == 0:
        A = np.array(range(N)).astype('l')
        B = np.zeros(N).astype('l')
        pp.send(A, 1, use_buffer=True)
        B = pp.receive(ncpu - 1, buffer=B)
        self.assertTrue(np.allclose(A, B))
    else:
        X = np.zeros(N).astype('l')
        X = pp.receive(myid - 1, buffer=X)
        pp.send(X, (myid + 1) % ncpu, use_buffer=True)
Example 12: broadcast
def broadcast(obj):
    # p is pypar; root() is assumed to return True on rank 0
    if root():
        for i in xrange(p.size() - 1):
            p.send(obj, i + 1)
        return obj
    else:
        return p.receive(0)
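Examples 10 and 12 pair naturally. A hypothetical driver, assuming p is pypar and defining the root() helper the two examples rely on:

import pypar as p

def root():
    return p.rank() == 0

params = broadcast({'n': 4})            # every rank ends up with the same dict
chunk = scatter(list(range(p.size())))  # every rank gets one element
print('rank %d got %s under %s' % (p.rank(), chunk, params))
p.finalize()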
Example 13: test_mpi_recvsend
def test_mpi_recvsend(target_array, message_range, target_rank, tag_mark):
    # Receive first, then send (mpi is pypar); must be paired with a
    # send-first partner on target_rank, or both sides block forever
    send_range = message_range[0]
    recv_range = message_range[1]
    target_array[recv_range] = mpi.receive(target_rank, tag=tag_mark)
    # print 'I`m', myrank, 'Recv : ', target_rank, 'range : ', recv_range
    mpi.send(target_array[send_range].copy(), target_rank, tag=tag_mark)
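A receive-first routine like this only avoids deadlock if its partner on target_rank sends first. A hypothetical send-first counterpart (mpi is pypar, as above):

def test_mpi_sendrecv(target_array, message_range, target_rank, tag_mark):
    # Send first, then receive: the mirror image of test_mpi_recvsend
    send_range = message_range[0]
    recv_range = message_range[1]
    mpi.send(target_array[send_range].copy(), target_rank, tag=tag_mark)
    target_array[recv_range] = mpi.receive(target_rank, tag=tag_mark)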
Example 14: master
def master():
    numCompleted = 0
    print '[MASTER]: I am processor %d of %d on node %s'\
          % (MPI_myid, MPI_numproc, MPI_node)

    # Start the slaves by distributing the first work slots
    for i in range(1, min(MPI_numproc, numWorks)):
        work = workList[i]
        pypar.send(work, destination=i, tag=WORKTAG)
        print '[MASTER]: sent work "%s" to node %d' % (work, i)

    # Dispatch the remaining work slots with a dynamic load-balancing policy:
    # the quicker a slave finishes a job, the more jobs it takes
    for work in workList[MPI_numproc:]:
        result, status = pypar.receive(source=pypar.any_source, tag=WORKTAG,
                                       return_status=True)
        print '[MASTER]: received result "%s" from node %d'\
              % (result, status.source)
        numCompleted += 1
        pypar.send(work, destination=status.source, tag=WORKTAG)
        print '[MASTER]: sent work "%s" to node %d' % (work, status.source)

    # All works have been dispatched
    print '[MASTER]: toDo : %d' % numWorks
    print '[MASTER]: done : %d' % numCompleted

    # Collect the remaining completions
    while numCompleted < numWorks:
        result, status = pypar.receive(source=pypar.any_source,
                                       tag=WORKTAG,
                                       return_status=True)
        print '[MASTER]: received (final) result "%s" from node %d'\
              % (result, status.source)
        numCompleted += 1
        print '[MASTER]: %d completed' % numCompleted

    print '[MASTER]: about to terminate slaves'
    # Tell the slaves to stop working
    for i in range(1, MPI_numproc):
        pypar.send('#', destination=i, tag=DIETAG)
        print '[MASTER]: sent termination signal to node %d' % i
    return
Example 15: broadcast_vec
def broadcast_vec(vec, i):
    # Rank i broadcasts its element vec[i] to every other rank (p is pypar)
    myid = p.rank()
    if myid == i:
        for j in xrange(p.size()):
            if j != myid:
                p.send(vec[i], j)
    else:
        vec[i] = p.receive(i)
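Calling broadcast_vec once per index turns it into an element-wise all-gather. A hypothetical driver, assuming vec has one slot per process:

import pypar as p

vec = [None] * p.size()
vec[p.rank()] = p.rank() ** 2          # each rank fills its own slot
for i in range(p.size()):
    broadcast_vec(vec, i)              # after the loop, vec is complete everywhere
p.finalize()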