This article collects typical usage examples of the Python function pypar.barrier. If you are wondering how the barrier function is used in practice, or are looking for working examples of it, the curated code samples below should help.
Shown below are 15 code examples of the barrier function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
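Before the examples, here is a minimal, self-contained sketch of how pypar.barrier is typically used. It is an illustrative assumption, not taken from any of the projects below: each process does its own work, and the barrier makes every process wait until the slowest one has reached that point.

# Minimal pypar.barrier sketch (illustrative; run with e.g. mpirun -np 4 python script.py)
import pypar

rank = pypar.rank()   # id of this process
size = pypar.size()   # total number of processes

print("Processor %d of %d doing local work" % (rank, size))

# No process passes this point until all processes have reached it
pypar.barrier()

if rank == 0:
    print("All %d processors have synchronised" % size)

pypar.finalize()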
Example 1: TestSolveOverlapSpeed
def TestSolveOverlapSpeed():
    def timeIt(func):
        t1 = time.time()
        func()
        t2 = time.time()
        Print(" Function '%s' took %4.1f s." % (func.func_name, (t2-t1)))

    numSolves = 100

    Print("")
    Print("Now testing multiple S^-1 * psi...")
    pyprop.Redirect.Enable(silent=True)

    seed(0)

    conf = pyprop.Load("config_eigenvalues.ini")
    psi = pyprop.CreateWavefunction(conf)
    tmpPsi = psi.Copy()

    Print(" Size of wavefunction is: %s" % repr(psi.GetData().shape))

    #Calculate S^-1 * psi
    Print(" Performing %i solves..." % numSolves)
    def solve():
        for i in range(numSolves):
            psi.GetRepresentation().MultiplyOverlap(tmpPsi)
    timeIt(solve)

    #finish and cleanup
    pypar.barrier()
    pyprop.Redirect.Disable()
    Print("\n...done!")
Example 2: wait
def wait(self, error=False):
    '''This method will not return until all processes in the environment have called it.

    This is a wrapper around MPI_Barrier which handles the case where MPI is not available.'''
    from inspect import stack

    if self.verbose is True:
        string = '(%s) Waiting at line %d of %s' % (datetime.datetime.now().strftime('%H:%M:%S'),
                                                    stack()[1][0].f_lineno,
                                                    stack()[1][0].f_code.co_filename)
        self.log(string)

    if Environment.isParallel:
        import pypar
        pypar.barrier()
        # Because MPI_ABORT doesn't work in pypar when called from a single process,
        # the processes need a way to tell each other whether an error occurred in the
        # code they executed before this barrier. We do a scatter/gather of the error
        # parameter - this isn't very efficient, but it is all we can do for now.
        errors = self.combineArray([error])
        if True in errors:
            self.exit(1)

    if self.verbose is True:
        string = '(%s) Finished waiting' % (datetime.datetime.now().strftime('%H:%M:%S'))
        self.log(string)
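The error-propagation trick in wait() can be reduced to a small standalone pattern: synchronise at a barrier, then let rank 0 combine every process's error flag and broadcast the verdict back. The sketch below is a hedged assumption about such a call site; only pypar.rank, pypar.size, pypar.send, pypar.receive and pypar.barrier are real pypar calls, the helper function name is invented for illustration.

# Hedged sketch of the "barrier plus error flag" idea used by wait() above.
import sys
import pypar

def synchronise_and_check(local_error):
    """Return True if any process reported an error before this barrier."""
    rank = pypar.rank()
    size = pypar.size()

    pypar.barrier()                        # everyone reaches the same point first

    if rank == 0:
        any_error = local_error
        for source in range(1, size):      # gather every other process's flag
            any_error = any_error or pypar.receive(source)
        for dest in range(1, size):        # tell everyone the combined verdict
            pypar.send(any_error, dest)
    else:
        pypar.send(local_error, 0)
        any_error = pypar.receive(0)

    return any_error

# Usage: shut down everywhere if any rank failed
if synchronise_and_check(local_error=False):
    pypar.finalize()
    sys.exit(1)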
Example 3: run
def run():
    """
    Run the process, handling any parallelisation.
    """
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config",
                        help="Configuration file",
                        type=str)
    parser.add_argument("-i", "--inputfile",
                        help="Input DEM file (ascii format)",
                        type=str)
    parser.add_argument("-o", "--output",
                        help="Output path",
                        type=str)
    parser.add_argument("-v", "--verbose",
                        help=("Verbose output (not available when invoking "
                              "parallel run)"))

    args = parser.parse_args()

    logfile = 'topomult.log'
    loglevel = 'INFO'

    if args.verbose:
        verbose = args.verbose
    else:
        verbose = False

    if args.config:
        cfg = ConfigParser.ConfigParser()
        cfg.read(args.config)

        input_file = cfg.get('Input', 'Filename')
        output_path = cfg.get('Output', 'Path')
        logfile = cfg.get('Logging', 'LogFile')
        loglevel = cfg.get('Logging', 'LogLevel')
        verbose = cfg.get('Logging', 'Verbose')

    if args.inputfile:
        input_file = args.inputfile

    if args.output:
        output_path = args.output

    attemptParallel()
    if pp.size() > 1 and pp.rank() > 0:
        logfile += '-' + str(pp.rank())
        verbose = False  # to stop output to console

    flStartLog(logfile, loglevel, verbose)

    pp.barrier()
    work(input_file, output_path,
         ['n', 's', 'e', 'w', 'ne', 'nw', 'se', 'sw'])
    pp.barrier()

    pp.finalize()
Example 4: barrier
def barrier(self):
    """
    Synchronisation point. Makes processors wait until all
    processors have reached this point.
    """
    if self.is_parallel is True:
        import pypar
        pypar.barrier()
Example 5: abnormalexit
def abnormalexit(reason):
    """this tells each worker node to exit, then kills the server process.
    this should only be called by the server node"""
    print 'abnormal exit'
    print reason
    sendtoall(('Die', 0))
    pypar.barrier()
    pypar.finalize()
    sys.exit(2)
Example 6: CreatePath
def CreatePath(absFileName):
    """Create directories in abspath
    """
    logger = GetFunctionLogger()
    if pyprop.ProcId == 0:
        filePath = os.path.dirname(absFileName)
        if not os.path.exists(filePath) and len(filePath) > 0:
            logger.debug("Creating folder: %s" % filePath)
            os.makedirs(filePath)
    pypar.barrier()
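Example 6 shows a common idiom: only the master process touches the filesystem, and the barrier guarantees the directory exists before any other process tries to write into it. A hedged usage sketch follows; the path is hypothetical and the save call is an assumption borrowed from Example 8 below.

# Assumed call site for CreatePath: every rank calls it, only rank 0 creates the
# directory, and the barrier inside CreatePath ensures it exists before the save.
outputFile = "output/run_000/wavefunction.h5"          # hypothetical path
CreatePath(outputFile)
prop.SaveWavefunctionHDF(outputFile, "/wavefunction")  # pyprop call, as used in Example 8 below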
Example 7: SerialPrint
def SerialPrint(str, proc=-1):
    if ProcCount == 1:
        print str
    else:
        if proc == -1:
            procList = range(ProcCount)
        else:
            procList = [proc]
        for i in procList:
            if i == ProcId:
                print "Proc %4i: %s" % (ProcId, str,)
                sys.stdout.flush()
            pypar.barrier()
Example 8: callback
def callback(self, prop):
    if self.StoreDuringPropagation:
        #create unique filename
        filename = "%s_%03i.h5" % (self.OutputFileName.strip(".h5"), self.Counter)

        #store current wavefunction and propagation time
        prop.SaveWavefunctionHDF(filename, "/wavefunction")
        if pyprop.ProcId == 0:
            with tables.openFile(filename, "r+", MAX_THREADS=1) as h5:
                h5.setNodeAttr("/wavefunction", "prop_time", prop.PropagatedTime)

        pypar.barrier()
        self.Counter += 1
Example 9: TestEpetraMatvecSpeed
def TestEpetraMatvecSpeed():
    numMatVecs = 500

    Print("")
    Print("Now testing Epetra matvec speed...")
    pyprop.Redirect.Enable(silent=True)

    #Test
    conf = pyprop.Load("config_propagation.ini")
    psi = pyprop.CreateWavefunction(conf)
    Print(" Size of wavefunction is: %s" % repr(psi.GetData().shape))

    #Setup problem
    Print(" Setting up propagator w/potentials...")
    prop = SetupProblem(config='config_propagation.ini')
    psi = prop.psi
    tmpPsi = psi.Copy()
    tmpPsi.Clear()
    Print(" Local size of wavefunction is: %s" % str(prop.psi.GetData().shape))
    Print(" Global size of wavefunction is: %s" % str(prop.psi.GetRepresentation().GetFullShape()))

    #Get Epetra potential
    #pot = prop.Propagator.BasePropagator.PotentialList[1]
    Print(" Number of potentials: %s" % len(prop.Propagator.BasePropagator.PotentialList))

    #Calculate S^-1 * psi
    Print(" Performing %i matvecs..." % numMatVecs)
    def matvecs():
        for i in range(numMatVecs):
            #pot.MultiplyPotential(psi, tmpPsi, 0, 0)
            prop.Propagator.BasePropagator.MultiplyHamiltonianNoOverlap(psi, tmpPsi, 0, 0)
            #tmpPsi.GetRepresentation().SolveOverlap(tmpPsi)
    timeIt(matvecs)

    #finish and cleanup
    pypar.barrier()
    pyprop.Redirect.Disable()
    pyprop.PrintOut("\n...done!")
Example 10: test_lock
def test_lock(Nmpi, fields, pbc_opt=None):
    if myrank == 0:
        print 'PBC : %s, start' % pbc_opt
    mpi.barrier()

    for i in xrange(len(fields)):
        fields[i][:,:,:6] = 1.
        fields[i][:,:,6:] = 0.
        #print 'I`m', myrank, 'Field %s Direction x1 sum before = ' % i, fields[i][:,:,6].sum()
        #print 'I`m', myrank, 'Field %s Direction x2 sum before = ' % i, fields[i][:,:,7].sum()
        #print 'I`m', myrank, 'Field %s Direction y1 sum before = ' % i, fields[i][:,:,8].sum()
        #print 'I`m', myrank, 'Field %s Direction y2 sum before = ' % i, fields[i][:,:,9].sum()
        #print 'I`m', myrank, 'Field %s Direction z1 sum before = ' % i, fields[i][:,:,10].sum()
        #print 'I`m', myrank, 'Field %s Direction z2 sum before = ' % i, fields[i][:,:,11].sum()
    mpi.barrier()

    if myrank != 0:
        targets = MPI.calc_mpitarget(Nmpi, myrank)
        targets_pbc = MPI.calc_mpitarget_pbc(Nmpi, myrank, pbc_opt)
        message_range = MPI.test_making_message_range()
        MPI.test_mpi_exchange(fields, Nmpi, myrank, targets, message_range)
        MPI.test_mpi_exchange_pbc(fields, myrank, targets_pbc, message_range, pbc_opt)

        for i in xrange(len(fields)):
            print 'I`m', myrank, 'Field %s Direction x1 sum after = ' % i, fields[i][:,:,6].sum()
            print 'I`m', myrank, 'Field %s Direction x2 sum after = ' % i, fields[i][:,:,7].sum()
            print 'I`m', myrank, 'Field %s Direction y1 sum after = ' % i, fields[i][:,:,8].sum()
            print 'I`m', myrank, 'Field %s Direction y2 sum after = ' % i, fields[i][:,:,9].sum()
            print 'I`m', myrank, 'Field %s Direction z1 sum after = ' % i, fields[i][:,:,10].sum()
            print 'I`m', myrank, 'Field %s Direction z2 sum after = ' % i, fields[i][:,:,11].sum()

    mpi.barrier()
    if myrank == 0:
        print 'PBC : %s, Done' % pbc_opt
        print
        print
        print
Example 11: main
def main():
    # Ensure all processors are ready
    pypar.barrier()
    print "Processor %d is ready" % (myid)

    # Connect to MySQL db
    db = MySQLdb.connect(host="localhost",
                         user="root",
                         passwd="samsung",
                         db="sat")
    cur = db.cursor()

    # Option parser from wrapper script
    parser = optparse.OptionParser()
    # PDB
    parser.add_option("-p", "--pdb",
                      help="Choose all or a pdb id",
                      dest="pdb", default="all")
    # PDB directory
    parser.add_option("-d", "--dir",
                      help="i",
                      dest="i", default="all")
    parser.add_option("-m", "--mutationList",
                      help="Location of mutation list file",
                      dest="m", default="ALA")
    (opts, args) = parser.parse_args()

    # Run calculations
    do_run(opts.pdb, opts.i, cur, db, opts.m)

    # Finalize and exit
    pypar.finalize()
Example 12: mrmpi
if a+b+c+d != 1.0:
    if me == 0: print "ERROR: a,b,c,d must sum to 1"
    sys.exit()
if fraction >= 1.0:
    if me == 0: print "ERROR: fraction must be < 1"
    sys.exit()

random.seed(seed+me)
order = 1 << nlevels

mr = mrmpi()

# loop until desired number of unique nonzero entries
pypar.barrier()
tstart = pypar.time()

niterate = 0
ntotal = (1 << nlevels) * nnonzero
nremain = ntotal
while nremain:
    niterate += 1
    ngenerate = nremain/nprocs
    if me < nremain % nprocs: ngenerate += 1
    mr.map(nprocs, generate, None, 1)
    nunique = mr.collate()
    if nunique == ntotal: break
    mr.reduce(cull)
    nremain = ntotal - nunique
Example 13: run_multiple_windfields
def run_multiple_windfields(scenario,
                            windfield_directory=None,
                            hazard_output_folder=None,
                            dircomment=None,
                            echo=False,
                            verbose=True):
    """Run volcanic ash impact model for multiple wind fields.

    The wind fields are assumed to be in the subfolder specified by windfield_directory,
    have the extension *.profile and follow the format used with scenarios.

    This function makes use of Open MPI and Pypar to execute in parallel but can also run sequentially.
    """

    try:
        import pypar
    except:
        P = 1
        p = 0
        processor_name = os.uname()[1]
        print 'Pypar could not be imported. Running sequentially on node %s' % processor_name,
    else:
        time.sleep(1)
        P = pypar.size()
        p = pypar.rank()
        processor_name = pypar.get_processor_name()
        print 'Processor %d initialised on node %s' % (p, processor_name)
        pypar.barrier()

    if p == 0:
        # Put logs along with the results
        logdir = os.path.join(hazard_output_folder, 'logs')
        makedir(logdir)

        header('Hazard modelling using multiple wind fields')
        print '* Wind profiles obtained from: %s' % windfield_directory
        print '* Scenario results stored in: %s' % hazard_output_folder
        print '* Log files:'

        t_start = time.time()

        # Communicate hazard output directory name to all nodes to ensure they have exactly the same time stamp.
        for i in range(P):
            pypar.send((hazard_output_folder), i)
    else:
        # Receive correctly timestamped output directory names
        hazard_output_folder = pypar.receive(0)
        logdir = os.path.join(hazard_output_folder, 'logs')

    try:
        name = os.path.splitext(scenario)[0]
    except:
        name = 'run'

    # Wait until log dir has been created
    pypar.barrier()

    params = get_scenario_parameters(scenario)

    # Start processes staggered to avoid race conditions for disk access (otherwise it is slow to get started)
    time.sleep(2*p)

    # Logging
    s = 'Proc %i' % p
    print ' %s -' % string.ljust(s, 8),
    AIM_logfile = os.path.join(logdir, 'P%i.log' % p)
    start_logging(filename=AIM_logfile, echo=False)

    # Get cracking
    basename, _ = os.path.splitext(scenario)
    count_local = 0
    count_all = 0
    for i, file in enumerate(os.listdir(windfield_directory)):
        count_all += 1

        # Distribute jobs cyclically to processors
        if i % P == p:
            if not file.endswith('.profile'):
                continue

            count_local += 1
            windfield = '%s/%s' % (windfield_directory, file)
            windname, _ = os.path.splitext(file)
            header('Computing event %i on processor %i using wind field: %s' % (i, p, windfield))

            if dircomment is None:
                dircomment = params['eruption_comment']
#......... (remainder of this function omitted) .........
Example 14: main
def main():
    #--------------------#
    # server code
    #--------------------#
    if rank == 0:
        print 'server running on ', procname
        opts = task(sys.argv)
        opts.printruninfo()
        sendtoall(('Start', sys.argv))
        server = serverdata(opts)

        #set up the collector and generator
        start = time.time()
        collector = resultcollector(server)
        end = time.time()
        print end-start

        jobs = jobgenerator(server)
        numjobsreceived = 0

        #begin distributing work
        for proc in xrange(1, min(numnodes, jobs.numjobs+1)):
            job = jobs.next(proc)
            pypar.send(('job', job), proc, tag=OUT)

        while numjobsreceived < jobs.jobindex:  #while any job is still running
            #wait for any node to send a result
            msg, status = pypar.receive(pypar.any_source, return_status=True, tag=RETURN)
            numjobsreceived += 1

            proc, response = msg
            if jobs.hasnext(proc):  #see if there is more work to be done
                job = jobs.next(proc)
                pypar.send(('job', job), proc, tag=OUT)  #send it to the node that just completed

            #combine the results *after* sending the new job
            #(this way the worker can proceed while the results are being combined)
            collector.collect(response)

        #all jobs collected, kill the workers
        sendtoall(('Done', 0))

        #finish up the computation
        collector.finish()

    #--------------------#
    # worker code
    #--------------------#
    else:
        while True:
            start = time.time()
            (code, msg), status = pypar.receive(0, return_status=True, tag=OUT)
            end = time.time()
            print 'waiting', end-start

            if code == 'Done':  #all work is done
                opts.printruninfo()
                break
            elif code == 'Die':  #abnormal exit
                break
            elif code == 'Start':
                opts = task(msg)
                sys.stdout = open(opts.logprefix+'%02d.log' % rank, 'w')  #logfile
                print 'client', rank, 'running on', procname
            else:
                start = time.time()
                jobnum, job = msg
                print jobnum
                result = opts.dojob(job)  #do the job
                end = time.time()
                print 'working', msg[0], end-start

                start = time.time()
                pypar.send((rank, (jobnum, result)), 0, tag=RETURN)  #return the result to the server
                end = time.time()
                print 'sending', end-start

    #------------------#
    #end of parallel code
    pypar.barrier()
    pypar.finalize()
Example 15: xrange
print p.rank(), res

if True:
    v = [2 for i in xrange(10000000)]
    res = p_dot_all(v, v)
    #import time
    #time.sleep(p.rank()*2+1)
    print p.rank(), res

if False:
    s = 0
    for i in xrange(100):
        r = p.rank()
        r = broadcast(r)
        s += (r + 1)
        p.barrier()
    print "%d %d" % (p.rank(), s)

if False:
    m = None
    v = None
    if root():
        m = eye_matrix(3000)
        v = range(3000)
    r = p_mv(m, v)
    if root():
        print r

if root():
    end = p.time()
    total = end - start