This article collects typical usage examples of the Python method ipyparallel.Client.load_balanced_view. If you are unsure what Client.load_balanced_view does or how to use it, the curated code examples below may help. You can also explore further usage examples of the containing class, ipyparallel.Client.
Fifteen code examples of Client.load_balanced_view are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
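All fifteen examples share the same core pattern: connect a Client to a running cluster, request a load-balanced view from it, and dispatch work through that view. As a minimal sketch of the pattern, assuming an ipcluster is already running under the default profile:

from ipyparallel import Client

rc = Client()                    # connect to the running cluster
lview = rc.load_balanced_view()  # dynamic, load-balanced task scheduler
ar = lview.map_async(lambda x: x * x, range(10))
ar.wait_interactive()            # shows progress while tasks complete
print(ar.get())                  # [0, 1, 4, 9, ...]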
Example 1: run_jobs_on_ipythoncluster
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
def run_jobs_on_ipythoncluster(worker, task_list, shutdown_ipengines_after_done=False):
    t0 = time.time()
    rc = Client(CLUSTER_CLIENT_JSON)
    lview = rc.load_balanced_view()
    cnt_nodes = len(lview.targets or rc.ids)
    print("\t# nodes in use: {}".format(cnt_nodes))
    lview.block = False
    print("\t# of tasks: {}".format(len(task_list)))
    print("\tsubmitting...", end='')
    job = lview.map_async(worker, task_list)
    print("done.")
    try:
        job.wait_interactive()
    except KeyboardInterrupt:
        # handle "Ctrl-C"
        if ask("\nAbort all submitted jobs?") == 'Y':
            lview.abort()
            print("Aborted, all submitted jobs are cancelled.")
        else:
            print("Aborted, but your jobs are still running on the cluster.")
        return
    if len(job.result()) != len(task_list):
        print("WARNING:\t# of results returned ({}) != # of tasks ({}).".format(len(job.result()), len(task_list)))
    print("\ttotal time: {}".format(timesofar(t0)))
    if shutdown_ipengines_after_done:
        print("\tshutting down all ipengine nodes...", end='')
        lview.shutdown()
        print('Done.')
    return job.result()
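This helper depends on module-level names not shown here (CLUSTER_CLIENT_JSON, ask, timesofar). A hypothetical call, assuming those names are defined and a cluster is reachable:

def double(x):
    return x * 2

results = run_jobs_on_ipythoncluster(double, list(range(1000)),
                                     shutdown_ipengines_after_done=True)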
Example 2: create_optimizer
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
def create_optimizer(args):
    '''returns configured bluepyopt.optimisations.DEAPOptimisation'''
    if args.ipyparallel or os.getenv('L5PCBENCHMARK_USEIPYP'):
        from ipyparallel import Client
        rc = Client(profile=os.getenv('IPYTHON_PROFILE'))
        logger.debug('Using ipyparallel with %d engines', len(rc))
        lview = rc.load_balanced_view()

        def mapper(func, it):
            start_time = datetime.now()
            ret = lview.map_sync(func, it)
            logger.debug('Generation took %s', datetime.now() - start_time)
            return ret

        map_function = mapper
    else:
        map_function = None

    evaluator = l5pc_evaluator.create()
    seed = os.getenv('BLUEPYOPT_SEED', args.seed)
    opt = bluepyopt.optimisations.DEAPOptimisation(
        evaluator=evaluator,
        map_function=map_function,
        seed=seed)
    return opt
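Note the contract here: DEAPOptimisation only needs map_function to behave like the built-in map and return a list, so the ipyparallel machinery stays hidden behind an ordinary callable. A serial stand-in with the same signature (illustration only):

def serial_mapper(func, it):
    return list(map(func, it))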
Example 3: simulate_general
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
def simulate_general(runner, results_filename):
    """
    Function with the general code to simulate the MIMO schemes.
    """
    # xxxxxxxxxx Print the simulation parameters xxxxxxxxxxxxxxxxxxxxxxxxxx
    pprint(runner.params.parameters)
    print("MIMO Scheme: {0}".format(runner.mimo_object.__class__.__name__))
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxx Replace any parameter mention in results_filename xxxxxxxxxxxxx
    runner.set_results_filename(results_filename)
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    # xxxxxxxxxx Perform the simulation xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    # The simulation will be run either in parallel or serially, depending
    # on whether the IPython engines are running or not.
    run_in_parallel = True
    # noinspection PyBroadException
    try:
        # If we can get an IPython view, that means the IPython engines are
        # running. In that case we will perform the simulation in parallel.
        from ipyparallel import Client
        cl = Client()
        # We create a direct view to run code in all engines
        dview = cl.direct_view()
        # Reset the engines so that we don't have variables there from
        # previous computations
        dview.execute('%reset')
        dview.execute('import sys')
        # We use block=True to ensure that all engines have modified their
        # path to include the folder with the simulator before we create
        # the load balanced view below.
        dview.execute('sys.path.append("{0}")'.format(parent_dir), block=True)
        # But for the actual simulation we are better off using a load
        # balanced view
        lview = cl.load_balanced_view()
    except Exception:  # pylint: disable=W0703
        # If we can't get an IPython view then we will perform the
        # simulation serially
        run_in_parallel = False

    if run_in_parallel is True:
        print("-----> Simulation will be run in Parallel")
        runner.simulate_in_parallel(lview)
    else:
        print("-----> Simulation will be run serially")
        runner.simulate()
    # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

    print("Number of iterations run: {0}".format(runner.runned_reps))
    print("Elapsed Time: {0}".format(runner.elapsed_time))
    print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n")

    return runner.results, runner.results_filename
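The try/except probe above is a common way to fall back to serial execution when no cluster is up: Client() raises if it cannot find or reach a running cluster. A condensed sketch of the same idea (the function name is illustrative):

def get_load_balanced_view_or_none():
    """Return a LoadBalancedView if an IPython cluster is reachable, else None."""
    try:
        from ipyparallel import Client
        return Client().load_balanced_view()
    except Exception:
        return None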
Example 4: do_parallel
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
def do_parallel(filelist):
    rc = Client()
    print('# of engines : %d' % len(rc.ids))
    print('# of jobs    : %d' % len(filelist))
    lv = rc.load_balanced_view()
    result = lv.map_async(singlejob, filelist)
    result.wait_interactive()
Example 5: setup_parallel
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
def setup_parallel(dbname):
    c = Client()
    dview = c.direct_view()
    dview.push({'dbname': str(dbname)})
    # dview.push({'remove_duplicates_from_image_name_data':
    #             remove_duplicates_from_image_name_data,
    #             'get_temp_fname': get_temp_fname,
    #             'dbname': dbname})
    lbview = c.load_balanced_view()
    return lbview
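Names pushed with dview.push become globals in each engine's namespace, so functions dispatched through the returned view can refer to dbname directly. A hypothetical usage sketch (the sqlite table name is invented for illustration):

def count_images():
    import sqlite3
    with sqlite3.connect(dbname) as conn:  # `dbname` was pushed to the engines
        return conn.execute("SELECT COUNT(*) FROM images").fetchone()[0]

lbview = setup_parallel("catalog.db")
print(lbview.apply_sync(count_images))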
Example 6: setup_parallel
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
def setup_parallel(parallel):
    if parallel:
        pickleutil.use_dill()
        # can_map.pop(FunctionType, None)
        # serialize.pickle = pickle
        print("Running in parallel")
        rc = Client()
        rc[:].use_dill()
        lview = rc.load_balanced_view()
        lview.block = True
    else:
        lview = None
    return lview
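With dill registered on both the client and the engines, objects the default serializer rejects, such as closures, can be shipped. A hypothetical usage sketch:

lview = setup_parallel(True)
offset = 10
# The lambda closes over `offset`; dill makes this serializable.
print(lview.map(lambda x: x + offset, range(4)))  # view blocks: [10, 11, 12, 13]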
Example 7: DistributedSpider
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
class DistributedSpider(object):
    # Time to wait between polling for task results.
    pollingDelay = 0.5

    def __init__(self, site):
        self.client = Client()
        self.view = self.client.load_balanced_view()
        self.mux = self.client[:]
        self.allLinks = []
        self.linksWorking = {}
        self.linksDone = {}
        self.site = site

    def visitLink(self, url):
        if url not in self.allLinks:
            self.allLinks.append(url)
            if url.startswith(self.site):
                print(" ", url)
                self.linksWorking[url] = self.view.apply(fetchAndParse, url)

    def onVisitDone(self, links, url):
        print(url + ":")
        self.linksDone[url] = None
        del self.linksWorking[url]
        for link in links:
            self.visitLink(link)

    def run(self):
        self.visitLink(self.site)
        while self.linksWorking:
            print(len(self.linksWorking), "pending...")
            self.synchronize()
            time.sleep(self.pollingDelay)

    def synchronize(self):
        for url, ar in list(self.linksWorking.items()):
            # ar.get(0) raises a TimeoutError if the task is not finished
            # yet. This provides a simple way of polling.
            try:
                links = ar.get(0)
            except error.TimeoutError:
                continue
            except Exception as e:
                self.linksDone[url] = None
                del self.linksWorking[url]
                print("%s: %s" % (url, e))
            else:
                self.onVisitDone(links, url)
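The spider assumes a fetchAndParse(url) helper that downloads a page and returns the links found on it; it is not shown in this snippet. A standard-library sketch of what it might look like (an assumption, not the original implementation; imports are kept inside the function so it also works when shipped to engines):

def fetchAndParse(url):
    from html.parser import HTMLParser
    from urllib.parse import urljoin
    from urllib.request import urlopen

    class LinkParser(HTMLParser):
        def __init__(self):
            super().__init__()
            self.links = []

        def handle_starttag(self, tag, attrs):
            if tag == 'a':
                for name, value in attrs:
                    if name == 'href' and value:
                        self.links.append(urljoin(url, value))

    parser = LinkParser()
    parser.feed(urlopen(url).read().decode('utf-8', errors='replace'))
    return parser.links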
Example 8: download_and_calibrate_parallel
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
def download_and_calibrate_parallel(list_of_ids, n=None):
    """Download and calibrate in parallel.

    Parameters
    ----------
    list_of_ids : list
        container with img_ids to process
    n : int, optional
        Number of cores for the parallel processing.
        Default: n_cores_system // 2
    """
    setup_cluster(n_cores=n)
    c = Client()
    lbview = c.load_balanced_view()
    results = lbview.map_async(download_and_calibrate, list_of_ids)
    results.wait_interactive()  # wait for all tasks before stopping the cluster
    subprocess.Popen(["ipcluster", "stop", "--quiet"])
Example 9: process_trajectories
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
def process_trajectories(*processors, postprocessor, ipyparallel=None):
    trajectories = []
    for proc in processors:
        for info in proc.get_infos():
            trajectories += [Trajectory(info, proc, postprocessor)]

    if ipyparallel is not None:
        from ipyparallel import Client
        rc = Client(profile=ipyparallel)
        lbv = rc.load_balanced_view()
        with lbv.temp_flags(retries=10):
            # retries is already set via temp_flags for tasks submitted here
            lbv.map_async(_process_trajectory, trajectories)
    else:
        with Pool(processes=os.cpu_count() - 1) as pool:
            pool.map(_process_trajectory, trajectories, chunksize=1)
    log.info("Done!")
Example 10: IPClusterEnsemble
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
class IPClusterEnsemble(SurveyEnsemble):
    """
    Parallelized survey ensemble based on IPython parallel (ipcluster)

    Args:
        \*\*specs:
            user specified values

    Attributes:

    Notes:
    """

    def __init__(self, **specs):
        SurveyEnsemble.__init__(self, **specs)
        # access the cluster
        self.rc = Client()
        self.dview = self.rc[:]
        self.dview.block = True
        with self.dview.sync_imports():
            import EXOSIMS, EXOSIMS.util.get_module
        r1 = self.dview.execute("SurveySim = EXOSIMS.util.get_module.get_module('%s', 'SurveySimulation')" % specs['modules']['SurveySimulation'])
        self.dview.push(dict(specs=specs))
        r2 = self.dview.execute("sim = SurveySim(**specs)")
        self.lview = self.rc.load_balanced_view()

    def run_ensemble(self, run_one, N=10):
        t1 = time.time()
        async_res = []
        for j in range(N):
            ar = self.lview.apply_async(run_one)
            async_res.append(ar)
        print("Submitted tasks: ", len(async_res))
        self.rc.wait(async_res)
        t2 = time.time()
        print("Completed in %d sec" % (t2 - t1))
        res = [ar.get() for ar in async_res]
        return res
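run_ensemble expects a run_one callable that is executed on the engines; since __init__ created a sim object in every engine's namespace, a hypothetical sketch could be (the EXOSIMS attribute names here are assumptions):

def run_one():
    # `sim` was created in each engine's namespace by __init__ above
    sim.run_sim()
    res = sim.DRM[:]   # hypothetical: copy the mission results
    sim.reset_sim()    # leave the engine clean for the next task
    return res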
Example 11: main
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
def main():
    parser = OptionParser()
    parser.set_defaults(n=100)
    parser.set_defaults(tmin=1e-3)
    parser.set_defaults(tmax=1)
    parser.set_defaults(profile='default')

    parser.add_option("-n", type='int', dest='n',
                      help='the number of tasks to run')
    parser.add_option("-t", type='float', dest='tmin',
                      help='the minimum task length in seconds')
    parser.add_option("-T", type='float', dest='tmax',
                      help='the maximum task length in seconds')
    parser.add_option("-p", '--profile', type='str', dest='profile',
                      help="the cluster profile [default: 'default']")

    (opts, args) = parser.parse_args()
    assert opts.tmax >= opts.tmin, "tmax must not be smaller than tmin"

    rc = Client()
    view = rc.load_balanced_view()
    print(view)
    rc.block = True
    nengines = len(rc.ids)
    with rc[:].sync_imports():
        from IPython.utils.timing import time

    # the jobs should take a random time within a range
    times = [random.random() * (opts.tmax - opts.tmin) + opts.tmin for i in range(opts.n)]
    stime = sum(times)

    print("executing %i tasks, totalling %.1f secs on %i engines" % (opts.n, stime, nengines))
    time.sleep(1)
    start = time.time()
    amr = view.map(time.sleep, times)
    amr.get()
    stop = time.time()

    ptime = stop - start
    scale = stime / ptime

    print("executed %.1f secs in %.1f secs" % (stime, ptime))
    print("%.3fx parallel performance on %i engines" % (scale, nengines))
    print("%.1f%% of theoretical max" % (100 * scale / nengines))
Example 12: _perform_evolution
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
def _perform_evolution(self, algo, pop):
    try:
        from ipyparallel import Client

        # Create client
        rc = Client()
        # Create load-balanced view
        lbview = rc.load_balanced_view()
        # Run the task
        lbview.block = True
        ar = lbview.apply(_maptask_target, args=(algo, pop))
        # Get retval
        retval = ar.get()

        # If the result is an exception, re-raise it on this side
        if isinstance(retval, BaseException):
            raise retval
        return retval
    except BaseException as e:
        print('Exception caught during evolution:')
        print(e)
        raise RuntimeError()
Example 13: main
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('directory',
                        help="Provide the directory of the HDF files "
                             "that shall be converted to csv here.")
    args = parser.parse_args()
    root = os.path.abspath(args.directory)
    fnames = glob.glob(os.path.join(root, '*.hdf'))
    logging.info('Found %i files to convert.', len(fnames))

    c = Client()
    lbview = c.load_balanced_view()
    results = lbview.map_async(process_fname, fnames)
    # progress display
    while not results.ready():
        print("{:.1f} %".format(100 * results.progress / len(fnames)))
        sys.stdout.flush()
        time.sleep(10)
    logging.info('Conversion done.')
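The worker process_fname is assumed to be defined elsewhere. A plausible pandas-based sketch of an HDF-to-csv converter (an assumption, not the original code):

def process_fname(fname):
    import os
    import pandas as pd
    df = pd.read_hdf(fname)  # assumes a single dataset per file
    df.to_csv(os.path.splitext(fname)[0] + '.csv', index=False)
    return fname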
Example 14: par_value
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
def par_value(n):
    """
    Parallel option valuation

    Parameters
    ==========
    n : int
        number of option valuations/strikes
    """
    import numpy as np
    from ipyparallel import Client
    c = Client(profile="default")
    view = c.load_balanced_view()
    strikes = np.linspace(80, 120, n)
    option_values = []
    for strike in strikes:
        values = view.apply_async(bsm_mcs_valuation, strike)
        option_values.append(values)
    c.wait(option_values)
    return strikes, option_values
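bsm_mcs_valuation is assumed to be defined elsewhere; the example reads like a parallel Monte Carlo valuation under Black-Scholes-Merton dynamics. A plausible sketch, with all model parameters invented for illustration:

def bsm_mcs_valuation(strike):
    """Monte Carlo value of a European call under BSM dynamics (sketch)."""
    import numpy as np
    S0, T, r, vol, I = 100., 1.0, 0.05, 0.2, 50000  # hypothetical parameters
    rand = np.random.standard_normal(I)
    ST = S0 * np.exp((r - 0.5 * vol ** 2) * T + vol * np.sqrt(T) * rand)
    return np.exp(-r * T) * np.mean(np.maximum(ST - strike, 0))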
Example 15: start_ipcluster
# Required import: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import load_balanced_view [as alias]
def start_ipcluster(ipcluster_exe, nengines, profile, max_retries=50):
    """
    Start a new IPython parallel cluster (daemon)
    with a number of `nengines` and using `profile`.
    """
    from ipyparallel import Client

    ipcluster = None
    rc = None
    dview = None
    lview = None

    ipcluster = os.system(
        '{} start -n={} --profile={} --daemon'
        .format(ipcluster_exe, nengines, profile)
    )

    # retry until ipcluster is ready
    time.sleep(3)
    rc = Client(profile=profile)
    retries = 0
    while True:
        if retries > max_retries:
            stop_ipcluster(ipcluster_exe, profile)
            raise Exception("unable to access (all) engines "
                            "of the IPython parallel cluster")
        if len(rc.ids) < nengines:
            retries += 1
            time.sleep(1)
            continue
        else:
            break

    dview = rc[:]
    lview = rc.load_balanced_view()

    return ipcluster, rc, dview, lview
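A hypothetical usage sketch; stop_ipcluster is the companion teardown helper referenced in the retry loop above and is not shown here:

ipcluster, rc, dview, lview = start_ipcluster('ipcluster', 4, 'default')
try:
    print(lview.map_sync(lambda x: x ** 2, range(8)))
finally:
    stop_ipcluster('ipcluster', 'default')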