本文整理汇总了Python中ipyparallel.Client类的典型用法代码示例。如果您正苦于以下问题:Python Client类的具体用法?Python Client怎么用?Python Client使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Client类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create_optimizer
def create_optimizer(args):
    """Return a configured bluepyopt.optimisations.DEAPOptimisation.

    When ``args.ipyparallel`` (or the L5PCBENCHMARK_USEIPYP env var) is
    set, generations are mapped over an ipyparallel load-balanced view;
    otherwise the optimisation runs with its default serial map.
    """
    use_ipyp = args.ipyparallel or os.getenv('L5PCBENCHMARK_USEIPYP')
    if not use_ipyp:
        generation_map = None
    else:
        from ipyparallel import Client
        client = Client(profile=os.getenv('IPYTHON_PROFILE'))
        logger.debug('Using ipyparallel with %d engines', len(client))
        balanced_view = client.load_balanced_view()

        def generation_map(func, iterable):
            # Time each generation so slow engine pools show up in the log.
            t0 = datetime.now()
            result = balanced_view.map_sync(func, iterable)
            logger.debug('Generation took %s', datetime.now() - t0)
            return result

    return bluepyopt.optimisations.DEAPOptimisation(
        evaluator=l5pc_evaluator.create(),
        map_function=generation_map,
        seed=os.getenv('BLUEPYOPT_SEED', args.seed))
示例2: ParallelPool
class ParallelPool( object ):
    """Facade over either an ipyparallel direct view or a
    multiprocessing.Pool, selected by the configuration file.

    The chosen backend is exposed through ``map``/``imap``/``close`` so
    callers need not care which technology is active.
    """

    def __init__(self):
        # Load configuration and instantiate the batch-worker pool it asks for.
        self.c = Configuration.Configuration( )
        technology = self.c.parallel.technology
        if technology == 'ipython':
            self.IPYc = Client(profile=self.c.parallel.ipython.profile)
            self.pool = self.IPYc[:]
        elif technology == 'python':
            configured = self.c.parallel.python.number_of_processes
            # 0 configured processes means "one per CPU core".
            if configured == 0:
                n_cpus = multiprocessing.cpu_count()
            else:
                n_cpus = configured
            self.pool = multiprocessing.Pool(n_cpus)
        else:
            raise ValueError("Unknown technology %s in configuration file"
                             %(self.c.parallel.technology))

    # The methods below simply forward requests to the active backend.
    def map(self, *args, **kwargs):
        if self.c.parallel.technology == 'ipython':
            # ipyparallel's map returns an AsyncResult; block for the values.
            return self.pool.map(*args, **kwargs).get()
        return self.pool.map(*args, **kwargs)

    def imap(self, *args, **kwargs):
        return self.pool.imap(*args, **kwargs)

    def close(self):
        if self.c.parallel.technology == 'ipython':
            self.IPYc.close()
        else:
            self.pool.close()
            self.pool.join()
示例3: __enter__
def __enter__(self):
    """Start an ipcluster daemon, wait for its controller and engines,
    and return an IPythonPool connected to it.

    Raises IOError when the controller cannot be reached, or when the
    engines fail to register before ``self.timeout`` seconds elapse.
    """
    # Translate our configuration into ipcluster command-line flags.
    args = []
    if self.profile is not None:
        args.append("--profile=" + self.profile)
    if self.cluster_id is not None:
        args.append("--cluster-id=" + self.cluster_id)
    if self.num_engines is not None:
        args.append("--n=" + str(self.num_engines))
    if self.ipython_dir is not None:
        args.append("--ipython-dir=" + self.ipython_dir)
    cmd = " ".join(["ipcluster start --daemonize"] + args)
    self.logger.info('Staring IPython cluster with "' + cmd + '"')
    os.system(cmd)
    num_engines, timeout = self.num_engines, self.timeout
    # Give the daemon a head start before polling for the controller.
    time.sleep(self.min_wait)
    waited = self.min_wait
    client = None
    # Poll once per second until a Client can connect, up to the timeout.
    while client is None:
        try:
            client = Client(profile=self.profile, cluster_id=self.cluster_id)
        except (IOError, TimeoutError):
            if waited >= self.timeout:
                raise IOError("Could not connect to IPython cluster controller")
            if waited % 10 == 0:
                self.logger.info("Waiting for controller to start ...")
            time.sleep(1)
            waited += 1
    if num_engines is None:
        # Expected engine count unknown: wait for at least one engine.
        while len(client) == 0 and waited < timeout:
            if waited % 10 == 0:
                self.logger.info("Waiting for engines to start ...")
            time.sleep(1)
            waited += 1
        if len(client) == 0:
            raise IOError("IPython cluster engines failed to start")
        # Give stragglers extra time, bounded by the remaining timeout budget.
        wait = min(waited, timeout - waited)
        if wait > 0:
            self.logger.info("Waiting {} more seconds for engines to start ...".format(wait))
            time.sleep(wait)
    else:
        # Expected engine count known: wait until all of them register.
        running = len(client)
        while running < num_engines and waited < timeout:
            if waited % 10 == 0:
                self.logger.info(
                    "Waiting for {} of {} engines to start ...".format(num_engines - running, num_engines)
                )
            time.sleep(1)
            waited += 1
            running = len(client)
        running = len(client)
        if running < num_engines:
            raise IOError(
                "{} of {} IPython cluster engines failed to start".format(num_engines - running, num_engines)
            )
    # This client was only used for monitoring startup; the pool opens its
    # own connection with the same profile/cluster-id.
    client.close()
    self.pool = IPythonPool(profile=self.profile, cluster_id=self.cluster_id)
    return self.pool
示例4: run_jobs_on_ipythoncluster
def run_jobs_on_ipythoncluster(worker, task_list, shutdown_ipengines_after_done=False):
    """Map *worker* over *task_list* on the ipyparallel cluster.

    Returns the list of results, or None when the user aborts with
    Ctrl-C while the jobs are running.
    """
    t0 = time.time()
    client = Client(CLUSTER_CLIENT_JSON)
    view = client.load_balanced_view()
    n_nodes = len(view.targets or client.ids)
    print("\t# nodes in use: {}".format(n_nodes))
    view.block = False
    print("\t# of tasks: {}".format(len(task_list)))
    print("\tsubmitting...", end='')
    async_job = view.map_async(worker, task_list)
    print("done.")
    try:
        async_job.wait_interactive()
    except KeyboardInterrupt:
        # The user hit Ctrl-C while waiting; offer to cancel the jobs.
        if ask("\nAbort all submitted jobs?") == 'Y':
            view.abort()
            print("Aborted, all submitted jobs are cancelled.")
        else:
            print("Aborted, but your jobs are still running on the cluster.")
        return
    results = async_job.result()
    if len(results) != len(task_list):
        print("WARNING:\t# of results returned ({}) != # of tasks ({}).".format(len(results), len(task_list)))
    print("\ttotal time: {}".format(timesofar(t0)))
    if shutdown_ipengines_after_done:
        print("\tshuting down all ipengine nodes...", end='')
        view.shutdown()
        print('Done.')
    return results
示例5: add_engines
def add_engines(n=1, profile='iptest', total=False):
    """Start engines for *profile* and return their process launchers.

    When *total* is true, engines that are already connected count toward
    *n*, so only the shortfall (if any) is launched.
    """
    rc = Client(profile=profile)
    base = len(rc)
    if total:
        n = max(n - base, 0)
    eps = []
    for _ in range(n):
        launcher = TestProcessLauncher()
        launcher.cmd_and_args = ipengine_cmd_argv + [
            '--profile=%s' % profile,
            '--InteractiveShell.colors=nocolor'
        ]
        launcher.start()
        launchers.append(launcher)
        eps.append(launcher)
    # Poll until every new engine has registered with the controller.
    deadline = time.time() + 15
    while len(rc) < base + n:
        if any(launcher.poll() is not None for launcher in eps):
            raise RuntimeError("A test engine failed to start.")
        if time.time() > deadline:
            raise RuntimeError("Timeout waiting for engines to connect.")
        time.sleep(.1)
    rc.close()
    return eps
示例6: simulate_general
def simulate_general(runner, results_filename):
    """Run *runner*'s MIMO simulation and return (results, results_filename).

    The simulation runs in parallel when IPython engines are reachable,
    and serially otherwise.
    """
    # Show what is about to be simulated.
    pprint(runner.params.parameters)
    print("MIMO Scheme: {0}".format(runner.mimo_object.__class__.__name__))

    # Replace any parameter mention in results_filename.
    runner.set_results_filename(results_filename)

    # Try to obtain an ipyparallel view; success means engines are running
    # and the simulation can be distributed.
    lview = None
    # noinspection PyBroadException,PyBroadException
    try:
        from ipyparallel import Client
        cl = Client()
        # A direct view runs code on every engine.
        dview = cl.direct_view()
        # Clear leftover state from previous computations.
        dview.execute('%reset')
        dview.execute('import sys')
        # block=True guarantees every engine has the simulator folder on its
        # path before the load-balanced view below is used.
        dview.execute('sys.path.append("{0}")'.format(parent_dir), block=True)
        # The actual work is better distributed through a load-balanced view.
        lview = cl.load_balanced_view()
    except Exception:  # pylint: disable=W0703
        # No engines reachable: fall back to a serial run.
        lview = None

    if lview is not None:
        print("-----> Simulation will be run in Parallel")
        runner.simulate_in_parallel(lview)
    else:
        print("-----> Simulation will be run serially")
        runner.simulate()

    print("Runned iterations: {0}".format(runner.runned_reps))
    print("Elapsed Time: {0}".format(runner.elapsed_time))
    print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n")
    return runner.results, runner.results_filename
示例7: __init__
def __init__(self):
    """Connect to the running ipyparallel cluster and prime every engine
    with numpy and the sklearn tree classes."""
    from ipyparallel import Client
    rc = Client()
    rc.block = True  # make engine calls synchronous
    self.cpu = rc[:]  # direct view over all engines
    # Parenthesized print works on both Python 2 and 3; the original used
    # a Python-2-only print statement, which is a SyntaxError on Python 3.
    print('{} cores ready'.format(len(self.cpu)))
    self.cpu.execute('import numpy as np')
    self.cpu.execute('from sklearn.neighbors import KDTree, BallTree')
示例8: do_parallel
def do_parallel(filelist):
    """Distribute *singlejob* over *filelist* on the cluster and wait
    interactively for completion."""
    client = Client()
    print('# of engines : %d' % len(client.ids))
    print('# of job : %d' % len(filelist))
    balanced = client.load_balanced_view()
    async_result = balanced.map_async(singlejob, filelist)
    async_result.wait_interactive()
示例9: connect_client
def connect_client(self):
    """connect a client with my Context, and track its sockets for cleanup"""
    client = Client(profile='iptest', context=self.context)
    # Route waits through our helper so the test harness can bound them.
    client.wait = lambda *a, **kw: self.client_wait(client, *a, **kw)
    for attr_name in dir(client):
        if not attr_name.endswith('socket'):
            continue
        sock = getattr(client, attr_name)
        sock.setsockopt(zmq.LINGER, 0)  # drop pending messages on close
        self.sockets.append(sock)
    return client
示例10: setup_parallel
def setup_parallel(dbname):
    """Push *dbname* to every engine and return a load-balanced view."""
    client = Client()
    direct = client.direct_view()
    # Every engine needs the database name available as a plain string.
    direct.push({'dbname': str(dbname)})
    return client.load_balanced_view()
示例11: setup_parallel
def setup_parallel(parallel):
    """Return a blocking load-balanced view when *parallel* is true,
    otherwise None (caller falls back to serial execution)."""
    if not parallel:
        return None
    # dill can serialize closures/lambdas that plain pickle cannot.
    pickleutil.use_dill()
    #can_map.pop(FunctionType, None)
    #serialize.pickle = pickle
    print("Running in parallel")
    client = Client()
    client[:].use_dill()
    view = client.load_balanced_view()
    view.block = True
    return view
示例12: download_and_calibrate_parallel
def download_and_calibrate_parallel(list_of_ids, n=None):
    """Download and calibrate in parallel.

    Parameters
    ----------
    list_of_ids : list, optional
        container with img_ids to process
    n : int
        Number of cores for the parallel processing. Default: n_cores_system//2
    """
    setup_cluster(n_cores=n)
    c = Client()
    lbview = c.load_balanced_view()
    # FIX: map_async returns immediately, so the original stopped the
    # cluster while the tasks were still running. Wait for completion
    # before tearing the engines down.
    result = lbview.map_async(download_and_calibrate, list_of_ids)
    result.wait_interactive()
    subprocess.Popen(["ipcluster", "stop", "--quiet"])
示例13: stop_server
def stop_server(is_slurm=False):
    '''
    programmatically stops the ipyparallel server

    When ``is_slurm`` is true, connects via the IPPPDIR/IPPPROFILE
    environment variables, shuts the hub down, and archives *.log files
    into ./log/. Otherwise it shells out to ``ipcluster stop`` and
    inspects its stderr to decide whether a cluster was running.
    '''
    sys.stdout.write("Stopping cluster...\n")
    sys.stdout.flush()
    if is_slurm:
        from ipyparallel import Client
        pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
        c = Client(ipython_dir=pdir, profile=profile)
        ee = c[:]
        ne = len(ee)
        # FIX: parenthesized print — the original Python-2 print statement
        # is a SyntaxError on Python 3.
        print('Shutting down %d engines.' % (ne))
        c.shutdown(hub=True)
        shutil.rmtree('profile_' + str(profile))
        try:
            shutil.rmtree('./log/')
        except OSError:
            # Log folder did not exist yet; it is created below.
            print('creating log folder')
        files = glob.glob('*.log')
        os.mkdir('./log')
        for fl in files:
            shutil.move(fl, './log/')
    else:
        proc = subprocess.Popen(["ipcluster stop"], shell=True, stderr=subprocess.PIPE)
        # FIX: stderr yields bytes on Python 3; decode before the
        # substring tests below, which would otherwise always be False.
        line_out = proc.stderr.readline().decode(errors='replace')
        if 'CRITICAL' in line_out:
            sys.stdout.write("No cluster to stop...")
            sys.stdout.flush()
        elif 'Stopping' in line_out:
            st = time.time()
            sys.stdout.write('Waiting for cluster to stop...')
            while (time.time() - st) < 4:
                sys.stdout.write('.')
                sys.stdout.flush()
                time.sleep(1)
        else:
            print('**** Unrecognized Syntax in ipcluster output, waiting for server to stop anyways ****')
        sys.stdout.write(" done\n")
示例14: __init__
def __init__(self, addpath=None):
    """Connect to the cluster and keep a load-balanced view for mapping."""
    self.client = Client()
    self.load_balanced_view = self.client.load_balanced_view()
    n_engines = len(self.client.ids)
    if n_engines == 0:
        # No engines registered: work will run locally.
        print('# of engines : single mode')
    else:
        print('# of engines : %d' % n_engines)
示例15: __init__
def __init__(self):
    """Build the batch-worker pool selected by the configuration file."""
    # Load configuration first; it names the parallel technology to use.
    self.c = Configuration.Configuration( )
    technology = self.c.parallel.technology
    if technology == 'ipython':
        # ipyparallel backend: direct view over all engines.
        self.IPYc = Client(profile=self.c.parallel.ipython.profile)
        self.pool = self.IPYc[:]
    elif technology == 'python':
        # multiprocessing backend; 0 configured processes means one per core.
        configured = self.c.parallel.python.number_of_processes
        if configured == 0:
            n_cpus = multiprocessing.cpu_count()
        else:
            n_cpus = configured
        self.pool = multiprocessing.Pool(n_cpus)
    else:
        raise ValueError("Unknown technology %s in configuration file"
                         %(self.c.parallel.technology))