

Python Client.close method code examples

This article collects typical usage examples of the Python method ipyparallel.Client.close. If you are wondering what exactly Client.close does, how to use it, or want to see concrete examples of it, the curated code samples below may help. You can also explore further usage examples of its containing class, ipyparallel.Client.


The sections below present 15 code examples of the Client.close method, ordered by popularity by default.
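
Before the examples, a minimal sketch of the pattern they all share may help: connect a Client to a running cluster, distribute work through a view, and call close() to release the client's sockets. This sketch assumes a cluster was already started (e.g. with `ipcluster start -n 4`); the doubling function is purely illustrative.

from ipyparallel import Client

def double(x):
    return 2 * x

rc = Client()                    # connect to the default running cluster
try:
    view = rc[:]                 # DirectView over all engines
    print(view.map_sync(double, range(4)))   # -> [0, 2, 4, 6]
finally:
    rc.close()                   # always release the client's sockets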

Example 1: ParallelPool

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]
class ParallelPool( object ):
    
    def __init__(self):
        
        #Load configuration
        self.c = Configuration.Configuration(  )
        
        #Now instance the pool of batch workers according
        #to the technology selected in the configuration file
        
        if self.c.parallel.technology=='ipython':
            
            self.IPYc = Client( profile=self.c.parallel.ipython.profile )
    
            self.pool = self.IPYc[:]
        
        elif self.c.parallel.technology=='python':
            
            if self.c.parallel.python.number_of_processes==0:
                
                n_cpus = multiprocessing.cpu_count()
            
            else:
                
                n_cpus = self.c.parallel.python.number_of_processes
            
            self.pool = multiprocessing.Pool( n_cpus )
        
        else:
            
            raise ValueError("Unknown technology %s in configuration file" 
                             %(self.c.parallel.technology))
    
    #The following methods simply forward the requests to the
    #batch worker technology
    
    def map( self, *args, **kwargs ):
        
        if self.c.parallel.technology=='ipython':
        
            return self.pool.map( *args, **kwargs ).get()
        
        else:
            
            return self.pool.map( *args, **kwargs )
    
    def imap( self, *args, **kwargs ):
        
        return self.pool.imap( *args, **kwargs )
    
    def close( self ):
        
        if self.c.parallel.technology=='ipython':
            
            self.IPYc.close()
        
        else:
            
            self.pool.close()
            self.pool.join()
Developer: giacomov | Project: pyggop | Lines of code: 62 | Source file: ParallelPool.py
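
A hypothetical usage of this class might look as follows; it assumes a valid pyggop configuration file (ParallelPool reads the backend choice from it) and, when technology is 'ipython', a running ipyparallel cluster.

# Hypothetical usage sketch; configuration and cluster are assumed to exist.
def square(x):
    return x ** 2

pool = ParallelPool()
try:
    squares = pool.map(square, range(10))
finally:
    pool.close()   # closes the IPython client or joins the multiprocessing pool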

Example 2: __enter__

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]
    def __enter__(self):
        args = []
        if self.profile is not None:
            args.append("--profile=" + self.profile)
        if self.cluster_id is not None:
            args.append("--cluster-id=" + self.cluster_id)
        if self.num_engines is not None:
            args.append("--n=" + str(self.num_engines))
        if self.ipython_dir is not None:
            args.append("--ipython-dir=" + self.ipython_dir)
        cmd = " ".join(["ipcluster start --daemonize"] + args)
        self.logger.info('Starting IPython cluster with "' + cmd + '"')
        os.system(cmd)

        num_engines, timeout = self.num_engines, self.timeout
        time.sleep(self.min_wait)
        waited = self.min_wait
        client = None
        while client is None:
            try:
                client = Client(profile=self.profile, cluster_id=self.cluster_id)
            except (IOError, TimeoutError):
                if waited >= self.timeout:
                    raise IOError("Could not connect to IPython cluster controller")
                if waited % 10 == 0:
                    self.logger.info("Waiting for controller to start ...")
                time.sleep(1)
                waited += 1

        if num_engines is None:
            while len(client) == 0 and waited < timeout:
                if waited % 10 == 0:
                    self.logger.info("Waiting for engines to start ...")
                time.sleep(1)
                waited += 1
            if len(client) == 0:
                raise IOError("IPython cluster engines failed to start")
            wait = min(waited, timeout - waited)
            if wait > 0:
                self.logger.info("Waiting {} more seconds for engines to start ...".format(wait))
                time.sleep(wait)
        else:
            running = len(client)
            while running < num_engines and waited < timeout:
                if waited % 10 == 0:
                    self.logger.info(
                        "Waiting for {} of {} engines to start ...".format(num_engines - running, num_engines)
                    )
                time.sleep(1)
                waited += 1
                running = len(client)
            running = len(client)
            if running < num_engines:
                raise IOError(
                    "{} of {} IPython cluster engines failed to start".format(num_engines - running, num_engines)
                )
        client.close()

        self.pool = IPythonPool(profile=self.profile, cluster_id=self.cluster_id)
        return self.pool
Developer: lucas-ca | Project: pymor | Lines of code: 62 | Source file: ipython.py
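
Since __enter__ returns the pool, the enclosing class (not shown in this excerpt) is a context manager. A hedged usage sketch, with new_ipcluster_pool standing in for however the factory is actually named in pymor:

# Assumed usage; the factory name, its arguments, and the matching __exit__
# (which presumably stops the cluster again) are not shown in the excerpt.
with new_ipcluster_pool(profile='myprofile', num_engines=4) as pool:
    results = pool.map(expensive_function, parameter_list)   # names illustrative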

Example 3: add_engines

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]
def add_engines(n=1, profile='iptest', total=False):
    """add a number of engines to a given profile.
    
    If total is True, then already running engines are counted, and only
    the additional engines necessary (if any) are started.
    """
    rc = Client(profile=profile)
    base = len(rc)
    
    if total:
        n = max(n - base, 0)
    
    eps = []
    for i in range(n):
        ep = TestProcessLauncher()
        ep.cmd_and_args = ipengine_cmd_argv + [
            '--profile=%s' % profile,
            '--InteractiveShell.colors=nocolor'
            ]
        ep.start()
        launchers.append(ep)
        eps.append(ep)
    tic = time.time()
    while len(rc) < base+n:
        if any([ ep.poll() is not None for ep in eps ]):
            raise RuntimeError("A test engine failed to start.")
        elif time.time()-tic > 15:
            raise RuntimeError("Timeout waiting for engines to connect.")
        time.sleep(.1)
    rc.close()
    return eps
Developer: marcjaxa | Project: EMAworkbench | Lines of code: 33 | Source file: test_ema_ipyparallel.py
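
A hedged usage sketch: with total=True the call tops an existing 'iptest' cluster up to the requested engine count instead of always launching new engines.

# Usage sketch: ensure at least four engines are running in the 'iptest' profile.
eps = add_engines(4, profile='iptest', total=True)
print('started %d additional engine(s)' % len(eps))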

Example 4: motion_correct_parallel

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]
def motion_correct_parallel(file_names,fr,template=None,margins_out=0,max_shift_w=5, max_shift_h=5,remove_blanks=False,apply_smooth=True,backend='single_thread'):
    """motion correct many movies usingthe ipyparallel cluster
    Parameters
    ----------
    file_names: list of strings
        names of he files to be motion corrected
    fr: double
        fr parameters for calcblitz movie 
    margins_out: int
        number of pixels to remove from the borders    
    
    Return
    ------
    base file names of the motion corrected files
    """
    args_in=[];
    for f in file_names:
        args_in.append((f,fr,margins_out,template,max_shift_w, max_shift_h,remove_blanks,apply_smooth))
        
    try:
        
        if backend == 'ipyparallel':
            
            c = Client()   
            dview=c[:]
            file_res = dview.map_sync(process_movie_parallel, args_in)                         
            dview.results.clear()       
            c.purge_results('all')
            c.purge_everything()
            c.close()    

        elif backend == 'single_thread':
            
            file_res = map(process_movie_parallel, args_in)        
                 
        else:
            raise Exception('Unknown backend')
        
    except :   
        
        try:
            if backend == 'ipyparallel':
                
                dview.results.clear()       
                c.purge_results('all')
                c.purge_everything()
                c.close()
        except UnboundLocalError as uberr:

            print('could not close client')

        raise
                                    
    return file_res
Developer: epnev | Project: CalBlitz | Lines of code: 56 | Source file: utils.py
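
For illustration, a hedged call of the function above on the ipyparallel backend; the file names and shift limits are made up, and a running cluster is assumed.

fnames = ['movie1.tif', 'movie2.tif']   # illustrative inputs
file_res = motion_correct_parallel(fnames, fr=30,
                                   max_shift_w=10, max_shift_h=10,
                                   backend='ipyparallel')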

Example 5: start_server

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]
def start_server(slurm_script=None, ipcluster="ipcluster", ncpus=None):
    """
    programmatically start the ipyparallel server

    Parameters:
    ----------
    ncpus: int
        number of processors

    ipcluster : str
        ipcluster binary file name; requires 4 path separators on Windows. ipcluster="C:\\\\Anaconda2\\\\Scripts\\\\ipcluster.exe"
         Default: "ipcluster"
    """
    logger.info("Starting cluster...")
    if ncpus is None:
        ncpus = psutil.cpu_count()

    if slurm_script is None:
        if ipcluster == "ipcluster":
            subprocess.Popen(
                "ipcluster start -n {0}".format(ncpus), shell=True, close_fds=(os.name != 'nt'))
        else:
            subprocess.Popen(shlex.split(
                "{0} start -n {1}".format(ipcluster, ncpus)), shell=True, close_fds=(os.name != 'nt'))

        # Check that all processes have started
        time.sleep(1)
        client = ipyparallel.Client()
        while len(client) < ncpus:
            client.close()

            time.sleep(1)
            client = ipyparallel.Client()
        time.sleep(10)
        logger.debug('Making sure everything is up and running')
        client.close()

    else:
        shell_source(slurm_script)
        pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
        print([pdir,profile])
        c = Client(ipython_dir=pdir, profile=profile)
        ee = c[:]
        ne = len(ee)
        print(('Running on %d engines.' % (ne)))
        c.close()
        sys.stdout.write(" done\n")
Developer: Peichao | Project: Constrained_NMF | Lines of code: 49 | Source file: cluster.py
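
A hedged sketch of how start_server pairs with a client connection; the distributed work is illustrative.

start_server(ncpus=4)        # spawns `ipcluster start -n 4` and waits for engines
c = ipyparallel.Client()
dview = c[:]
# ... distribute work, e.g. dview.map_sync(some_function, some_args) ...
c.close()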

Example 6: test_hubresult_timestamps

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]
    def test_hubresult_timestamps(self):
        self.minimum_engines(4)
        v = self.client[:]
        ar = v.apply_async(time.sleep, 0.25)
        ar.get(2)
        rc2 = Client(profile='iptest')
        # must have try/finally to close second Client, otherwise
        # will have dangling sockets causing problems
        try:
            time.sleep(0.25)
            hr = rc2.get_result(ar.msg_ids)
            self.assertTrue(hr.elapsed > 0., "got bad elapsed: %s" % hr.elapsed)
            hr.get(1)
            self.assertTrue(hr.wall_time < ar.wall_time + 0.2, "got bad wall_time: %s > %s" % (hr.wall_time, ar.wall_time))
            self.assertEqual(hr.serial_time, ar.serial_time)
        finally:
            rc2.close()
Developer: KEHANG | Project: ipyparallel | Lines of code: 19 | Source file: test_asyncresult.py

Example 7: extract_rois_patch

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]
def extract_rois_patch(file_name,d1,d2,rf=5,stride = 5):
    idx_flat,idx_2d=extract_patch_coordinates(d1, d2, rf=rf,stride = stride)
    perctl=95
    n_components=2
    tol=1e-6
    max_iter=5000
    args_in=[]    
    for id_f,id_2d in zip(idx_flat,idx_2d):        
        args_in.append((file_name, id_f,id_2d[0].shape, perctl,n_components,tol,max_iter))
    st=time.time()
    print(len(idx_flat))
    try:
        if 1:
            c = Client()   
            dview=c[:]
            file_res = dview.map_sync(nmf_patches, args_in)                         
        else:
            file_res = map(nmf_patches, args_in)                         
    finally:
        dview.results.clear()   
        c.purge_results('all')
        c.purge_everything()
        c.close()
    
    print(time.time() - st)
    
    A1=lil_matrix((d1*d2,len(file_res)))
    C1=[]
    A2=lil_matrix((d1*d2,len(file_res)))
    C2=[]
    for count,f in enumerate(file_res):
        idx_,flt,ca,d=f
        #flt,ca,_=cse.order_components(coo_matrix(flt),ca)
        A1[idx_,count]=flt[:,0][:,np.newaxis]        
        A2[idx_,count]=flt[:,1][:,np.newaxis]        
        C1.append(ca[0,:])
        C2.append(ca[1,:])
#        pl.imshow(np.reshape(flt[:,0],d,order='F'),vmax=10)
#        pl.pause(.1)
        
        
    return A1,A2,C1,C2
Developer: sebiRolotti | Project: Constrained_NMF | Lines of code: 44 | Source file: map_reduce.py

Example 8: start_server

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]
def start_server(ncpus,slurm_script=None):
    '''
    programmatically start the ipyparallel server

    Parameters
    ----------
    ncpus: int
        number of processors

    '''
    sys.stdout.write("Starting cluster...")
    sys.stdout.flush()
    
    if slurm_script is None:
        subprocess.Popen(["ipcluster start -n {0}".format(ncpus)], shell=True)
        while True:
            try:
                c = ipyparallel.Client()
                if len(c) < ncpus:
                    sys.stdout.write(".")
                    sys.stdout.flush()
                    raise ipyparallel.error.TimeoutError
                c.close()
                break
            except (IOError, ipyparallel.error.TimeoutError):
                sys.stdout.write(".")
                sys.stdout.flush()
                time.sleep(1)
    else:
        shell_source(slurm_script)
        from ipyparallel import Client
        pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
        c = Client(ipython_dir=pdir, profile=profile)   
        ee = c[:]
        ne = len(ee)
        print('Running on %d engines.' % ne)
        c.close()             
        sys.stdout.write(" done\n")
Developer: valentina-s | Project: Constrained_NMF | Lines of code: 40 | Source file: utilities.py

Example 9: run_CNMF_patches

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]
def run_CNMF_patches(file_name, shape, options, rf=16, stride = 4, n_processes=2, backend='single_thread',memory_fact=1):
    """Function that runs CNMF in patches, either in parallel or sequentiually, and return the result for each. It requires that ipyparallel is running
        
    Parameters
    ----------        
    file_name: string
        full path to an npy file (2D, pixels x time) containing the movie        
        
    shape: tuple of thre elements
        dimensions of the original movie across y, x, and time 
    
    options:
        dictionary containing all the parameters for the various algorithms
    
    rf: int 
        half-size of the square patch in pixel
    
    stride: int
        amount of overlap between patches
        
    backend: string
        'ipyparallel' or 'single_thread'
    
    n_processes: int
        nuber of cores to be used (should be less than the number of cores started with ipyparallel)
        
    memory_fact: double
        unitless number accounting how much memory should be used. It represents the fration of patch processed in a single thread. You will need to try different values to see which one would work
    
    
    Returns
    -------
    A_tot: matrix containing all the componenents from all the patches
    
    C_tot: matrix containing the calcium traces corresponding to A_tot
    
    sn_tot: per pixel noise estimate
    
    optional_outputs: set of outputs related to the result of CNMF ALGORITHM ON EACH patch   
    """
    (d1,d2,T)=shape
    d=d1*d2
    K=options['init_params']['K']
    
    options['preprocess_params']['backend']='single_thread' 
    options['preprocess_params']['n_pixels_per_process']=np.int((rf*rf)/memory_fact)
    options['spatial_params']['n_pixels_per_process']=np.int((rf*rf)/memory_fact)
    options['temporal_params']['n_pixels_per_process']=np.int((rf*rf)/memory_fact)
    options['spatial_params']['backend']='single_thread'
    options['temporal_params']['backend']='single_thread'

    
    idx_flat,idx_2d=extract_patch_coordinates(d1, d2, rf=rf, stride = stride)
#    import pdb 
#    pdb.set_trace()
    args_in=[]    
    for id_f,id_2d in zip(idx_flat[:],idx_2d[:]):        
        args_in.append((file_name, id_f,id_2d[0].shape, options))

    print(len(idx_flat))

    st=time.time()        
    
    if backend == 'ipyparallel':

        try:

            c = Client()   
            dview=c[:n_processes]
            file_res = dview.map_sync(cnmf_patches, args_in)        
            dview.results.clear()   
            c.purge_results('all')
            c.purge_everything()
            c.close()         
        except:
            print('Something went wrong')  
            raise
        finally:
            print('You may think that it went well but reality is harsh')
                    

    elif backend == 'single_thread':

        file_res = map(cnmf_patches, args_in)                         

    else:
        raise Exception('Backend unknown')
            
      
    print(time.time() - st)
    
    
    # extract the values from the output of mapped computation
    num_patches=len(file_res)
    
    A_tot=scipy.sparse.csc_matrix((d,K*num_patches))
    B_tot=scipy.sparse.csc_matrix((d,num_patches))
    C_tot=np.zeros((K*num_patches,T))
    F_tot=np.zeros((num_patches,T))
    mask=np.zeros(d)
#......... part of the code is omitted here .........
Developer: sebiRolotti | Project: Constrained_NMF | Lines of code: 103 | Source file: map_reduce.py

Example 10: stop_server

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]
def stop_server(ipcluster='ipcluster', pdir=None, profile=None, dview=None):
    """
    programmatically stops the ipyparallel server

    Parameters
    ----------
    ipcluster : str
        ipcluster binary file name; requires 4 path separators on Windows
        Default: "ipcluster"

    """
    if 'multiprocessing' in str(type(dview)):
        dview.terminate()
    else:
        logger.info("Stopping cluster...")
        try:
            pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
            is_slurm = True
        except:
            logger.debug('stop_server: not a slurm cluster')
            is_slurm = False

        if is_slurm:
            if pdir is None and profile is None:
                pdir, profile = os.environ['IPPPDIR'], os.environ['IPPPROFILE']
            c = Client(ipython_dir=pdir, profile=profile)
            ee = c[:]
            ne = len(ee)
            print(('Shutting down %d engines.' % (ne)))
            c.close()
            c.shutdown(hub=True)
            shutil.rmtree('profile_' + str(profile))
            try:
                shutil.rmtree('./log/')
            except:
                print('creating log folder')

            files = glob.glob('*.log')
            os.mkdir('./log')

            for fl in files:
                shutil.move(fl, './log/')

        else:
            if ipcluster == "ipcluster":
                proc = subprocess.Popen(
                    "ipcluster stop", shell=True, stderr=subprocess.PIPE, close_fds=(os.name != 'nt'))
            else:
                proc = subprocess.Popen(shlex.split(ipcluster + " stop"),
                                        shell=True, stderr=subprocess.PIPE, close_fds=(os.name != 'nt'))

            line_out = proc.stderr.readline()
            if b'CRITICAL' in line_out:
                logger.info("No cluster to stop...")
            elif b'Stopping' in line_out:
                st = time.time()
                logger.debug('Waiting for cluster to stop...')
                while (time.time() - st) < 4:
                    sys.stdout.write('.')
                    sys.stdout.flush()
                    time.sleep(1)
            else:
                print(line_out)
                print(
                    '**** Unrecognized syntax in ipcluster output, waiting for server to stop anyways ****')

            proc.stderr.close()

    logger.info("stop_cluster(): done")
Developer: Peichao | Project: Constrained_NMF | Lines of code: 71 | Source file: cluster.py
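
Together with start_server from Example 5 (which comes from the same cluster.py), this gives a full cluster lifecycle. A hedged sketch; the work function and its inputs are illustrative:

start_server(ncpus=4)
c = ipyparallel.Client()
dview = c[:]
try:
    results = dview.map_sync(process_item, items)   # names illustrative
finally:
    c.close()
    stop_server()   # shuts the ipcluster daemon down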

Example 11: update_temporal_components_parallel

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]

#......... part of the code is omitted here .........
            print(len(c))
            raise Exception("the number of nodes in the cluster are less than the required processes: decrease the n_processes parameter to a suitable value")            
        
        dview=c[:n_processes] # use the number of processes
    
    Cin=np.array(Cin.todense())    
    for iter in range(ITER):
        O,lo = update_order(A.tocsc()[:,:nr])
        P_=[];
        for count,jo_ in enumerate(O):
            jo=np.array(list(jo_))           
            Ytemp = YrA[:,jo.flatten()] + (np.dot(np.diag(nA[jo]),Cin[jo,:])).T
            Ctemp = np.zeros((np.size(jo),T))
            Stemp = np.zeros((np.size(jo),T))
            btemp = np.zeros((np.size(jo),1))
            sntemp = btemp.copy()
            c1temp = btemp.copy()
            gtemp = np.zeros((np.size(jo),kwargs['p']));
            nT = nA[jo]            
            
            
#            args_in=[(np.squeeze(np.array(Ytemp[:,jj])), nT[jj], jj, bl[jo[jj]], c1[jo[jj]], g[jo[jj]], sn[jo[jj]], kwargs) for jj in range(len(jo))]
            args_in=[(np.squeeze(np.array(Ytemp[:,jj])), nT[jj], jj, None, None, None, None, kwargs) for jj in range(len(jo))]
            
            if backend == 'ipyparallel':                    
                
                results = dview.map_sync(constrained_foopsi_parallel,args_in)        

            elif backend == 'single_thread':
                
                results = map(constrained_foopsi_parallel,args_in)            
                
            else:
                
                raise Exception('Backend not defined. Use either single_thread or ipyparallel')
                
            for chunk in results:
                #pars=dict(kwargs)
                C_,Sp_,Ytemp_,cb_,c1_,sn_,gn_,jj_=chunk                    
                Ctemp[jj_,:] = C_[None,:]
                                
                Stemp[jj_,:] = Sp_               
                Ytemp[:,jj_] = Ytemp_[:,None]            
                btemp[jj_] = cb_
                c1temp[jj_] = c1_
                sntemp[jj_] = sn_   
                gtemp[jj_,:] = gn_.T  
                   
                bl[jo[jj_]] = cb_
                c1[jo[jj_]] = c1_
                sn[jo[jj_]] = sn_
                g[jo[jj_]] = gtemp[jj_,:]  # original read gtemp[jj,:]; jj_ appears intended #[jj_,np.abs(gtemp[jj,:])>0]
                             
                
                #pars['b'] = cb_
#                pars['c1'] = c1_                 
#                pars['neuron_sn'] = sn_
#                pars['gn'] = gtemp[jj_,np.abs(gtemp[jj,:])>0] 
#                
##                for jj = 1:length(O{jo})
##                    P.gn(O{jo}(jj)) = {gtemp(jj,abs(gtemp(jj,:))>0)'};
##                end
#                pars['neuron_id'] = jo[jj_]
#                P_.append(pars)
            
            YrA[:,jo] = Ytemp
            C[jo,:] = Ctemp            
            S[jo,:] = Stemp
            
#            if (np.sum(lo[:jo])+1)%1 == 0:
            print(str(np.sum(lo[:count])) + ' out of total ' + str(nr) + ' temporal components updated\n')
        
        ii=nr        
        YrA[:,ii] = YrA[:,ii] + nA[ii]*np.atleast_2d(Cin[ii,:]).T
        cc = np.maximum(YrA[:,ii]/nA[ii],0)
        C[ii,:] = cc[:].T
        YrA[:,ii] = YrA[:,ii] - nA[ii]*np.atleast_2d(C[ii,:]).T 
        
        if backend == 'ipyparallel':       
            dview.results.clear()   
            c.purge_results('all')
            c.purge_everything()

        if scipy.linalg.norm(Cin - C,'fro')/scipy.linalg.norm(C,'fro') <= 1e-3:
            # stop if the overall temporal component does not change by much
            print "stopping: overall temporal component not changing significantly"
            break
        else:
            Cin = C
    
    Y_res = Y - A*C # this includes the baseline term
    
    f = C[nr:,:]
    C = C[:nr,:]
        
    P_ = sorted(P_, key=lambda k: k['neuron_id']) 
    if backend == 'ipyparallel':      
        c.close()
    
    return C,f,Y_res,S,bl,c1,sn,g
Developer: garretstuber | Project: Constrained_NMF | Lines of code: 104 | Source file: temporal.py

Example 12: update_spatial_components

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]

#......... part of the code is omitted here .........
        else:
            Y_name = os.path.join(folder, 'Y_temp.npy')
            np.save(Y_name, Y)            
            Y,_,_,_=load_memmap(Y_name)    

        # create arguments to be passed to the function. Here we are grouping
        # bunch of pixels to be processed by each thread
        pixel_groups = [(Y_name, C_name, sn, ind2_, range(i, i + n_pixels_per_process))
                        for i in range(0, d1 * d2 - n_pixels_per_process + 1, n_pixels_per_process)]

        A_ = np.zeros((d, nr + np.size(f, 0)))
    
        try:  # if server is not running and raise exception if not installed or not started
            from ipyparallel import Client
            c = Client()
        except:
            print "this backend requires the installation of the ipyparallel (pip install ipyparallel) package and  starting a cluster (type ipcluster start -n 6) where 6 is the number of nodes"
            raise

        if len(c) < n_processes:
            print(len(c))
            raise Exception(
                "the number of nodes in the cluster are less than the required processes: decrease the n_processes parameter to a suitable value")

        dview = c[:n_processes]  # use the number of processes
        #serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)                        
        parallel_result = dview.map_sync(lars_regression_noise_ipyparallel, pixel_groups)
        # clean up
       
        
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
        
        dview.results.clear()
        c.purge_results('all')
        c.purge_everything()
        c.close()

    elif backend == 'single_thread':

        Cf_ = [Cf[idx_, :] for idx_ in ind2_]

        #% LARS regression
        A_ = np.hstack((np.zeros((d, nr)), np.zeros((d, np.size(f, 0)))))

        for c, y, s, id2_, px in zip(Cf_, Y, sn, ind2_, range(d)):
            if px % 1000 == 0:
                print(px)
            if np.size(c) > 0:
                _, _, a, _, _ = lars_regression_noise(y, np.array(c.T), 1, sn[px]**2 * T)
                if np.isscalar(a):
                    A_[px, id2_] = a
                else:
                    A_[px, id2_] = a.T

    else:
        raise Exception(
            'Unknown backend specified: use single_thread, threading, multiprocessing or ipyparallel')
    
    #%
    print('Updated Spatial Components')
   
    A_ = threshold_components(A_, d1, d2)

    print "threshold"
    ff = np.where(np.sum(A_, axis=0) == 0)           # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating empty components!!')
        nr = nr - len(ff)
        A_ = np.delete(A_, list(ff), 1)
        C = np.delete(C, list(ff), 0)
    

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)
    
#    import pdb 
#    pdb.set_trace()
    Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    print "Computing A_bas"
    A_bas = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)  # update baseline based on residual
    # A_bas = np.fmax(np.dot(Y_res,f.T)/scipy.linalg.norm(f)**2,0) # update
    # baseline based on residual
    b = A_bas

    print("--- %s seconds ---" % (time.time() - start_time))

    try:  # clean up
        # remove temporary file created
        print "Remove temporary file created"
        shutil.rmtree(folder)

    except:

        raise Exception("Failed to delete: " + folder)

    return A_, b, C
Developer: sebiRolotti | Project: Constrained_NMF | Lines of code: 104 | Source file: spatial.py

Example 13: print

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]



idx_components=np.union1d(idx_components_r,idx_components_raw)
idx_components=np.union1d(idx_components,idx_components_delta)  
idx_blobs=np.intersect1d(idx_components,idx_blobs)   
idx_components_bad=np.setdiff1d(list(range(len(traces))),idx_components)

print(' ***** ')
print((len(traces)))
print((len(idx_components)))
print((len(idx_blobs)))
#%% visualize components
#pl.figure();
pl.subplot(1,3,1)
crd = plot_contours(A2.tocsc()[:,idx_components],Cn,thr=0.9)
pl.subplot(1,3,2)
crd = plot_contours(A2.tocsc()[:,idx_blobs],Cn,thr=0.9)
pl.subplot(1,3,3)
crd = plot_contours(A2.tocsc()[:,idx_components_bad],Cn,thr=0.9)
#%%
view_patches_bar(Yr,scipy.sparse.coo_matrix(A2.tocsc()[:,idx_components]),C2[idx_components,:],b2,f2, dims[0],dims[1], YrA=YrA[idx_components,:],img=Cn)  
#%%
view_patches_bar(Yr,scipy.sparse.coo_matrix(A2.tocsc()[:,idx_components_bad]),C2[idx_components_bad,:],b2,f2, dims[0],dims[1], YrA=YrA[idx_components_bad,:],img=Cn)  
#%% STOP CLUSTER
pl.close()
if not single_thread:    
    c.close()
    cm.stop_server()
Developer: agiovann | Project: Constrained_NMF | Lines of code: 31 | Source file: demo_caiman.py

Example 14: extract_rois_patch

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]
def extract_rois_patch(file_name,d1,d2,rf=5,stride = 2):
    # not_completed, in_progress  # stray undefined names in the original source; commented out so the function runs
    rf=6
    stride = 2
    idx_flat,idx_2d=extract_patch_coordinates(d1, d2, rf=rf,stride = stride)
    perctl=95
    n_components=2
    tol=1e-6
    max_iter=5000
    args_in=[]    
    for id_f,id_2d in zip(idx_flat,idx_2d):        
        args_in.append((file_name, id_f,id_2d[0].shape, perctl,n_components,tol,max_iter))
    st=time.time()
    try:
        if 1:
            c = Client()   
            dview=c[:]
            file_res = dview.map_sync(nmf_patches, args_in)                         
        else:
            file_res = map(nmf_patches, args_in)                         
    finally:
        dview.results.clear()   
        c.purge_results('all')
        c.purge_everything()
        c.close()
    
    print(time.time() - st)
    
    A1=lil_matrix((d1*d2,len(file_res)))
    C1=[]
    A2=lil_matrix((d1*d2,len(file_res)))
    C2=[]
    A_tot=lil_matrix((d1*d2,n_components*len(file_res)))
    C_tot=[];
    count_out=0
    for count,f in enumerate(file_res):
        idx_,flt,ca,d=f
        print(count_out)
        #flt,ca,_=cse.order_components(coo_matrix(flt),ca)
        
#        A1[idx_,count]=flt[:,0][:,np.newaxis]/np.sqrt(np.sum(flt[:,0]**2))      
#        A2[idx_,count]=flt[:,1][:,np.newaxis] /np.sqrt(np.sum(flt[:,1]**2))              
#        C1.append(ca[0,:])
#        C2.append(ca[1,:])
        for ccc in range(n_components):
            A_tot[idx_,count_out]=flt[:,ccc][:,np.newaxis]/np.sqrt(np.sum(flt[:,ccc]**2))      
            C_tot.append(ca[ccc,:])
            count_out+=1
#        pl.imshow(np.reshape(flt[:,0],d,order='F'),vmax=10)
#        pl.pause(.1)
        
    correlations=np.corrcoef(np.array(C_tot))
    centers=cse.com(A_tot.todense(),d1,d2)
    distances=sklearn.metrics.pairwise.euclidean_distances(centers)
    pl.imshow((correlations>0.8) & (distances<10))  
    
    Yr=np.load('Yr.npy',mmap_mode='r')
    [d,T]=Yr.shape
    Y=np.reshape(Yr,(d1,d2,T),order='F')
    options=cse.utilities.CNMFSetParms(Y,p=0)    
    res_merge=cse.merge_components(Yr,A_tot,[],np.array(C_tot),[],np.array(C_tot),[],options['temporal_params'],options['spatial_params'],thr=0.8)
    A_m,C_m,nr_m,merged_ROIs,S_m,bl_m,c1_m,sn_m,g_m=res_merge
    A_norm=np.array([A_m[:,rr].toarray()/np.sqrt(np.sum(A_m[:,rr].toarray()**2)) for rr in range(A_m.shape[-1])]).T
    
    options=cse.utilities.CNMFSetParms(Y,p=2,K=np.shape(A_m)[-1])   
    
    Yr,sn,g=cse.pre_processing.preprocess_data(Yr,**options['preprocess_params'])
    
    epsilon=1e-2
    pixels_bckgrnd=np.nonzero(A_norm.sum(axis=-1)<epsilon)[0]
    f=np.sum(Yr[pixels_bckgrnd,:],axis=0)
    A2,b2,C2 = cse.spatial.update_spatial_components(Yr, C_m, f, A_m, sn=sn, **options['spatial_params'])
    A_or2, C_or2, srt2 = cse.utilities.order_components(A2,C2)
    A_norm2=np.array([A_or2[:,rr]/np.sqrt(np.sum(A_or2[:,rr]**2)) for rr in range(A_or2.shape[-1])]).T
    options['temporal_params']['p'] = 2 # set it back to original value to perform full deconvolution
    C2,f2,S2,bl2,c12,neurons_sn2,g21,YrA = cse.temporal.update_temporal_components(Yr,A2,b2,C2,f,bl=None,c1=None,sn=None,g=None,**options['temporal_params'])
    A_or, C_or, srt = cse.utilities.order_components(A2,C2)
    
    return A1,A2,C1
Developer: agiovann | Project: CalBlitz | Lines of code: 81 | Source file: segmentation_test.py

Example 15: time

# Required imports: from ipyparallel import Client [as alias]
# Or: from ipyparallel.Client import close [as alias]
t1 = time()
file_res=cb.motion_correct_parallel(fnames,30,template=template,margins_out=0,max_shift_w=45, max_shift_h=45,dview=client_[::2],remove_blanks=False)
t2=time()-t1
print(t2)
#%%
fnames=[]
for file in glob.glob(base_folder+'k31_20160107_MMP_150um_65mW_zoom2p2_000*[0-9].hdf5'):
    fnames.append(file)
fnames.sort()
print(fnames)  
#%%
file_res=cb.utils.pre_preprocess_movie_labeling(client_[::2], fnames, median_filter_size=(2,1,1), 
                                  resize_factors=[.2,.1666666666],diameter_bilateral_blur=4)

#%%
client_.close()
cse.utilities.stop_server(is_slurm=True)

#%%

#%%
fold=os.path.split(os.path.split(fnames[0])[-2])[-1]
os.mkdir(fold)
#%%
files=glob.glob(fnames[0][:-20]+'*BL_compress_.tif')
files.sort()
print(files)
#%%
m=cb.load_movie_chain(files,fr=3)
m.play(backend='opencv',gain=10,fr=40)
#%%
Developer: agiovann | Project: Constrained_NMF | Lines of code: 33 | Source file: ParallelProcessing.py


Note: The ipyparallel.Client.close method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Refer to each project's license before redistributing or reusing the code; do not reproduce without permission.