

Python Client.purge_results Method Code Examples

This article collects typical usage examples of the Python method ipyparallel.Client.purge_results. If you are unsure what Client.purge_results does or how to use it, the curated examples below should help. You can also browse further usage examples of the containing class, ipyparallel.Client.


The following presents 10 code examples of the Client.purge_results method, sorted by popularity by default.
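Before the examples, here is a minimal sketch of the pattern they all share: connect a Client, run work through a direct view, then purge the cached results so a long-running session does not accumulate memory. It assumes a cluster has already been started (for example with ipcluster start -n 4); the function square is illustrative.

from ipyparallel import Client

def square(x):
    return x ** 2

c = Client()                 # connect to the running cluster
dview = c[:]                 # direct view on all engines

results = dview.map_sync(square, range(10))  # blocking parallel map
print(results)

# release results cached on the view, the hub, and the client
dview.results.clear()
c.purge_results('all')       # or pass specific msg_ids / AsyncResults
c.purge_everything()
c.close()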

Example 1: motion_correct_parallel

# Required import: from ipyparallel import Client [as alias]
# Or alternatively: from ipyparallel.Client import purge_results [as alias]
def motion_correct_parallel(file_names,fr,template=None,margins_out=0,max_shift_w=5, max_shift_h=5,remove_blanks=False,apply_smooth=True,backend='single_thread'):
    """motion correct many movies usingthe ipyparallel cluster
    Parameters
    ----------
    file_names: list of strings
        names of he files to be motion corrected
    fr: double
        fr parameters for calcblitz movie 
    margins_out: int
        number of pixels to remove from the borders    
    
    Return
    ------
    base file names of the motion corrected files
    """
    args_in=[];
    for f in file_names:
        args_in.append((f,fr,margins_out,template,max_shift_w, max_shift_h,remove_blanks,apply_smooth))
        
    try:
        
        if backend == 'ipyparallel':  # compare strings with ==, not 'is'
            
            c = Client()   
            dview=c[:]
            file_res = dview.map_sync(process_movie_parallel, args_in)                         
            dview.results.clear()       
            c.purge_results('all')
            c.purge_everything()
            c.close()    

        elif backend == 'single_thread':
            
            file_res = map(process_movie_parallel, args_in)        
                 
        else:
            raise Exception('Unknown backend')
        
    except:
        
        try:
            if backend == 'ipyparallel':
                
                dview.results.clear()       
                c.purge_results('all')
                c.purge_everything()
                c.close()
        except UnboundLocalError as uberr:

            print 'could not close client'

        raise
                                    
    return file_res
Author: epnev, Project: CalBlitz, Lines: 56, Source: utils.py
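The cleanup block in Example 1 (clear the view's result cache, purge hub results, purge everything, close the client) is duplicated in the success path and in the exception handler. As a minimal sketch, not part of CalBlitz, the same guarantee can be expressed once with try/finally; the helper name map_with_cleanup is hypothetical.

from ipyparallel import Client

def map_with_cleanup(func, args):
    # hypothetical helper: blocking parallel map that always purges cached results
    c = Client()
    dview = c[:]
    try:
        return dview.map_sync(func, args)
    finally:
        # runs on success and on error alike
        dview.results.clear()
        c.purge_results('all')
        c.purge_everything()
        c.close()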

Example 2: extract_rois_patch

# Required import: from ipyparallel import Client [as alias]
# Or alternatively: from ipyparallel.Client import purge_results [as alias]
def extract_rois_patch(file_name,d1,d2,rf=5,stride = 5):
    idx_flat,idx_2d=extract_patch_coordinates(d1, d2, rf=rf,stride = stride)
    perctl=95
    n_components=2
    tol=1e-6
    max_iter=5000
    args_in=[]    
    for id_f,id_2d in zip(idx_flat,idx_2d):        
        args_in.append((file_name, id_f,id_2d[0].shape, perctl,n_components,tol,max_iter))
    st=time.time()
    print len(idx_flat)
    try:
        if 1:  # hard-coded to the ipyparallel branch; set to 0 for a single-threaded run
            c = Client()   
            dview=c[:]
            file_res = dview.map_sync(nmf_patches, args_in)                         
        else:
            file_res = map(nmf_patches, args_in)                         
    finally:
        dview.results.clear()   
        c.purge_results('all')
        c.purge_everything()
        c.close()
    
    print time.time()-st
    
    A1=lil_matrix((d1*d2,len(file_res)))
    C1=[]
    A2=lil_matrix((d1*d2,len(file_res)))
    C2=[]
    for count,f in enumerate(file_res):
        idx_,flt,ca,d=f
        #flt,ca,_=cse.order_components(coo_matrix(flt),ca)
        A1[idx_,count]=flt[:,0][:,np.newaxis]        
        A2[idx_,count]=flt[:,1][:,np.newaxis]        
        C1.append(ca[0,:])
        C2.append(ca[1,:])
#        pl.imshow(np.reshape(flt[:,0],d,order='F'),vmax=10)
#        pl.pause(.1)
        
        
    return A1,A2,C1,C2
Author: sebiRolotti, Project: Constrained_NMF, Lines: 44, Source: map_reduce.py

Example 3: update_spatial_components_parallel

# Required import: from ipyparallel import Client [as alias]
# Or alternatively: from ipyparallel.Client import purge_results [as alias]

#.........some code omitted here.........
        np.save(C_name,Cf)

        if type(Y) is np.core.memmap: # if input file is already memory mapped then find the filename 
            Y_name=Y.filename
        else:                        # if not create a memory mapped version (necessary for parallelization) 
            Y_name = os.path.join(folder, 'Y_temp.npy') 
            np.save(Y_name,Y)   
            Y=np.load(Y_name,mmap_mode='r') 
            
        # create arguments to be passed to the function. Here we are grouping bunch of pixels to be processed by each thread    
        pixel_groups=[(Y_name,C_name,sn,ind2_,range(i,i+n_pixels_per_process)) for i in range(0,d1*d2-n_pixels_per_process+1,n_pixels_per_process)]

        A_ = np.zeros((d,nr+np.size(f,0)))
        try: # if server is not running and raise exception if not installed or not started        
            from ipyparallel import Client
            c = Client()
        except:
            print "this backend requires the installation of the ipyparallel (pip install ipyparallel) package and  starting a cluster (type ipcluster start -n 6) where 6 is the number of nodes"
            raise
        
        if len(c) < n_processes:
            print len(c)
            raise Exception("the number of nodes in the cluster is less than the required processes: decrease the n_processes parameter to a suitable value")
            
        dview=c[:n_processes] # use the number of processes
        #serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)
        parallel_result = dview.map_sync(lars_regression_noise_ipyparallel, pixel_groups) 
        for chunk in parallel_result:
            for pars in chunk:
                px,idxs_,a=pars
                A_[px,idxs_]=a
        #clean up        
        dview.results.clear()   
        c.purge_results('all')
        c.purge_everything()
        c.close()
        
        
             
    elif backend=='single_thread':      

        Cf_=[Cf[idx_,:] for idx_ in ind2_]

        #% LARS regression 
        A_ = np.hstack((np.zeros((d,nr)),np.zeros((d,np.size(f,0)))))
        
        
        for c,y,s,id2_,px in zip(Cf_,Y,sn,ind2_,range(d)):
            if px%1000==0: 
                    print px
            if np.size(c)>0:                
                _, _, a, _ , _= lars_regression_noise(y, np.array(c.T), 1, sn[px]**2*T)
                if np.isscalar(a):
                    A_[px,id2_]=a
                else:
                    A_[px,id2_]=a.T
        
    else:
        raise Exception('Unknown backend specified: use single_thread, threading, multiprocessing or ipyparallel')
        
    #%
    print 'Updated Spatial Components'
    A_=threshold_components(A_, d1, d2)
    print "threshold"
    ff = np.where(np.sum(A_,axis=0)==0);           # remove empty components
    if np.size(ff)>0:
Author: ninehundred1, Project: Constrained_NMF, Lines: 70, Source: spatial.py

Example 4: run_CNMF_patches

# Required import: from ipyparallel import Client [as alias]
# Or alternatively: from ipyparallel.Client import purge_results [as alias]
def run_CNMF_patches(file_name, shape, options, rf=16, stride = 4, n_processes=2, backend='single_thread',memory_fact=1):
    """Function that runs CNMF in patches, either in parallel or sequentiually, and return the result for each. It requires that ipyparallel is running
        
    Parameters
    ----------        
    file_name: string
        full path to an npy file (2D, pixels x time) containing the movie        
        
    shape: tuple of three elements
        dimensions of the original movie across y, x, and time 
    
    options:
        dictionary containing all the parameters for the various algorithms
    
    rf: int 
        half-size of the square patch in pixel
    
    stride: int
        amount of overlap between patches
        
    backend: string
        'ipyparallel' or 'single_thread'
    
    n_processes: int
        number of cores to be used (should be less than the number of cores started with ipyparallel)
        
    memory_fact: double
        unitless number accounting for how much memory should be used. It represents the fraction of the patch processed in a single thread. You will need to try different values to find one that works
    
    
    Returns
    -------
    A_tot: matrix containing all the components from all the patches
    
    C_tot: matrix containing the calcium traces corresponding to A_tot
    
    sn_tot: per pixel noise estimate
    
    optional_outputs: set of outputs related to the result of the CNMF algorithm on each patch
    """
    (d1,d2,T)=shape
    d=d1*d2
    K=options['init_params']['K']
    
    options['preprocess_params']['backend']='single_thread' 
    options['preprocess_params']['n_pixels_per_process']=np.int((rf*rf)/memory_fact)
    options['spatial_params']['n_pixels_per_process']=np.int((rf*rf)/memory_fact)
    options['temporal_params']['n_pixels_per_process']=np.int((rf*rf)/memory_fact)
    options['spatial_params']['backend']='single_thread'
    options['temporal_params']['backend']='single_thread'

    
    idx_flat,idx_2d=extract_patch_coordinates(d1, d2, rf=rf, stride = stride)
#    import pdb 
#    pdb.set_trace()
    args_in=[]    
    for id_f,id_2d in zip(idx_flat[:],idx_2d[:]):        
        args_in.append((file_name, id_f,id_2d[0].shape, options))

    print len(idx_flat)

    st=time.time()        
    
    if backend == 'ipyparallel':

        try:

            c = Client()   
            dview=c[:n_processes]
            file_res = dview.map_sync(cnmf_patches, args_in)        
            dview.results.clear()   
            c.purge_results('all')
            c.purge_everything()
            c.close()         
        except:
            print('Something went wrong')  
            raise
        finally:
            print('You may think that it went well but reality is harsh')
                    

    elif backend == 'single_thread':

        file_res = map(cnmf_patches, args_in)                         

    else:
        raise Exception('Backend unknown')
            
      
    print time.time()-st
    
    
    # extract the values from the output of mapped computation
    num_patches=len(file_res)
    
    A_tot=scipy.sparse.csc_matrix((d,K*num_patches))
    B_tot=scipy.sparse.csc_matrix((d,num_patches))
    C_tot=np.zeros((K*num_patches,T))
    F_tot=np.zeros((num_patches,T))
    mask=np.zeros(d)
#.........some code omitted here.........
Author: sebiRolotti, Project: Constrained_NMF, Lines: 103, Source: map_reduce.py
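Based only on the signature and docstring of Example 4, a call might look like the sketch below; the file name, movie dimensions, and options dictionary are assumptions (in practice options would come from a helper such as the CNMFSetParms call used in Example 9).

# hypothetical invocation of run_CNMF_patches; all concrete values are assumed
d1, d2, T = 512, 512, 3000     # movie dimensions: y, x, time
A_tot, C_tot, sn_tot, optional_outputs = run_CNMF_patches(
    'Yr.npy',                  # 2D npy file, pixels x time
    (d1, d2, T),
    options,                   # parameter dictionary, e.g. from CNMFSetParms
    rf=16, stride=4,
    n_processes=8, backend='ipyparallel',
    memory_fact=1)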

Example 5: update_temporal_components_parallel

# Required import: from ipyparallel import Client [as alias]
# Or alternatively: from ipyparallel.Client import purge_results [as alias]

#.........some code omitted here.........
            print len(c)
            raise Exception("the number of nodes in the cluster are less than the required processes: decrease the n_processes parameter to a suitable value")            
        
        dview=c[:n_processes] # use the number of processes
    
    Cin=np.array(Cin.todense())    
    for iter in range(ITER):
        O,lo = update_order(A.tocsc()[:,:nr])
        P_=[];
        for count,jo_ in enumerate(O):
            jo=np.array(list(jo_))           
            Ytemp = YrA[:,jo.flatten()] + (np.dot(np.diag(nA[jo]),Cin[jo,:])).T
            Ctemp = np.zeros((np.size(jo),T))
            Stemp = np.zeros((np.size(jo),T))
            btemp = np.zeros((np.size(jo),1))
            sntemp = btemp.copy()
            c1temp = btemp.copy()
            gtemp = np.zeros((np.size(jo),kwargs['p']));
            nT = nA[jo]            
            
            
#            args_in=[(np.squeeze(np.array(Ytemp[:,jj])), nT[jj], jj, bl[jo[jj]], c1[jo[jj]], g[jo[jj]], sn[jo[jj]], kwargs) for jj in range(len(jo))]
            args_in=[(np.squeeze(np.array(Ytemp[:,jj])), nT[jj], jj, None, None, None, None, kwargs) for jj in range(len(jo))]
            
            if backend == 'ipyparallel':                    
                
                results = dview.map_sync(constrained_foopsi_parallel,args_in)        

            elif backend == 'single_thread':
                
                results = map(constrained_foopsi_parallel,args_in)            
                
            else:
                
                raise Exception('Backend not defined. Use either single_thread or ipyparallel')
                
            for chunk in results:
                #pars=dict(kwargs)
                C_,Sp_,Ytemp_,cb_,c1_,sn_,gn_,jj_=chunk                    
                Ctemp[jj_,:] = C_[None,:]
                                
                Stemp[jj_,:] = Sp_               
                Ytemp[:,jj_] = Ytemp_[:,None]            
                btemp[jj_] = cb_
                c1temp[jj_] = c1_
                sntemp[jj_] = sn_   
                gtemp[jj_,:] = gn_.T  
                   
                bl[jo[jj_]] = cb_
                c1[jo[jj_]] = c1_
                sn[jo[jj_]] = sn_
                g[jo[jj_]] = gtemp[jj_,:]
                             
                
                #pars['b'] = cb_
#                pars['c1'] = c1_                 
#                pars['neuron_sn'] = sn_
#                pars['gn'] = gtemp[jj_,np.abs(gtemp[jj,:])>0] 
#                
##                for jj = 1:length(O{jo})
##                    P.gn(O{jo}(jj)) = {gtemp(jj,abs(gtemp(jj,:))>0)'};
##                end
#                pars['neuron_id'] = jo[jj_]
#                P_.append(pars)
            
            YrA[:,jo] = Ytemp
            C[jo,:] = Ctemp            
            S[jo,:] = Stemp
            
#            if (np.sum(lo[:jo])+1)%1 == 0:
            print str(np.sum(lo[:count])) + ' out of total ' + str(nr) + ' temporal components updated \n'
        
        ii=nr        
        YrA[:,ii] = YrA[:,ii] + nA[ii]*np.atleast_2d(Cin[ii,:]).T
        cc = np.maximum(YrA[:,ii]/nA[ii],0)
        C[ii,:] = cc[:].T
        YrA[:,ii] = YrA[:,ii] - nA[ii]*np.atleast_2d(C[ii,:]).T 
        
        if backend == 'ipyparallel':       
            dview.results.clear()   
            c.purge_results('all')
            c.purge_everything()

        if scipy.linalg.norm(Cin - C,'fro')/scipy.linalg.norm(C,'fro') <= 1e-3:
            # stop if the overall temporal component does not change by much
            print "stopping: overall temporal component not changing significantly"
            break
        else:
            Cin = C
    
    Y_res = Y - A*C # this includes the baseline term
    
    f = C[nr:,:]
    C = C[:nr,:]
        
    P_ = sorted(P_, key=lambda k: k['neuron_id']) 
    if backend == 'ipyparallel':      
        c.close()
    
    return C,f,Y_res,S,bl,c1,sn,g
Author: garretstuber, Project: Constrained_NMF, Lines: 104, Source: temporal.py

Example 6: update_spatial_components

# Required import: from ipyparallel import Client [as alias]
# Or alternatively: from ipyparallel.Client import purge_results [as alias]

#.........some code omitted here.........
        else:
            Y_name = os.path.join(folder, 'Y_temp.npy')
            np.save(Y_name, Y)            
            Y,_,_,_=load_memmap(Y_name)    

        # create arguments to be passed to the function. Here we are grouping
        # bunch of pixels to be processed by each thread
        pixel_groups = [(Y_name, C_name, sn, ind2_, range(i, i + n_pixels_per_process))
                        for i in range(0, d1 * d2 - n_pixels_per_process + 1, n_pixels_per_process)]

        A_ = np.zeros((d, nr + np.size(f, 0)))
    
        try:  # if server is not running and raise exception if not installed or not started
            from ipyparallel import Client
            c = Client()
        except:
            print "this backend requires the installation of the ipyparallel (pip install ipyparallel) package and  starting a cluster (type ipcluster start -n 6) where 6 is the number of nodes"
            raise

        if len(c) < n_processes:
            print len(c)
            raise Exception(
                "the number of nodes in the cluster is less than the required processes: decrease the n_processes parameter to a suitable value")

        dview = c[:n_processes]  # use the number of processes
        #serial_result = map(lars_regression_noise_ipyparallel, pixel_groups)                        
        parallel_result = dview.map_sync(lars_regression_noise_ipyparallel, pixel_groups)
        # clean up
       
        
        for chunk in parallel_result:
            for pars in chunk:
                px, idxs_, a = pars
                A_[px, idxs_] = a
        
        dview.results.clear()
        c.purge_results('all')
        c.purge_everything()
        c.close()

    elif backend == 'single_thread':

        Cf_ = [Cf[idx_, :] for idx_ in ind2_]

        #% LARS regression
        A_ = np.hstack((np.zeros((d, nr)), np.zeros((d, np.size(f, 0)))))

        for c, y, s, id2_, px in zip(Cf_, Y, sn, ind2_, range(d)):
            if px % 1000 == 0:
                print px
            if np.size(c) > 0:
                _, _, a, _, _ = lars_regression_noise(y, np.array(c.T), 1, sn[px]**2 * T)
                if np.isscalar(a):
                    A_[px, id2_] = a
                else:
                    A_[px, id2_] = a.T

    else:
        raise Exception(
            'Unknown backend specified: use single_thread, threading, multiprocessing or ipyparallel')
    
    #%
    print 'Updated Spatial Components'
   
    A_ = threshold_components(A_, d1, d2)

    print "threshold"
    ff = np.where(np.sum(A_, axis=0) == 0)           # remove empty components
    if np.size(ff) > 0:
        ff = ff[0]
        print('eliminating empty components!!')
        nr = nr - len(ff)
        A_ = np.delete(A_, list(ff), 1)
        C = np.delete(C, list(ff), 0)
    

    A_ = A_[:, :nr]
    A_ = coo_matrix(A_)
    
#    import pdb 
#    pdb.set_trace()
    Y_resf = np.dot(Y, f.T) - A_.dot(coo_matrix(C[:nr, :]).dot(f.T))
    print "Computing A_bas"
    A_bas = np.fmax(Y_resf / scipy.linalg.norm(f)**2, 0)  # update baseline based on residual
    # A_bas = np.fmax(np.dot(Y_res,f.T)/scipy.linalg.norm(f)**2,0) # update
    # baseline based on residual
    b = A_bas

    print("--- %s seconds ---" % (time.time() - start_time))

    try:  # clean up
        # remove the temporary folder created earlier
        print "Removing temporary folder"
        shutil.rmtree(folder)

    except:

        raise Exception("Failed to delete: " + folder)

    return A_, b, C
Author: sebiRolotti, Project: Constrained_NMF, Lines: 104, Source: spatial.py

Example 7: update_temporal_components

# Required import: from ipyparallel import Client [as alias]
# Or alternatively: from ipyparallel.Client import purge_results [as alias]

#.........some code omitted here.........
            #Ytemp = YrA[:,jo.flatten()] + (np.dot(np.diag(nA[jo]),Cin[jo,:])).T
            Ytemp = YrA[:,jo.flatten()] + Cin[jo,:].T
            Ctemp = np.zeros((np.size(jo),T))
            Stemp = np.zeros((np.size(jo),T))
            btemp = np.zeros((np.size(jo),1))
            sntemp = btemp.copy()
            c1temp = btemp.copy()
            gtemp = np.zeros((np.size(jo),kwargs['p']));
            nT = nA[jo]            
                        
#            args_in=[(np.squeeze(np.array(Ytemp[:,jj])), nT[jj], jj, bl[jo[jj]], c1[jo[jj]], g[jo[jj]], sn[jo[jj]], kwargs) for jj in range(len(jo))]
            args_in=[(np.squeeze(np.array(Ytemp[:,jj])), nT[jj], jj, None, None, None, None, kwargs) for jj in range(len(jo))]
#            import pdb
#            pdb.set_trace()
            if backend == 'ipyparallel':                    
                #
                if debug:                
                    results = dview.map_async(constrained_foopsi_parallel,args_in)  
                    results.get()
                    for outp in results.stdout:   
                        print outp[:-1]  
                        sys.stdout.flush()                                                 
                    for outp in results.stderr:   
                        print outp[:-1]  
                        sys.stderr.flush()            
                    
                else:
                    
                    results = dview.map_sync(constrained_foopsi_parallel,args_in)
                
            elif backend == 'single_thread':
                
                results = map(constrained_foopsi_parallel,args_in)            
                
            else:
                
                raise Exception('Backend not defined. Use either single_thread or ipyparallel')
                
            for chunk in results:
                pars=dict()
                C_,Sp_,Ytemp_,cb_,c1_,sn_,gn_,jj_=chunk                    
                Ctemp[jj_,:] = C_[None,:]
                                
                Stemp[jj_,:] = Sp_               
                Ytemp[:,jj_] = Ytemp_[:,None]            
                btemp[jj_] = cb_
                c1temp[jj_] = c1_
                sntemp[jj_] = sn_   
                gtemp[jj_,:] = gn_.T  
                   
                bl[jo[jj_]] = cb_
                c1[jo[jj_]] = c1_
                sn[jo[jj_]] = sn_
                g[jo[jj_]]  = gn_.T if kwargs['p'] > 0 else [] #gtemp[jj,:]
                                             
                pars['b'] = cb_
                pars['c1'] = c1_                 
                pars['neuron_sn'] = sn_
                pars['gn'] = gtemp[jj_,np.abs(gtemp[jj_,:])>0]
                pars['neuron_id'] = jo[jj_]
                P_.append(pars)
            
            YrA -= (Ctemp-C[jo,:]).T*AA[jo,:]
            #YrA[:,jo] = Ytemp
            C[jo,:] = Ctemp.copy()            
            S[jo,:] = Stemp
            
#            if (np.sum(lo[:jo])+1)%1 == 0:
            print str(np.sum(lo[:count+1])) + ' out of total ' + str(nr) + ' temporal components updated'
        
        ii=nr        

        #YrA[:,ii] = YrA[:,ii] + np.atleast_2d(Cin[ii,:]).T
        #cc = np.maximum(YrA[:,ii],0)        
        cc = np.maximum(YrA[:,ii] + np.atleast_2d(Cin[ii,:]).T,0)
        YrA -= (cc-np.atleast_2d(Cin[ii,:]).T)*AA[ii,:]      
        C[ii,:] = cc.T
        #YrA = YA - C.T.dot(AA)
        #YrA[:,ii] = YrA[:,ii] - np.atleast_2d(C[ii,:]).T                
        
        if backend == 'ipyparallel':       
            dview.results.clear()   
            c.purge_results('all')
            c.purge_everything()

        if scipy.linalg.norm(Cin - C,'fro')/scipy.linalg.norm(C,'fro') <= 1e-3:
            # stop if the overall temporal component does not change by much
            print "stopping: overall temporal component not changing significantly"
            break
        else:
            Cin = C
        
    f = C[nr:,:]
    C = C[:nr,:]
    YrA = np.array(YrA[:,:nr]).T    
    P_ = sorted(P_, key=lambda k: k['neuron_id']) 
    if backend == 'ipyparallel':      
        c.close()
    
    return C,f,S,bl,c1,sn,g,YrA #,P_
Author: valentina-s, Project: Constrained_NMF, Lines: 104, Source: temporal.py

Example 8: __init__

# Required import: from ipyparallel import Client [as alias]
# Or alternatively: from ipyparallel.Client import purge_results [as alias]

#.........some code omitted here.........
                allInpt.append([akey, 'ff', temperature, wavelength, abundance, em])
            if 'fb' in self.Todo[akey]:
                allInpt.append([akey, 'fb', temperature, wavelength, abundance, em])
            if 'line' in self.Todo[akey]:
                allInpt.append([akey, 'line', temperature, eDensity, wavelength, filter, allLines, abundance, em, doContinuum])
        #
        result = lbvAll.map_sync(doAll, allInpt)
        if verbose:
            print(' got all ff, fb, line results')
        ionsCalculated = []
        #
        for ijk in range(len(result)):
            out = result[ijk]
            if type(out) != list:
                print(' a problem has occurred - this can be caused by')
                print('running Python3 and not using ipcluster3')
                return
            ionS = out[0]
            if verbose:
                print(' collecting calculation for %s'%(ionS))
            ionsCalculated.append(ionS)
            calcType = out[1]
            if verbose:
                print(' processing %s results'%(calcType))
            #
            if calcType == 'ff':
                thisFf = out[2]
                if keepIons:
                    self.FfInstances[ionS] = thisFf
                freeFree += thisFf
            elif calcType == 'fb':
                thisFb = out[2]
                if verbose:
                    print(' fb ion = %s'%(ionS))
                if hasattr(thisFb, 'FreeBound'):
                    if 'errorMessage' not in sorted(thisFb.keys()):
                        if keepIons:
                            self.FbInstances[ionS] = thisFb
                        freeBound += thisFb['rate']
                    else:
                        print(thisFb['errorMessage'])
            elif calcType == 'line':
                thisIon = out[2]
                if not 'errorMessage' in sorted(thisIon.Intensity.keys()):
                    if keepIons:
                        self.IonInstances[ionS] = thisIon
                    thisIntensity = thisIon.Intensity
    ##                self.IonInstances.append(copy.deepcopy(thisIon))
                    if setupIntensity:
                        for akey in sorted(self.Intensity.keys()):
                            self.Intensity[akey] = np.hstack((self.Intensity[akey], thisIntensity[akey]))
                    else:
                        setupIntensity = 1
                        self.Intensity  = thisIntensity
                    #
                    lineSpectrum += thisIon.Spectrum['intensity']
                    # check for two-photon emission
                    if len(out) == 4:
                        tp = out[3]
                        if self.NTempDen == 1:
                            twoPhoton += tp['intensity']
                        else:
                            for iTempDen in range(self.NTempDen):
                                twoPhoton[iTempDen] += tp['rate'][iTempDen]
                else:
                    if 'errorMessage' in sorted(thisIon.Intensity.keys()):
                        print(thisIon.Intensity['errorMessage'])
        #
        #
        self.IonsCalculated = ionsCalculated
        #
        #
        self.FreeFree = {'wavelength':wavelength, 'intensity':freeFree.squeeze()}
        self.FreeBound = {'wavelength':wavelength, 'intensity':freeBound.squeeze()}
        self.LineSpectrum = {'wavelength':wavelength, 'intensity':lineSpectrum.squeeze()}
        self.TwoPhoton = {'wavelength':wavelength, 'intensity':twoPhoton.squeeze()}
        #
        total = freeFree + freeBound + lineSpectrum + twoPhoton
        #
        t2 = datetime.now()
        dt=t2-t1
        print(' elapsed seconds = %12.3e'%(dt.seconds))
        rcAll.purge_results('all')
        #
        if self.NTempDen == 1:
            integrated = total
        else:
            integrated = total.sum(axis=0)
        #
        if type(label) == type(''):
            if hasattr(self, 'Spectrum'):
                print(' hasattr = true')
                self.Spectrum[label] = {'wavelength':wavelength, 'intensity':total.squeeze(), 'filter':filter[0].__name__,   'width':filter[1], 'integrated':integrated, 'em':em,  'Abundance':self.AbundanceName,
                                            'xlabel':xlabel, 'ylabel':ylabel}
            else:
                self.Spectrum = {label:{'wavelength':wavelength, 'intensity':total.squeeze(), 'filter':filter[0].__name__,   'width':filter[1], 'integrated':integrated, 'em':em, 'Abundance':self.AbundanceName,
                                'xlabel':xlabel, 'ylabel':ylabel}}
        else:
            self.Spectrum = {'wavelength':wavelength, 'intensity':total.squeeze(), 'filter':filter[0].__name__,   'width':filter[1], 'integrated':integrated, 'em':em, 'Abundance':self.AbundanceName,
                                'xlabel':xlabel, 'ylabel':ylabel}
Author: wtbarnes, Project: ChiantiPy, Lines: 104, Source: IpyMspectrum.py

Example 9: extract_rois_patch

# Required import: from ipyparallel import Client [as alias]
# Or alternatively: from ipyparallel.Client import purge_results [as alias]
def extract_rois_patch(file_name,d1,d2,rf=5,stride = 2):
    # work in progress: not_completed, in_progress
    rf=6
    stride = 2
    idx_flat,idx_2d=extract_patch_coordinates(d1, d2, rf=rf,stride = stride)
    perctl=95
    n_components=2
    tol=1e-6
    max_iter=5000
    args_in=[]    
    for id_f,id_2d in zip(idx_flat,idx_2d):        
        args_in.append((file_name, id_f,id_2d[0].shape, perctl,n_components,tol,max_iter))
    st=time.time()
    try:
        if 1:
            c = Client()   
            dview=c[:]
            file_res = dview.map_sync(nmf_patches, args_in)                         
        else:
            file_res = map(nmf_patches, args_in)                         
    finally:
        dview.results.clear()   
        c.purge_results('all')
        c.purge_everything()
        c.close()
    
    print time.time()-st
    
    A1=lil_matrix((d1*d2,len(file_res)))
    C1=[]
    A2=lil_matrix((d1*d2,len(file_res)))
    C2=[]
    A_tot=lil_matrix((d1*d2,n_components*len(file_res)))
    C_tot=[];
    count_out=0
    for count,f in enumerate(file_res):
        idx_,flt,ca,d=f
        print count_out
        #flt,ca,_=cse.order_components(coo_matrix(flt),ca)
        
#        A1[idx_,count]=flt[:,0][:,np.newaxis]/np.sqrt(np.sum(flt[:,0]**2))      
#        A2[idx_,count]=flt[:,1][:,np.newaxis] /np.sqrt(np.sum(flt[:,1]**2))              
#        C1.append(ca[0,:])
#        C2.append(ca[1,:])
        for ccc in range(n_components):
            A_tot[idx_,count_out]=flt[:,ccc][:,np.newaxis]/np.sqrt(np.sum(flt[:,ccc]**2))      
            C_tot.append(ca[ccc,:])
            count_out+=1
#        pl.imshow(np.reshape(flt[:,0],d,order='F'),vmax=10)
#        pl.pause(.1)
        
    correlations=np.corrcoef(np.array(C_tot))
    centers=cse.com(A_tot.todense(),d1,d2)
    distances=sklearn.metrics.pairwise.euclidean_distances(centers)
    pl.imshow((correlations>0.8) & (distances<10))  
    
    Yr=np.load('Yr.npy',mmap_mode='r')
    [d,T]=Yr.shape
    Y=np.reshape(Yr,(d1,d2,T),order='F')
    options=cse.utilities.CNMFSetParms(Y,p=0)    
    res_merge=cse.merge_components(Yr,A_tot,[],np.array(C_tot),[],np.array(C_tot),[],options['temporal_params'],options['spatial_params'],thr=0.8)
    A_m,C_m,nr_m,merged_ROIs,S_m,bl_m,c1_m,sn_m,g_m=res_merge
    A_norm=np.array([A_m[:,rr].toarray()/np.sqrt(np.sum(A_m[:,rr].toarray()**2)) for rr in range(A_m.shape[-1])]).T
    
    options=cse.utilities.CNMFSetParms(Y,p=2,K=np.shape(A_m)[-1])   
    
    Yr,sn,g=cse.pre_processing.preprocess_data(Yr,**options['preprocess_params'])
    
    epsilon=1e-2
    pixels_bckgrnd=np.nonzero(A_norm.sum(axis=-1)<epsilon)[0]
    f=np.sum(Yr[pixels_bckgrnd,:],axis=0)
    A2,b2,C2 = cse.spatial.update_spatial_components(Yr, C_m, f, A_m, sn=sn, **options['spatial_params'])
    A_or2, C_or2, srt2 = cse.utilities.order_components(A2,C2)
    A_norm2=np.array([A_or2[:,rr]/np.sqrt(np.sum(A_or2[:,rr]**2)) for rr in range(A_or2.shape[-1])]).T
    options['temporal_params']['p'] = 2 # set it back to original value to perform full deconvolution
    C2,f2,S2,bl2,c12,neurons_sn2,g21,YrA = cse.temporal.update_temporal_components(Yr,A2,b2,C2,f,bl=None,c1=None,sn=None,g=None,**options['temporal_params'])
    A_or, C_or, srt = cse.utilities.order_components(A2,C2)
    
    return A1,A2,C1
Author: agiovann, Project: CalBlitz, Lines: 81, Source: segmentation_test.py

Example 10: run_CNMF_patches

# Required import: from ipyparallel import Client [as alias]
# Or alternatively: from ipyparallel.Client import purge_results [as alias]
def run_CNMF_patches(file_name, shape, options, rf=16, stride = 4, n_processes=2, backend='single_thread'):
    """
    Function that runs CNMF in patches, either in parallel or sequentially, and returns the result for each. It requires that ipyparallel is running
        
    Parameters
    ----------        
    file_name: string
        full path to an npy file (2D, pixels x time) containing the movie        
        
    shape: tuple of three elements
        dimensions of the original movie across y, x, and time 
    
    options:
        dictionary containing all the parameters for the various algorithms
    
    rf: int 
        half-size of the square patch in pixel
    
    stride: int
        amount of overlap between patches
        
    backend: string
        'ipyparallel' or 'single_thread'
    
    
    Returns
    -------
    A_tot: matrix containing all the components from all the patches
    
    C_tot: matrix containing the calcium traces corresponding to A_tot
    
    sn_tot: per pixel noise estimate
    
    optional_outputs: set of outputs related to the result of the CNMF algorithm on each patch
    """
    (d1,d2,T)=shape
    d=d1*d2
    K=options['init_params']['K']
    
    idx_flat,idx_2d=extract_patch_coordinates(d1, d2, rf=rf, stride = stride)
    
    args_in=[]    
    for id_f,id_2d in zip(idx_flat[:],idx_2d[:]):        
        args_in.append((file_name, id_f,id_2d[0].shape, options))

    print len(idx_flat)

    st=time.time()        
    
    if backend == 'ipyparallel':

        try:

            c = Client()   
            dview=c[:n_processes]
            file_res = dview.map_sync(cnmf_patches, args_in)        

        finally:
            
            dview.results.clear()   
            c.purge_results('all')
            c.purge_everything()
            c.close()                   

    elif backend == 'single_thread':

        file_res = map(cnmf_patches, args_in)                         

    else:
        raise Exception('Backend unknown')
            
      
    print time.time()-st
    
    
    # extract the values from the output of mapped computation
    num_patches=len(file_res)
    
    A_tot=scipy.sparse.csc_matrix((d,K*num_patches))
    C_tot=np.zeros((K*num_patches,T))
    sn_tot=np.zeros((d1*d2))
    b_tot=[]
    f_tot=[]
    bl_tot=[]
    c1_tot=[]
    neurons_sn_tot=[]
    g_tot=[]    
    idx_tot=[];
    shapes_tot=[]    
    id_patch_tot=[]
    
    count=0  
    patch_id=0

    print 'Transforming patches into full matrix'
    
    for idx_,shapes,A,b,C,f,S,bl,c1,neurons_sn,g,sn,_ in file_res:
    
        sn_tot[idx_]=sn
        b_tot.append(b)
#.........some code omitted here.........
Author: valentina-s, Project: Constrained_NMF, Lines: 103, Source: map_reduce.py


Note: The ipyparallel.Client.purge_results examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the code remains with the original authors. For redistribution and use, please consult the corresponding project's license; do not reproduce without permission.