

Python ProcessingPool.close Method Code Examples

This article collects typical code examples of the Python method pathos.multiprocessing.ProcessingPool.close. If you are wondering what ProcessingPool.close does, how to call it, or what real usage looks like, the curated examples below should help. You can also explore further usage examples of the containing class, pathos.multiprocessing.ProcessingPool.


Four code examples of the ProcessingPool.close method are shown below, sorted by popularity by default.
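Before turning to the examples, here is a minimal, self-contained sketch of the typical ProcessingPool lifecycle (create, map, close, join); the worker function and pool size are illustrative assumptions, not taken from the projects cited below:

from pathos.multiprocessing import ProcessingPool

def square(x):
    # trivial worker function, used only to illustrate the lifecycle
    return x * x

pool = ProcessingPool(4)               # assumed pool size of 4 worker processes
results = pool.map(square, range(10))  # blocks until all tasks complete
pool.close()                           # no further tasks may be submitted
pool.join()                            # wait for the workers to shut down
print(results)                         # [0, 1, 4, 9, ...]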

Example 1: analyze

# Required import: from pathos.multiprocessing import ProcessingPool [as alias]
# Or: from pathos.multiprocessing.ProcessingPool import close [as alias]

# Assumed imports for this excerpt (the project module `setup` provides setup.setup):
import logging
from functools import partial
import numpy as np
import pandas as pd
from scipy.spatial.distance import cosine
from pathos.multiprocessing import ProcessingPool as Pool
import setup
class analyze(setup.setup):

    def __init__(self,args,logging_level=logging.INFO):

super(analyze, self).__init__(args,logging_level)


    # set up processing pool and run all analyses specified in args
    def run(self):


        if self.args.jumpdists:
            n_bins=100.
            bin_width = 1/n_bins
            bins = np.arange(0,1+bin_width,1/n_bins)

            if self.args.file:
                user,vals = self.artist_jump_distributions(self.args.file,bins=bins,self_jumps=False)
                with open(self.args.resultdir+user,'w') as fout:
                    fout.write(','.join(vals.astype(str))+'\n')



            else:
                raise NotImplementedError('not implemented!')
                self.pool = Pool(self.args.n)
                self.rootLogger.info("Pool started")

                self.rootLogger.info("Starting jump distance analysis")

                func_partial = partial(self.artist_jump_distributions,bins=bins,self_jumps=False)
                with open(self.args.resultdir+'jumpdists','w') as fout:
                    for user,vals in self.pool.imap(func_partial,self.listen_files):
                        fout.write(user+'\t'+','.join(vals.astype(str))+'\n')

                self.pool.close()
                self.rootLogger.info("Pool closed")

        if self.args.blockdists:
            #self.rootLogger.info("Starting block distance analysis")
            self.mean_block_distances(self.args.file)

        if self.args.diversity_dists:
            bins = np.arange(0,1.01,.01)
            self.diversity_distributions(self.args.file,bins=bins)

        if self.args.clustering:
            self.clustering(self.args.file)

        if self.args.values:
            self.patch_values(self.args.file)

        if self.args.exp:
            self.explore_exploit(self.args.file)

        if self.args.patch_len_dists:
            self.patch_len_dists(self.args.file)


    # calculate distribution (using histogram with specified bins)
    # of sequential artist-to-artist distances
    def artist_jump_distributions(self,fi,bins,self_jumps=False):
        user = fi.split('/')[-1][:-4]
        df = pd.read_pickle(fi)
        if self_jumps:
            vals = np.histogram(df['dist'].dropna(),bins=bins)[0]
        else:
            vals = np.histogram(df['dist'][df['dist']>0],bins=bins)[0]
        self.rootLogger.info('artist jump distances done for user {} ({})'.format(user,fi))
        return user,vals

    # calculate distribution (using histogram with specified bins)
    # of patch diversity for each user

    # awk 'FNR==1' * > diversity_dists_zeros
    # awk 'FNR==2' * > diversity_dists_nozeros
    def diversity_distributions(self,fi,bins):
        if 'patches' not in fi:
            raise ValueError('WRONG DATATYPE')
        user = fi.split('/')[-1].split('_')[0]
        df = pd.read_pickle(fi).dropna(subset=['diversity'])
        zeros = np.histogram(df[df['n']>=5]['diversity'],bins=bins)[0]
        nozeros = np.histogram(df[(df['n']>=5)&(df['diversity']>0)]['diversity'],bins=bins)[0]

        zeros = zeros/float(zeros.sum())
        nozeros = nozeros/float(nozeros.sum())

        with open(self.args.resultdir+user,'w') as fout:
            fout.write(user+'\t'+'zeros'+'\t'+','.join(zeros.astype(str))+'\n')
            fout.write(user+'\t'+'nozeros'+'\t'+','.join(nozeros.astype(str))+'\n')
        self.rootLogger.info('diversity distributions done for user {} ({})'.format(user,fi))


    def mean_block_distances(self,fi,n=100):

        def cos_nan(arr1,arr2):
            if np.any(np.isnan(arr1)) or np.any(np.isnan(arr2)):
                return np.nan
            else:
                return cosine(arr1,arr2)
#......... remaining code omitted .........
Developer: jlorince | Project: MusicForaging | Lines: 103 | Source: patchAnalyses.py
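The pattern at the heart of run() above, binding fixed keyword arguments with functools.partial and streaming per-item results through the pool before closing it, reduces to a short sketch like the following; the worker and input arrays are illustrative assumptions:

from functools import partial
import numpy as np
from pathos.multiprocessing import ProcessingPool as Pool

def hist_counts(values, bins):
    # stand-in for artist_jump_distributions: histogram one input
    return np.histogram(values, bins=bins)[0]

pool = Pool(2)
func = partial(hist_counts, bins=np.arange(0, 1.01, 0.01))  # bind the fixed kwargs
for counts in pool.imap(func, [np.random.rand(50), np.random.rand(50)]):
    print(counts.sum())  # results stream back as each task finishes
pool.close()
pool.join()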

Example 2: integrate_model

# Required import: from pathos.multiprocessing import ProcessingPool [as alias]
# Or: from pathos.multiprocessing.ProcessingPool import close [as alias]
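# Assumed context for this excerpt: integrate_model is a method of the model
# class; numpy is imported as np, cross_correlation and kdensity_estimate are
# helper functions defined elsewhere in the project, and the source is
# Python 2 (note the use of basestring below).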
    def integrate_model(self, n_realizations, int_length = None, noise_type = 'white', sigma = 1., n_workers = 3, diagnostics = True):
        """
        Integrate trained model.
        noise_type:
        -- white - classic white noise, spatial correlation by cov. matrix of last level residuals
        -- cond - find n_samples closest to the current space in subset of n_pcs and use their cov. matrix
        -- seasonal - seasonal dependence of the residuals, fit n_harm harmonics of annual cycle, could also be used with cond.
        except 'white', one can choose more settings like ['seasonal', 'cond']
        """

        if self.verbose:
            print("preparing to integrate model...")

        pcs = self.input_pcs.copy()
        pcs = pcs.T # time x dim

        pcmax = np.amax(pcs, axis = 0)
        pcmin = np.amin(pcs, axis = 0)
        self.varpc = np.var(pcs, axis = 0, ddof = 1)
        
        self.int_length = pcs.shape[0] if int_length is None else int_length

        self.diagnostics = diagnostics

        if self.harmonic_pred in ['all', 'first']:
            if self.verbose:
                print("...using harmonic predictors (with annual frequency)...")
            self.xsin = np.sin(2*np.pi*np.arange(self.int_length) / 12.)
            self.xcos = np.cos(2*np.pi*np.arange(self.int_length) / 12.)

        if self.verbose:
            print("...preparing noise forcing...")

        self.sigma = sigma
        if isinstance(noise_type, basestring):
            if noise_type not in ['white', 'cond', 'seasonal']:
                raise Exception("Unknown noise type to be used as forcing. Use 'white', 'cond', or 'seasonal'.")
        elif isinstance(noise_type, list):
            noise_type = frozenset(noise_type)
            if not noise_type.issubset(set(['white', 'cond', 'seasonal'])):
                raise Exception("Unknown noise type to be used as forcing. Use 'white', 'cond', or 'seasonal'.")
        
        self.last_level_res = self.residuals[max(self.residuals.keys())]
        self.noise_type = noise_type
        if noise_type == 'white':
            if self.verbose:
                print("...using spatially correlated white noise...")
            Q = np.cov(self.last_level_res, rowvar = 0)
            self.rr = np.linalg.cholesky(Q).T

        if 'seasonal' in noise_type:
            n_harmonics = 5
            if self.verbose:
                print("...fitting %d harmonics to estimate seasonal modulation of last level's residual..." % n_harmonics)
            if self.delay_model:
                resid_delayed = self.last_level_res[-(self.last_level_res.shape[0]//12)*12:].copy()
                rr_last = np.reshape(resid_delayed, (12, self.last_level_res.shape[0]//12, self.last_level_res.shape[1]), order = 'F')
            else:
                rr_last = np.reshape(self.last_level_res, (12, self.last_level_res.shape[0]//12, self.last_level_res.shape[1]), order = 'F')
            rr_last_std = np.nanstd(rr_last, axis = 1, ddof = 1)
            predictors = np.zeros((12, 2*n_harmonics + 1))
            for nh in range(n_harmonics):
                predictors[:, 2*nh] = np.cos(2*np.pi*(nh+1)*np.arange(12) / 12)
                predictors[:, 2*nh+1] = np.sin(2*np.pi*(nh+1)*np.arange(12) / 12)
            predictors[:, -1] = np.ones((12,))
            bamp = np.zeros((predictors.shape[1], pcs.shape[1]))
            for k in range(bamp.shape[1]):
                bamp[:, k] = np.linalg.lstsq(predictors, rr_last_std[:, k])[0]
            rr_last_std_ts = np.dot(predictors, bamp)
            self.rr_last_std_ts = np.repeat(rr_last_std_ts, repeats = self.last_level_res.shape[0]//12, axis = 0)
            if self.delay_model:
                resid_delayed /= self.rr_last_std_ts
                Q = np.cov(resid_delayed, rowvar = 0)
            else:
                self.last_level_res /= self.rr_last_std_ts
                Q = np.cov(self.last_level_res, rowvar = 0)

            self.rr = np.linalg.cholesky(Q).T


        if diagnostics:
            if self.verbose:
                print("...running diagnostics for the data...")
            # ACF, kernel density, integral corr. timescale for data
            self.max_lag = 50
            lag_cors = np.zeros((2*self.max_lag + 1, pcs.shape[1]))
            kernel_densities = np.zeros((100, pcs.shape[1], 2))
            for k in range(pcs.shape[1]):
                lag_cors[:, k] = cross_correlation(pcs[:, k], pcs[:, k], max_lag = self.max_lag)
                kernel_densities[:, k, 0], kernel_densities[:, k, 1] = kdensity_estimate(pcs[:, k], kernel = 'epanechnikov')
            integral_corr_timescale = np.sum(np.abs(lag_cors), axis = 0)

            # init for integrations
            lag_cors_int = np.zeros([n_realizations] + list(lag_cors.shape))
            kernel_densities_int = np.zeros([n_realizations] + list(kernel_densities.shape))
            stat_moments_int = np.zeros((4, n_realizations, pcs.shape[1])) # mean, variance, skewness, kurtosis
            int_corr_scale_int = np.zeros((n_realizations, pcs.shape[1]))

        self.diagpc = np.diag(np.std(pcs, axis = 0, ddof = 1))
        self.maxpc = np.amax(np.abs(pcs))
#......... remaining code omitted .........
Developer: jajcayn | Project: multi-scale | Lines: 103 | Source: empirical_model.py
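The white-noise branch above builds rr, the transposed Cholesky factor of the residual covariance, which is later used to colour i.i.d. draws. A minimal sketch of that colouring step, with illustrative dimensions and sigma = 1:

import numpy as np

residuals = np.random.randn(1000, 20)  # stand-in for the last level's residuals
Q = np.cov(residuals, rowvar=0)        # spatial covariance matrix
rr = np.linalg.cholesky(Q).T           # upper-triangular factor, as in the example
z = np.random.randn(20)                # i.i.d. standard normal draw
noise = np.dot(z, rr)                  # z @ rr has covariance Q: spatially correlated noise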

Example 3: genseq

# Required import: from pathos.multiprocessing import ProcessingPool [as alias]
# Or: from pathos.multiprocessing.ProcessingPool import close [as alias]

# Assumed imports for this excerpt (pops, td, draw, seq_length and n are defined
# earlier in the original script; the source is Python 2, where xrange is range):
import logging
from datetime import datetime, timedelta
from multiprocessing import cpu_count
import numpy as np
import pandas as pd
from pathos.multiprocessing import ProcessingPool as Pool

def genseq(idx):

    first = np.where(np.random.multinomial(1,pvals=pops)==1)[0][0]
    last = first
    last_ts = datetime.now()
    result = {'artist_idx':[first],'ts':[last_ts]}
    for i in xrange(seq_length-1):
        next_listen = draw(last)
        last = next_listen
        gap_bin = 120*np.where(np.random.multinomial(1,pvals=td)==1)[0][0]
        gap = np.random.randint(gap_bin,gap_bin+120)
        result['artist_idx'].append(next_listen)
        new_ts = last_ts+timedelta(0,gap)
        result['ts'].append(new_ts)
        last_ts = new_ts

    df = pd.DataFrame(result)
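    # A new block starts wherever the artist differs from the previous row;
    # cumsum over those change points yields consecutive run (block) IDs.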
    df['block'] = ((df['artist_idx'].shift(1) != df['artist_idx']).astype(int).cumsum())-1
    df.to_pickle(str(idx)+'.pkl')
    logging.info('idx {} complete'.format(idx))

pool = Pool(cpu_count())
indices = range(n)
pool.map(genseq,indices)
pool.close()



Developer: jlorince | Project: MusicForaging | Lines: 28 | Source: generate_seqs.py
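The shift/cumsum idiom that genseq uses to assign block IDs (consecutive listens of the same artist share a block) works on any sequence; a standalone sketch with illustrative data:

import pandas as pd

df = pd.DataFrame({'artist_idx': [7, 7, 3, 3, 3, 7]})
df['block'] = (df['artist_idx'].shift(1) != df['artist_idx']).astype(int).cumsum() - 1
print(df['block'].tolist())  # [0, 0, 1, 1, 1, 2]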

Example 4: setup

# Required import: from pathos.multiprocessing import ProcessingPool [as alias]
# Or: from pathos.multiprocessing.ProcessingPool import close [as alias]

# Assumed imports for this excerpt:
from glob import glob
from functools import partial
from itertools import tee
import numpy as np
from numpy.linalg import norm
from scipy.stats import entropy
from scipy.spatial.distance import cosine, euclidean
from pathos.multiprocessing import ProcessingPool as Pool

#......... preceding code omitted .........
                            for fi in glob(self.args.pickledir + "*.pkl")
                            if "_patches_" not in fi and fi.startswith(self.args.prefix_output)
                        ]
                    )
                else:
                    done = set()
                files = [
                    fi
                    for fi in glob(self.args.pickledir + "*.pkl")
                    if "_patches_" not in fi
                    and fi.startswith(self.args.prefix_input)
                    and self.userFromFile(fi) not in done
                ]

            self.n_files = len(files)

            self.rootLogger.debug(files)

            func_partial = partial(
                self.processor,
                output_dir=self.args.pickledir,
                is_sorted=True,
                features=self.features,
                dist=self.args.distance_metric,
                session_threshold=self.args.session_thresh,
                dist_threshold=self.args.dist_thresh,
                min_patch_length=self.args.min_patch_length,
                artist_idx_feature_map=self.artist_idx_feature_map,
            )

            self.pool = Pool(self.args.n)
            self.rootLogger.info("Pool started")
            self.pool.map(func_partial, files)
            self.pool.close()
            self.rootLogger.info("Pool closed")

    # Jensen-Shannon distance (square root of the Jensen-Shannon divergence)
    @staticmethod
    def JSD(P, Q):
        if np.all(np.isnan(P)) or np.all(np.isnan(Q)):
            return np.nan
        _P = P / norm(P, ord=1)
        _Q = Q / norm(Q, ord=1)
        _M = 0.5 * (_P + _Q)
        return np.sqrt(np.clip(0.5 * (entropy(_P, _M) + entropy(_Q, _M)), 0, 1))

    # Calculate distance between any two feature arrays
    def calc_dist(self, idx_1, idx_2, metric="cosine"):
        features1 = self.get_features(idx_1)
        features2 = self.get_features(idx_2)
        if np.any(np.isnan(features1)) or np.any(np.isnan(features2)):
            return np.nan
        if np.all(features1 == features2):
            return 0.0
        if metric == "JSD":
            return self.JSD(features1, features2)
        elif metric == "cosine":
            return cosine(features1, features2)
        elif metric == "euclidean":
            return euclidean(features1, features2)

    # "s -> (s0,s1), (s1,s2), (s2, s3), ..."
    @staticmethod
    def pairwise(iterable):
        a, b = tee(iterable)
        next(b, None)
Developer: jlorince | Project: MusicForaging | Lines: 70 | Source: setup.py
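The JSD helper in this example is self-contained; a quick standalone check of the same computation, with illustrative count vectors:

import numpy as np
from numpy.linalg import norm
from scipy.stats import entropy

P = np.array([10., 0., 5.])  # illustrative count vectors
Q = np.array([4., 4., 4.])
_P, _Q = P / norm(P, ord=1), Q / norm(Q, ord=1)  # normalize to distributions
_M = 0.5 * (_P + _Q)
jsd = np.sqrt(np.clip(0.5 * (entropy(_P, _M) + entropy(_Q, _M)), 0, 1))
print(jsd)  # a value in [0, 1]; 0 means the distributions are identical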


Note: the pathos.multiprocessing.ProcessingPool.close examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors; copyright remains with the original authors, and redistribution and use are subject to each project's license. Please do not reproduce without permission.