

Python Parallel.append Method Code Examples

This article collects and summarizes typical usage examples of the joblib.Parallel.append method in Python. If you are struggling with questions such as: What exactly does Parallel.append do? How is it used? What does a working example look like? — then the curated code examples below may help. You can also explore further usage examples of joblib.Parallel, the class these snippets are built around.


The following presents 15 code examples of the Parallel.append method, sorted by popularity by default.
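A note before the examples: joblib's Parallel class does not itself define an append method. In every snippet below, append is called on an ordinary Python list that serves as the sequential fallback, while the parallel branch simply lets Parallel(...) build and return the result list. The sketch below distills this recurring pattern; it is a minimal illustration, and the function square, its inputs, and the n_jobs value are placeholder assumptions rather than code taken from any example on this page.

from joblib import Parallel, delayed

def square(x):
    # Placeholder workload (assumption); any picklable function works here.
    return x * x

def compute(inputs, use_joblib=True, n_jobs=2):
    if use_joblib:
        # Parallel builds and returns the list of results itself.
        return Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(square)(x) for x in inputs)
    # Sequential fallback: collect results with list.append.
    results = []
    for x in inputs:
        results.append(square(x))
    return results

print(compute(range(5)))                    # [0, 1, 4, 9, 16]
print(compute(range(5), use_joblib=False))  # same result, single process

Every example that follows instantiates some variation of this pattern.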

Example 1: analysis

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]
    def analysis(self, permute=False):
        """
        Classify based on iteratively increasing the number of features (electrodes) included in the model. Starts with
        the single best electrode (N=1) and increases until N equals the number of electrodes.

        Note: permute is not used in this analysis, but kept to match the same signature as super.
        """
        if self.subject_data is None:
            print('%s: compute or load data first with .load_data()!' % self.subject)

        # Get recalled or not labels
        if self.recall_filter_func is None:
            print('%s classifier: please provide a .recall_filter_func function.' % self.subject)
        y = self.recall_filter_func(self.subject_data)

        # zscore the data by session
        x = self.zscore_data()

        # create the classifier
        classifier = LogisticRegression(C=self.C, penalty=self.norm, solver='liblinear')

        # create .num_rand_splits of cv_dicts
        cv_dicts = [self._make_cross_val_labels() for _ in range(self.num_rand_splits)]

        # run permutations with joblib
        f = _par_compute_and_run_split
        if self.use_joblib:
            aucs = Parallel(n_jobs=12, verbose=5)(delayed(f)(cv, classifier, x, y) for cv in cv_dicts)
        else:
            aucs = []
            for cv in tqdm(cv_dicts):
                aucs.append(f(cv, classifier, x, y))

        # store results
        self.res['auc_x_n'] = np.stack(aucs)
Developer: jayfmil, Project: TH_python, Lines: 37, Source: subject_classifier_using_n_features.py

Example 2: auto_choose

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]
def auto_choose(actionfile, new_xyz, nparallel=-1):
    """
    @param actionfile: h5py.File object
    @param new_xyz   : new rope point-cloud
    @param nparallel : number of parallel jobs to run for tps cost calculation.
                       If -1, only one job is used (no parallelization).
    
    @return          : return the name of the segment with the lowest warping cost.
    """
    if not nparallel == -1:
        from joblib import Parallel, delayed
        nparallel = min(nparallel, 8)

    demo_data = actionfile.items()

    if nparallel != -1:
        before = time.time()
        redprint("auto choose parallel with njobs = %d"%nparallel)
        costs  = Parallel(n_jobs=nparallel, verbose=0)(delayed(registration_cost)(ddata[1]['cloud_xyz'][:], new_xyz) for ddata in demo_data)
        after  = time.time()
        print "Parallel registration time in seconds =", after - before
    else:
        costs = []
        redprint("auto choose sequential..")
        for i, ddata in enumerate(demo_data):
            costs.append(registration_cost(ddata[1]['cloud_xyz'][:], new_xyz))
            print(("tps-cost completed %i/%i" % (i + 1, len(demo_data))))

    ibest = np.argmin(costs)
    redprint ("auto choose returning..")
    return demo_data[ibest][0]
Developer: rishabh-battulwar, Project: human_demos, Lines: 33, Source: do_task_floating_temp.py

Example 3: main

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]
def main():
    parser = argparse.ArgumentParser(description='Register & align images')
    parser.add_argument('filenames',nargs='+',help='List of target files to register. Images are aligned to first in list.')
    parser.add_argument('-odir',metavar='outdir',required=True,type=str,help='Output directory for files.')
    parser.add_argument('-m',metavar='method',choices=('point','extended'),default='extended',help='Specify alignment method (point or extended); default=extended.')
    parser.add_argument('-xy',nargs=2,type=float,default=None,help='Specify approximate "x y" pixel coordinate of object to centroid on.  Required for point mode; useful for extended mode (default=center of image).')
    parser.add_argument('-box',nargs=2,type=int,default=None,help='Specify box size (w h) to restrict alignment search.  Useful for both point & extended modes (default=full size of array).')
    parser.add_argument('--c',action='store_true',help='Clobber (overwrite) on output')
    parser.add_argument('-njobs',type=int,default=1,help='Process images in parallel. "-1" is all CPUs (default=1).')
    
    args = parser.parse_args()

    if args.m == 'point' and args.xy is None:
        parser.error("-m point requires -xy coordinate")

    # create output directory
    if args.odir not in ['','.']:
        makedirs(args.odir,exist_ok=True)

    # align all images to first filename
    ref = args.filenames[0]
    align = args.filenames[1:]

    imref = partial(register,ref=ref,outdir=args.odir,
                    method=args.m,center=args.xy,size=args.box,
                    overwrite=args.c)
    
    outfiles = Parallel(n_jobs=args.njobs,verbose=11)(delayed(imref)(toshift=a) for a in align)

    # Write ref to outdir
    refnew = os.path.join(args.odir,os.path.basename(ref))
    copy(ref,refnew)

    outfiles.append(refnew)
    print('Wrote %i files to %s' % (len(outfiles), args.odir))
Developer: msgordon, Project: optipol-reduc, Lines: 37, Source: imalign.py

Example 4: find_closest_auto

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]
def find_closest_auto(demofile, new_xyz):
    if args.parallel:
        from joblib import Parallel, delayed
    demo_clouds = [asarray(seg["cloud_xyz"]) for seg in demofile.values()]
    keys = demofile.keys()
    if args.parallel:
        costs = Parallel(n_jobs=3,verbose=100)(delayed(registration_cost)(demo_cloud, new_xyz) for demo_cloud in demo_clouds)
    else:
        costs = []
        for (i,ds_cloud) in enumerate(demo_clouds):
            costs.append(registration_cost(ds_cloud, new_xyz))
            print "completed %i/%i"%(i+1, len(demo_clouds))
    
    print "costs\n",costs
    if args.show_neighbors:
        nshow = min(5, len(keys))
        import cv2, rapprentice.cv_plot_utils as cpu
        sortinds = np.argsort(costs)[:nshow]
        near_rgbs = [asarray(demofile[keys[i]]["rgb"]) for i in sortinds]
        bigimg = cpu.tile_images(near_rgbs, 1, nshow)
        cv2.imshow("neighbors", bigimg)
        print "press any key to continue"
        cv2.waitKey()
        
    ibest = np.argmin(costs)
    return keys[ibest]
Developer: warriorarmentaix, Project: rapprentice, Lines: 28, Source: do_task_ee.py

Example 5: basic_compute_loop

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]
def basic_compute_loop(compute_function,looper,run_parallel=True,debug=False):
	"""Canonical form of the basic compute loop."""
	start = time.time()
	if run_parallel:
		incoming = Parallel(n_jobs=8,verbose=10 if debug else 0)(
			delayed(compute_function,has_shareable_memory)(**looper[ll]) 
			for ll in framelooper(len(looper),start=start))
	else: 
		incoming = []
		for ll in framelooper(len(looper)):
			incoming.append(compute_function(**looper[ll]))
	return incoming
Developer: ejjordan, Project: analyo, Lines: 14, Source: plot-hbonds_contacts.py

Example 6: auto_choose

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]
def auto_choose(actionfile, new_xyz, softmin_k = 1, softmin_alpha = 1, nparallel=-1):
    """
    @param actionfile : h5py.File object
    @param new_xyz    : new rope point-cloud
    @param softmin_k  : use a softmin distribution over the first <softmin_k> demonstrations;
                        set to 1 for nearest neighbor
    @param nparallel  : number of parallel jobs to run for tps cost calculation;
                        set to -1 for no parallelization
    
    @return          : return the name of the segment with the lowest warping cost.
    """
    if not nparallel == -1:
        from joblib import Parallel, delayed
        nparallel = min(nparallel, 8)

    demo_data = actionfile.items()

    if nparallel != -1:
        before = time.time()
        redprint("auto choose parallel with njobs = %d"%nparallel)
        costs  = Parallel(n_jobs=nparallel, verbose=100)(delayed(registration_cost)(ddata[1]['cloud_xyz'][:], new_xyz) for ddata in demo_data)
        after  = time.time()
        print "Parallel registration time in seconds =", after - before
    else:
        costs = []
        redprint("auto choose sequential..")
        for i, ddata in enumerate(demo_data):
            costs.append(registration_cost(ddata[1]['cloud_xyz'][:], new_xyz))
            print(("tps-cost completed %i/%i" % (i + 1, len(demo_data))))
    
    # use a random draw from the softmin distribution
    demo_costs = zip(costs, demo_data)
    if softmin_k == 1:
        ibest = np.argmin(costs)
        return demo_data[ibest][0]
    best_k_demos = np.asarray(sorted(demo_costs)[:softmin_k])
    best_k_exps = np.exp(-1*softmin_alpha*best_k_demos[:, 0].astype(float))  # multiply by -1 b/c we're actually min-ing; astype casts the cost column, since float() fails on an array
    if len(best_k_exps) > 1:
        denom = sum(best_k_exps)
    else:
        denom = best_k_exps
    mass_fn = best_k_exps/denom

    draw = random.random()
    for i in range(len(best_k_demos)):  # range() needs an int, not an array
        if draw <= mass_fn[i]:
            ret_val = demo_data[i][0]
            break
        draw -= mass_fn[i]
    
    redprint ("auto choose returning..")
    return ret_val
Developer: dhadfieldmenell, Project: bootstrapping-lfd, Lines: 54, Source: do_task_floating.py

Example 7: train

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]
	def train(self):
		regressors = []
		if self.parallel:
			regressors = Parallel(n_jobs=-1)(delayed(trainBin)(self.params[b], np.atleast_2d(self.ind).T, self.dep[b],self.indWeights) for b in self.OD.bins)
		else:
			for b in self.OD.bins:
				regressors.append(trainBin(self.params[b],np.atleast_2d(self.ind).T, self.dep[b],self.indWeights))
				#self.svr[b] = SVR(cache_size=1000,kernel='rbf', C=self.params[b]['C'], gamma=self.params[b]['gamma'])
				#self.svr[b].fit(np.array([self.ind]).T,self.dep[b])
				
		
		for i,model in enumerate(regressors):
			self.svr[self.OD.bins[i]] = model
Developer: Kazjon, Project: SurpriseEval, Lines: 15, Source: ED.py

Example 8: run_all

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]
def run_all(cnf, samples, process_one, finalize_one, finalize_all):
    if len(samples) == 1:
        sample_name, sample_cnf = samples.items()[0]
        run_one(sample_cnf, process_one, finalize_one)
    else:
        results = []
        if cnf.get('parallel'):
            try:
                from joblib import Parallel, delayed
            except ImportError:
                critical(
                    '\nERROR: Joblib not found. You may want samples to be processed '
                    'in parallel; in this case, make sure the python joblib package is '
                    'installed (pip install joblib).')
            else:
                for sample_name, sample_cnf in samples.items():
                    sample_cnf['verbose'] = False

                results = Parallel(n_jobs=len(samples)) \
                    (delayed(run_one)(sample_cnf, process_one, finalize_one,
                                      multiple_samples=True)
                        for sample_name, sample_cnf in samples.items())
        else:
            results = []
            for sample_name, sample_cnf in samples.items():
                results.append(
                    run_one(sample_cnf, process_one, finalize_one,
                            multiple_samples=True))

        if samples:
            info('')
            info('*' * 70)
            info('Results for each sample:')
            finalize_all(cnf, samples, results)

    # Cleaning
    for name, data in samples.items():
        work_dirpath = data['work_dir']
        tx_dirpath = join(work_dirpath, 'tx')

        if isdir(tx_dirpath):
            shutil.rmtree(tx_dirpath)

        if not data.get('keep_intermediate') \
                and isdir(work_dirpath):
            shutil.rmtree(work_dirpath)
Developer: mjafin, Project: variantannotation, Lines: 48, Source: runner.py

Example 9: auto_choose

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]
def auto_choose(demofile, new_xyz, only_original_segments):
    """
    @param demofile:
    @param new_xyz:
    @param only_original_segments: if true, then only the original_segments will be registered with
    @return:
    """
    import pprint

    """Return the segment with the lowest warping cost. Takes about 2 seconds."""
    parallel = True
    if parallel:
        from joblib import Parallel, delayed
    items = demofile.items()
    if only_original_segments:
        #remove all derived segments from items
        print("Only registering with the original segments")
        items = [item for item in items if not "derived" in item[1].keys()]
    unzipped_items = zip(*items)
    keys = unzipped_items[0]
    values = unzipped_items[1]
    ds_clouds, shapes = get_downsampled_clouds(values)
    ds_new = clouds.downsample(new_xyz, 0.01 * DS_SIZE)
    #print 'ds_new_len shape', ds_new.shape
    if parallel:
        before = time.time()
        #TODO: change back n_jobs=12 ?
        costs = Parallel(n_jobs=8, verbose=0)(delayed(registration_cost)(ds_cloud, ds_new) for ds_cloud in ds_clouds)
        after = time.time()
        print "Parallel registration time in seconds =", after - before
    else:
        costs = []
        for (i, ds_cloud) in enumerate(ds_clouds):
            costs.append(registration_cost(ds_cloud, ds_new))
            print(("completed %i/%i" % (i + 1, len(ds_clouds))))
            #print(("costs\n", costs))
    ibest = np.argmin(costs)
    print "ibest = ", ibest
    #pprint.pprint(zip(keys, costs, shapes))
    #print keys
    print "best key = ", keys[ibest]
    print "best cost = ", costs[ibest]
    return keys[ibest]
Developer: dhadfieldmenell, Project: bootstrapping-lfd, Lines: 45, Source: do_task.py

Example 10: find_TADs

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]
 def find_TADs(self, data, gammalist=range(10, 110, 10), segmentation='potts',
               minlen=3, drop_gamma=False, n_jobs='auto'):
     '''
     Finds TADs in data with a list of gammas. Returns a pandas DataFrame
     with columns 'Start', 'End' and 'Gamma'. Use genome_intervals_to_chr on
     the returned object to get coordinates in bed-style format and not in
     coordinates of concatenated genome.
     If *drop_gamma*, drops the 'Gamma' column (useful when using 1 gamma)
     '''
     raise DeprecationWarning('Will be deprecated or rewritten to use '
                              'lavaburst: github.com/nezar-compbio/lavaburst')
     if n_jobs == 'auto':  # `is` is unreliable for string comparison; empirical values on my computer; with >8 Gb memory try increasing n_jobs
         if segmentation == 'potts':
             n_jobs = 3
         elif segmentation == 'armatus':
             n_jobs = 6
     if ~np.isfinite(data).any():
         print 'Non-finite values in data, substituting them with zeroes'
         data[~np.isfinite(data)] = 0
     Wcomm, Wnull, pass_mask, length = _precalculate_TADs_in_array(data)
     f = _calculate_TADs
     if n_jobs >= 1:
         from joblib import Parallel, delayed
         domains = Parallel(n_jobs=n_jobs, max_nbytes=1e6)(
                           delayed(f)(Wcomm, Wnull, pass_mask, length, g, segmentation)
                                                                    for g in gammalist)
     elif n_jobs is None or n_jobs == False or n_jobs == 0:
         domains = []
         for g in gammalist:
             domains_g = f(Wcomm, Wnull, pass_mask, length, g, segmentation)
             domains.append(domains_g)
     domains = pd.concat(domains, ignore_index=True)
     domains = domains.query('End-Start>='+str(minlen)).copy()
     domains = domains.sort(columns=['Gamma', 'Start', 'End'])
     domains.reset_index(drop=True, inplace=True)
     domains[['Start', 'End']] = domains[['Start', 'End']].astype(int)
     domains[['Start', 'End']] *= self.resolution
     domains = domains[['Start', 'End', 'Score', 'Gamma']]
     if drop_gamma:
         domains.drop('Gamma', axis=1, inplace=True)
     domains = self.genome_intervals_to_chr(domains).reset_index(drop=True)
     return domains
Developer: Phlya, Project: hicplotlib, Lines: 44, Source: GenomicIntervals.py

Example 11: main

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]
def main():
    """
    Main function.

    1. Setup logging
    2. Get arguments
    3. Get index
    4. Process files
    5. Write output
    """

    setup_logging()

    logger = logging.getLogger("stats." + __name__)

    args = get_args()

    index = get_index(args)

    logger.warning("Positions not in annotation will be ignored.")

    logger.info("Found " + str(len(args.inputs)) + " input file(s):")
    for input_file in sorted(args.inputs):
        logger.debug(input_file)

    if args.is_parallel:
        stats = Parallel(n_jobs=args.parallel,
                         verbose=100,
                         batch_size=1)(delayed(process_file)(input_file,
                                                             args.type,
                                                             index,
                                                             args.is_parallel)
                                       for input_file in args.inputs)
    else:
        stats = []
        for input_file in args.inputs:
            output_table = process_file(input_file, args.type, index,
                                        args.is_parallel)
            stats.append(output_table)

    write_stats(args.out, stats)
Developer: lazappi, Project: binf-scripts, Lines: 43, Source: alignStats.py

Example 12: findPeaks

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]
def findPeaks(imgdict, maplist, params, maptype="ccmaxmap", pikfile=True):
	peaktreelist = []
	count = 0

	thresh =    float(params["thresh"])
	bin =       int(params["bin"])
	diam =      float(params["diam"])
	apix =      float(params["apix"])
	olapmult =  float(params["overlapmult"])
	maxpeaks =  int(params["maxpeaks"])
	maxthresh = params["maxthresh"]
	maxsizemult = float(params["maxsize"])
	peaktype =  params["peaktype"]
	msg =       not params['background']
	pixdiam =   diam/apix/float(bin)
	pixrad =    diam/apix/2.0/float(bin)

	numpyVersion = float(numpy.version.version[:3])
	if numpyVersion > 1.7:
		peaktreelist = Parallel(n_jobs=params['nproc'])(delayed(runFindPeaks)(params,
			maplist,maptype,pikfile,thresh,pixdiam,count,olapmult,maxpeaks,maxsizemult,
			msg,bin,peaktype,pixrad,imgdict) for count in range(0,len(maplist)))
	else:
		## backup for AttributeError: 'memmap' object has no attribute 'offset', bug #3322
		peaktreelist = []
		for count in range(0,len(maplist)):
			mappeaktree = runFindPeaks(params,maplist,maptype,pikfile,thresh,pixdiam,count,olapmult,
				maxpeaks,maxsizemult,msg,bin,peaktype,pixrad,imgdict)
			peaktreelist.append(mappeaktree)

	peaktree = mergePeakTrees(imgdict, peaktreelist, params, msg, pikfile)

	#max threshold
	if maxthresh is not None:
		precount = len(peaktree)
		peaktree = maxThreshPeaks(peaktree, maxthresh)
		postcount = len(peaktree)
		#if precount != postcount:
		apDisplay.printMsg("Filtered %d particles above threshold %.2f"%(precount-postcount,maxthresh))

	return peaktree
Developer: leschzinerlab, Project: myami-3.2-freeHand, Lines: 43, Source: apPeaks.py

Example 13: pmultiquery

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]

#......... part of this code omitted .........
        num_cores = num_proc

    # make sure quicksaves are right type
    if quicksave is True:
        raise ValueError("quicksave must be string when using pmultiquery.")

    # the options that don't change
    d = {
        "option": option,
        #'paralleling': True,
        "function": "interrogator",
        "root": root,
        "note": note,
        "denominator": denom,
    }
    # add kwargs to query
    for k, v in kwargs.items():
        d[k] = v

    # make a list of dicts to pass to interrogator,
    # with the iterable unique in every one
    ds = []
    if multiple_corpora:
        path = sorted(path)
        for index, p in enumerate(path):
            name = os.path.basename(p)
            a_dict = dict(d)
            a_dict["path"] = p
            a_dict["query"] = query
            a_dict["outname"] = name
            a_dict["just_speakers"] = just_speakers
            a_dict["paralleling"] = index
            a_dict["printstatus"] = False
            ds.append(a_dict)
    elif multiple_queries:
        for index, (name, q) in enumerate(query.items()):
            a_dict = dict(d)
            a_dict["path"] = path
            a_dict["query"] = q
            a_dict["outname"] = name
            a_dict["just_speakers"] = just_speakers
            a_dict["paralleling"] = index
            a_dict["printstatus"] = False
            ds.append(a_dict)
    elif multiple_option:
        for index, (name, q) in enumerate(function_filter.items()):
            a_dict = dict(d)
            a_dict["path"] = path
            a_dict["query"] = query
            a_dict["outname"] = name
            a_dict["just_speakers"] = just_speakers
            a_dict["paralleling"] = index
            a_dict["function_filter"] = q
            a_dict["printstatus"] = False
            ds.append(a_dict)
    elif multiple_speakers:
        for index, name in enumerate(just_speakers):
            a_dict = dict(d)
            a_dict["path"] = path
            a_dict["query"] = query
            a_dict["outname"] = name
            a_dict["just_speakers"] = [name]
            a_dict["function_filter"] = function_filter
            a_dict["paralleling"] = index
            a_dict["printstatus"] = False
            ds.append(a_dict)
Developer: hakumiogin, Project: corpkit, Lines: 70, Source: multiprocess.py

Example 14: pmultiquery

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]

#......... part of this code omitted .........
#.........这里部分代码省略.........
        num_cores = 1

    # make sure quicksaves are right type
    if quicksave is True:
        raise ValueError('quicksave must be string when using pmultiquery.')
    
    # the options that don't change
    d = {
         #'paralleling': True,
         'function': 'interrogator',
         'root': root,
         'note': note,
         'denominator': denom}
    
    # add kwargs to query
    for k, v in list(kwargs.items()):
        d[k] = v

    # make a list of dicts to pass to interrogator,
    # with the iterable unique in every one
    ds = []
    if multiple_corpora:
        for index, p in enumerate(corpus):
            name = p.name
            a_dict = dict(d)
            a_dict['corpus'] = p
            a_dict['search'] = search
            a_dict['query'] = query
            a_dict['show'] = show
            a_dict['outname'] = name.replace('-parsed', '')
            a_dict['just_speakers'] = just_speakers
            a_dict['paralleling'] = index
            a_dict['printstatus'] = False
            ds.append(a_dict)
    elif multiple_queries:
        for index, (name, q) in enumerate(query.items()):
            a_dict = dict(d)
            a_dict['corpus'] = corpus
            a_dict['search'] = search
            a_dict['query'] = q
            a_dict['show'] = show
            a_dict['outname'] = name
            a_dict['just_speakers'] = just_speakers
            a_dict['paralleling'] = index
            a_dict['printstatus'] = False
            ds.append(a_dict)
    elif multiple_option:
        for index, (name, q) in enumerate(function_filter.items()):
            a_dict = dict(d)
            a_dict['corpus'] = corpus
            a_dict['search'] = search
            a_dict['query'] = query
            a_dict['show'] = show
            a_dict['outname'] = name
            a_dict['just_speakers'] = just_speakers
            a_dict['paralleling'] = index
            a_dict['function_filter'] = q
            a_dict['printstatus'] = False
            ds.append(a_dict)
    elif multiple_speakers:
        for index, name in enumerate(just_speakers):
            a_dict = dict(d)
            a_dict['corpus'] = corpus
            a_dict['search'] = search
            a_dict['query'] = query
            a_dict['show'] = show
Developer: nkhuyu, Project: corpkit, Lines: 70, Source: multiprocess.py

Example 15: mab_eval

# Required module: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import append [as alias]
def mab_eval(bandit, T, pol_cfg, N_trials=100, seed=None, parallel=False):
    if seed is not None:
        np.random.seed(seed)

    all_policies = extract_policies(**pol_cfg)
    policies = []
    for p in all_policies:
        if p.name in pol_cfg['names']:
            policies.append(p)
    names = [p.name for p in policies]

    arm_dists = [bandit.resample_arms() for _ in range(N_trials)]
    results = []
    print 'Evaluating Policies {}'.format(names)
    if parallel == 1:                   

        rc = ipp.Client(profile='ssh')

        dv = rc[:]
        n_clients = len(dv)
        with dv.sync_imports():
            import mab
        v = rc.load_balanced_view()
        
        results = v.map(eval_helper, arm_dists, [bandit.arm_prior] * N_trials, 
                        [T]*N_trials, [pol_cfg]*N_trials, [frozenset(names)] * N_trials,
                        [seed + inum for inum in range(N_trials)])

        start = time.time()        
        while rc.outstanding:
            try:
                rc.wait(rc.outstanding, 1e-1)
            except ipp.TimeoutError:
                # ignore timeouterrors
                pass
            n_complete = N_trials - len(rc.outstanding)
            if n_complete > 0:
                est_remaining = ((time.time() - start) / n_complete) * len(rc.outstanding)
            else:
                est_remaining = 'No Estimate'
            sys.stdout.write('\rFinished {} / {} jobs\tEstimated Time Remaining: {}'.format(n_complete, N_trials, est_remaining))
            sys.stdout.flush()
    elif parallel == 2:
        from joblib import Parallel, delayed
        results = Parallel(n_jobs=7, verbose=50)(delayed(_eval_helper)(
                ad, bandit.arm_prior, T, pol_cfg, names, seed + inum) for 
                inum, ad in enumerate(arm_dists))
    else:

        for inum, ad in enumerate(arm_dists):
            results.append(eval_helper(ad, bandit.arm_prior, T, pol_cfg, names, seed=seed+inum))
            sys.stdout.write("{} / {}\t".format(inum, N_trials))
            sys.stdout.flush()
    means = []
    variances = []
    avg_err = []
    discounted_mean = []
    for j in range(len(policies)):
        try:
            regrets, choices, discounted = results[0].get()
        except CompositeError, e:
            print e
            import IPython; IPython.embed()

        regrets = regrets[j]
        choices = choices[j]
        discounted = discounted[j]
        errors = np.array(choices != bandit.ibest, dtype=np.int)
        for i in range(1, N_trials):
            regrets_i, choices, discounted_i = results[i].get()
            regrets = np.c_[regrets, regrets_i[j]]
            errors += (choices[j] != bandit.ibest)
            discounted += discounted_i[j]
        discounted /= N_trials
        discounted_mean.append(discounted)
        means.append(np.mean(regrets, axis=1))
        variances.append(np.var(regrets, axis=1))
        avg_err.append(errors / N_trials)
Developer: dhadfieldmenell, Project: factored-dm, Lines: 80, Source: eval_mab.py


Note: The joblib.Parallel.append method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by many programmers; copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not repost without permission.