

Python multiprocessing.log_to_stderr Function Code Examples

This article collects typical usage examples of Python's multiprocessing.log_to_stderr function, gathered from open-source projects. If you are wondering what log_to_stderr does, how to call it, or what it looks like in real code, the curated examples below should help.


Fifteen code examples of the log_to_stderr function are shown below, ordered by popularity by default.
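Before the collected examples, here is a minimal, self-contained sketch of the pattern most of them follow: log_to_stderr() attaches a StreamHandler writing to sys.stderr to multiprocessing's internal logger and returns that logger, so with the level set to INFO the module's own messages about child-process startup and shutdown become visible on the console. Everything here is standard library; work() is just a placeholder task.

import logging
import multiprocessing


def work(x):
    # Placeholder task executed in the worker processes.
    return x * x


if __name__ == '__main__':
    # Attach a stderr handler to multiprocessing's logger and get that logger back.
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(logging.INFO)

    with multiprocessing.Pool(processes=2) as pool:
        print(pool.map(work, range(5)))
    logger.info("all workers finished")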

Example 1: main

def main():
  multiprocessing.log_to_stderr()
  logger = multiprocessing.get_logger()
  logger.setLevel(logging.INFO)
  pool = RedisPool(30)
  p = WorkerEngine(pool, 'Fetcher', 25)
  p.start(logger)
Developer: gearnode, Project: pyq, Lines: 7, Source file: worker.py

Example 2: SumPixels

def SumPixels(file_list, file_path, istart, iend, queue):
    logger = mp.get_logger()
    mp.log_to_stderr(logging.INFO)

    roi_sums = mproc.SHARED_ARRAY
    data_array = TiffDatatArray()
    logger.info("Reading files from %d to %d" % (istart, iend))
    if istart > 0:
        istart = istart - 1

    # Process each file in the list that falls in the range istart to iend
    for i in range(istart, iend):

        # Read in the information from the file and create numpy arrays from that information.
        data_array.CreateArrays(os.path.join(file_path, file_list[i]))

        # Sum the data in the arrays that lies between the roi values.  Do this
        # for each roi that was created.
        new_sum = data_array.GetDataArray()
        roi_sums = numpy.add(roi_sums, new_sum)

        # Add a value of 1 to the queue so that the user interface can be updated
        # with the latest progress.
        queue.put(1)
    return roi_sums
Developer: roehrig, Project: XrayDiffractionDataTool, Lines: 25, Source file: CustomThreads.py
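Two details in this example generalize to the others in this article: log_to_stderr() takes an optional level argument, so mp.log_to_stderr(logging.INFO) attaches the stderr handler and sets the level in one call, and calling mp.get_logger() first is harmless because both functions return the same singleton logger. A minimal sketch of the equivalence:

import logging
import multiprocessing as mp

# Two-step form used by several examples in this article:
logger = mp.get_logger()
mp.log_to_stderr()
logger.setLevel(logging.INFO)

# One-step form used here; log_to_stderr() sets the level itself when one is given:
logger = mp.log_to_stderr(logging.INFO)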

Example 3: test_log

def test_log():
    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)
    t1 = time.time()
    print(time.time() - t1)
    logger.info("done")
Developer: cuijiabin, Project: python_text, Lines: 7, Source file: mysql2mongo.py

Example 4: init_logging

def init_logging():
    """Initialize logging system."""

    debug = '--debug' in sys.argv
    level = logging.DEBUG if debug else logging.INFO
    multiprocessing.log_to_stderr()
    logger.setLevel(level)
Developer: takumainoue, Project: .emacs.d, Lines: 7, Source file: tern_django.py

Example 5: __init__

    def __init__(self, cmd_param):
        '''
        Main process class; structured this way for Windows compatibility.
        '''
        super(Mtask, self).__init__()
        self.start_time = datetime.datetime.now()
        self.parent_timeout = 180
        self.parent_timeout_flag = 0
        self.child_timeout = 120
        self.child_num = 10
        self.slice_num = 20
        self.process_list = []
        self.result = []
        self.batch_id = 0
        self.print_flag = 1
        self.mult_debug_flag = 0
        self.cmd_param = cmd_param
       


        if self.mult_debug_flag:
            # configure the multiprocessing logger
            multiprocessing.log_to_stderr()
            logger = multiprocessing.get_logger()
            logger.setLevel(logging.INFO)
Developer: cesaiskra, Project: hosts_manage, Lines: 25, Source file: Copy+of+mtask.py

Example 6: run_prevalence

def run_prevalence(out_dir, remove_rt, working_dir, produce_prevalence):
    """
    Create exact prevalences over a list of DRMs provided by a sequence
    object when provided with simulated resistant and susceptible files.

    Args:
        out_dir: The final folder into which to place the anonymized files.
        remove_rt: Are we removing a piece of the RT gene?
        working_dir: The folder in which to place temporary files.
        produce_prevalence: what prevalence to produce

    Returns:
        True on completion.
    """

    print "Building exact prevalences."

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    csv_rows = []
    threads = []

    with open(os.path.join(working_dir, error_filename), 'rb') as handle:
        error_data = pickle.load(handle)
        platform = error_data['platform']
        paired_end = error_data['paired_end']

        manifest_queue = multiprocessing.Manager().Queue()
        multiprocessing.log_to_stderr()
        p = LoggingPool(len(error_data['files']))
    
        for result in error_data['files']:

            if remove_rt:
                result['sequence'].remove_rt = True

            process_result = p.apply_async(run_prevalence_thread, [
                manifest_queue, platform, paired_end,
                    result, working_dir, out_dir, produce_prevalence
            ])

        p.close()
        p.join()

        with open(os.path.join(out_dir, 
            final_csv_filename), 'w') as csv_handle,\
            open(os.path.join(out_dir, 
            final_json_filename), 'w') as json_handle:
            csv_handle.write(','.join(csv_header_rows) + '\n')
            test = sample.header(plat.platforms[platform],
                paired_end)
            test['samples'] = []
            while not manifest_queue.empty():
                s = manifest_queue.get()
                test['samples'].append(s.encode(plat.platforms[platform]))
                csv_handle.write(s.dump_csv())
            json_handle.write(json.dumps(test, indent=2))
                
    return True
Developer: hyraxbio, Project: simulated-data, Lines: 60, Source file: run_simulation.py

Example 7: SumData

def SumData(roi_start_list, roi_end_list, file_list, file_path, istart, iend, queue):
    logger = mp.get_logger()
    mp.log_to_stderr(logging.INFO)

    roi_sums = mproc.SHARED_ARRAY
    data_array = XYDataArray()
    num_rois = len(roi_start_list)
    logger.info("Reading files from %d to %d" % (istart, iend))
    if istart > 0:
        istart = istart - 1

    # Process each file in the list that falls in the range istart to iend
    for i in range(istart, iend):

        # Read in the information from the file and create numpy arrays from that information.
        data_array.CreateArrays(os.path.join(file_path, file_list[i]))

        # Sum the data in the arrays that lies between the roi values.  Do this
        # for each roi that was created.
        for j in range(num_rois):
#           logger.info("Summing roi %d from file %d" % (j, i))
            roi_sums[j][i] = roi_sums[j][i] + data_array.SumROIData(roi_start_list[j], roi_end_list[j])

        # Add a value of 1 to the queue so that the user interface can be updated
        # with the latest progress.
        queue.put(1)
    return roi_sums
Developer: roehrig, Project: XrayDiffractionDataTool, Lines: 27, Source file: CustomThreads.py

Example 8: setup_logger

def setup_logger(loglevel=conf.DAEMON_LOG_LEVEL, logfile=None,
        format=conf.LOG_FORMAT, **kwargs):
    """Setup the ``multiprocessing`` logger. If ``logfile`` is not specified,
    ``stderr`` is used.

    Returns logger object.
    """
    if not _monkeypatched[0]:
        monkeypatch()
        _monkeypatched[0] = True

    logger = get_default_logger(loglevel=loglevel)
    if logger.handlers:
        # Logger already configured
        return logger
    if logfile:
        if hasattr(logfile, "write"):
            log_file_handler = logging.StreamHandler(logfile)
        else:
            log_file_handler = logging.FileHandler(logfile)
        formatter = logging.Formatter(format)
        log_file_handler.setFormatter(formatter)
        logger.addHandler(log_file_handler)
    else:
        import multiprocessing
        multiprocessing.log_to_stderr()
    return logger
Developer: miracle2k, Project: celery, Lines: 27, Source file: log.py
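The branching on logfile is the interesting part of this helper: a file-like object (anything with a .write method) is wrapped in a logging.StreamHandler, a plain path goes to a logging.FileHandler, and with no logfile at all the function falls back to multiprocessing.log_to_stderr(). A hedged sketch of the three call shapes, assuming the celery configuration this module imports is available; they are alternative invocations, not meant to run in sequence, since the helper returns early once the logger already has handlers:

import sys

setup_logger()                        # no logfile -> falls back to multiprocessing.log_to_stderr()
setup_logger(logfile="worker.log")    # path string -> logging.FileHandler("worker.log")
setup_logger(logfile=sys.stdout)      # object with .write -> logging.StreamHandler(sys.stdout)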

Example 9: init

	def init(self, nprocs=None, spawnonce=True):
		multiprocessing.log_to_stderr(logging.WARN)
		if nprocs is None:
			self.nprocs = multiprocessing.cpu_count()
		else:
			self.nprocs = multiprocessing.cpu_count() + nprocs if nprocs < 0 else nprocs
		self.proc = []
		self.spawnonce = spawnonce
Developer: matthewwardrop, Project: python-parampy, Lines: 8, Source file: symmetric.py
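The nprocs handling follows a small convention: None means use every core, and a negative value means all cores minus that many. A quick illustration of the arithmetic (the core count is an assumed value for the example):

import multiprocessing

cpu = multiprocessing.cpu_count()                 # suppose this returns 8
nprocs = -2
nprocs = cpu + nprocs if nprocs < 0 else nprocs
print(nprocs)                                     # 6 -> leave two cores free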

Example 10: main

def main():
    if len(sys.argv) > 1:
        threads = sys.argv[1]
    else:
        threads = 1
    print('Threads to process :', threads)
    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)

    # while True:
    Threadstart(int(threads))
Developer: iyersv, Project: TestRepo, Lines: 12, Source file: kafka_weather_stations_consumer.py

Example 11: which_host

    def which_host(self, urllist, attr):
        """check every url in the given list against all regular expressions
        and extract the value of the chosen html attribute.
        Then use a queue and enough processes to download all matched urls"""
        # make a queue and enough processes as numprocs
        self.q = Queue()
        self.ps = (Process(target=self.use_queue, args=()) for i in range(self.numprocs))

        # enable multiprocessing logging feature
        if debug:
            logger.setLevel(logging.DEBUG)
            log_to_stderr(logging.DEBUG)


        for p in self.ps:
            # start all processes
            p.start()
        
        # piping the urllist urls into a set to purge duplicates
        finalset = set()
        for L in urllist:
            self.stringl = L.get(attr, None)
            # remove the anonym.to string before urls
            if self.stringl.startswith("http://anonym.to/?"):
                self.stringl = re.sub('http://anonym.to/\?', '', self.stringl)
            finalset.add(self.stringl)


        for L in finalset:
            # iterate over the regexp dictionary items; when finding a url
            # matching, put the the class name, url and self.basedir in the queue
            for k, v in regexp_dict.items():
                if k.search(L):
                    self.logger.info("downloading %s" % L)
                    # instantiate and then pass the parse method to the queue.
                    # it downloads but doesn't make the queue do its job
#                    parser = v(L, self.basedir)
#                    self.q.put((parser.parse()))

                    # add the class name and the parameters needed for its __init__
                    # into the queue
                    self.q.put((v, (L, self.basedir)))
                    self.img_counter = self.img_counter + 1
                else:
                    continue

        for i in range(self.numprocs):
            # put a STOP to end the iter builtin inside use_queue
            self.q.put("STOP")

        self.logger.info('%d images were present' % self.img_counter)
        print("%d images were present" % self.img_counter)
Developer: Donearm, Project: PyImagedownloader, Lines: 52, Source file: pyimagedownloader.py
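The use_queue worker method is not part of this excerpt, but the "put a STOP to end the iter builtin" comment points at the standard sentinel pattern: each consumer process loops with the two-argument iter() until it pulls the sentinel string off the queue. A sketch of what such a consumer presumably looks like (an assumption, since use_queue itself is not shown):

    def use_queue(self):
        # iter(callable, sentinel) keeps calling self.q.get() until it returns "STOP".
        for downloader_cls, params in iter(self.q.get, "STOP"):
            parser = downloader_cls(*params)    # e.g. cls(url, self.basedir), as queued above
            parser.parse()                      # perform the actual download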

Example 12: main

def main():
    global csvout
    if len(sys.argv) > 1:
        threads = sys.argv[1]
    else:
        threads = 1
    print('Threads to run :', threads)

    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)

    Threadstart(threads)
Developer: iyersv, Project: TestRepo, Lines: 13, Source file: kafka_raw_data_consumer.py

Example 13: multiprocess

def multiprocess(args):
    step = ((args.token_hi - args.token_lo) / args.worker_count) + 1
    tr1 = range(args.token_lo, args.token_hi, step)  # intermediate points
    tr2 = [(t, t + 1) for t in tr1[1:]]  # add adjacent points
    tr3 = [t for st in tr2 for t in st]  # flatten
    tr4 = [args.token_lo] + tr3 + [args.token_hi]  # add end points
    token_ranges = [tr4[i:i + 2] for i in range(0, len(tr4), 2)]  # make pairs
 
    rate = args.throttle_rate / args.worker_count
 
    multiprocessing.log_to_stderr().setLevel(logging.INFO)
    manager = Manager()
    results = manager.dict()  # create a special shared dict to gather results
 
    workers = [
        Process(
            target=main,
            args=(
                args, worker_index, token_ranges[worker_index], rate, results
            )
        )
        for worker_index in range(args.worker_count)
    ]
 
    os_times_start = os.times()
 
    for worker in workers:
        worker.start()
 
    for worker in workers:
        worker.join()
 
    os_times_stop = os.times()
    exitcode = 0
 
    for worker in workers:
        if worker.exitcode:
            exitcode = worker.exitcode
            break
 
    if results:
        # transform the special dict
        results_dict = analyze_results(results, os_times_stop, os_times_start)
 
        if args.json_output:
            print_json(args, results_dict)
        else:
            print_arguments(args)
            print_results(results_dict)
 
    return(exitcode)
Developer: mvallebr, Project: cql_record_processor, Lines: 51, Source file: bm_copy.py
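The token-range splitting at the top of multiprocess() is easier to follow with concrete numbers. A standalone sketch with hypothetical bounds; // is used so the arithmetic also works on Python 3, where / on integers yields a float and would break range():

token_lo, token_hi, worker_count = 0, 100, 4

step = ((token_hi - token_lo) // worker_count) + 1            # 26
tr1 = list(range(token_lo, token_hi, step))                   # [0, 26, 52, 78]
tr2 = [(t, t + 1) for t in tr1[1:]]                           # [(26, 27), (52, 53), (78, 79)]
tr3 = [t for st in tr2 for t in st]                           # [26, 27, 52, 53, 78, 79]
tr4 = [token_lo] + tr3 + [token_hi]                           # [0, 26, 27, 52, 53, 78, 79, 100]
token_ranges = [tr4[i:i + 2] for i in range(0, len(tr4), 2)]
print(token_ranges)  # [[0, 26], [27, 52], [53, 78], [79, 100]] -- one contiguous slice per worker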

Example 14: run_error

def run_error(platform, working_dir, pcr_error, env_error, 
    human_error, paired_end):
    """
    Simulate sequencing error from the output generated by run_diversity.

    Args:
        platform: string One of "roche", "illumina" or "ion".
        working_dir: path The folder in which to place temporary files.
        pcr_error: bool Should we include a PCR error
        env_error: bool Should we include an ENV error
        human_error: bool Should we include human DNA
        paired_end: bool Are we simulating paired_end data?

    Returns:
        True on completion.
    """

    print "Simulating reads from sequence sets."

    error_data = {}
    # properties of the simulation
    error_data['paired_end'] = paired_end
    error_data['platform'] = platform

    with open(os.path.join(working_dir, evolved_filename), 'rb') as handle:

        evolved_data = pickle.load(handle)

        multiprocessing.log_to_stderr()
        p = LoggingPool(len(evolved_data))
        file_queue = multiprocessing.Manager().Queue()
       
        for result in evolved_data:

            p.apply_async(run_error_thread, [
                file_queue, result, platform, working_dir, 
                pcr_error, env_error, human_error, paired_end
            ])

        p.close()
        p.join()

        error_data['files'] = []
        while not file_queue.empty():
            error_data['files'].append(file_queue.get())

    with open(os.path.join(working_dir, error_filename), 'wb') as handle:
        pickle.dump(error_data, handle, protocol=pickle.HIGHEST_PROTOCOL)

    return True
Developer: hyraxbio, Project: simulated-data, Lines: 50, Source file: run_simulation.py

Example 15: main

def main():

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    if len(sys.argv) >= 4:
        #directory containing fastq library
        fastqsDir = workerForBam.getAbsPath(sys.argv[1])
        
        #directory containing other directories with fasta names
        fastaDir = workerForBam.getAbsPath(sys.argv[2])

        #directory containing file locks
        lockDirPath = workerForBam.getAbsPath(sys.argv[3])
        
        #directory containing temp output -> fastQ's, jobsFile 
        outDir = workerForBam.getAbsPath(sys.argv[4])

        #write all fastq's processing in job file
        combineJobPath = writeCombineBAMJobsFromSAI(outDir, fastqsDir,\
                                                        fastaDir,\
                                                        lockDirPath)

        #call workers to generate paired BAMs from SAIs
        #results = callPairedSAIToBAMWorkers(fastqsDir, fastaDir)
        #print results

    else:
        print 'err: files missing'
Developer: mohit-shrma, Project: bioinfotools, Lines: 29, Source file: BAMsFromPairedFastqsSAI.py
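SUBDEBUG is a logging level specific to multiprocessing (numerically 5, below logging.DEBUG); at that level the module also reports its internal bookkeeping, which helps when debugging worker startup problems. This snippet is Python 2 style; in Python 3 the constant lives in multiprocessing.util rather than on the package itself, so an equivalent setup would presumably be:

import multiprocessing
from multiprocessing.util import SUBDEBUG   # level 5, below logging.DEBUG

logger = multiprocessing.log_to_stderr()
logger.setLevel(SUBDEBUG)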


Note: The multiprocessing.log_to_stderr examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their original authors; copyright in the source code remains with those authors, and distribution and use should follow the corresponding projects' licenses. Please do not repost without permission.