Python Process.join Method Code Examples

This article collects typical usage examples of Python's multiprocessing.Process.join method. If you are wondering what Process.join does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples of the class it belongs to, multiprocessing.Process.


The following presents 15 code examples of the Process.join method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
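
Before going through the collected examples, here is a minimal, self-contained sketch of the basic start/join pattern (the work function and its argument are purely illustrative):

from multiprocessing import Process

def work(n):
    # placeholder task; any picklable top-level callable can be a target
    print(n * n)

if __name__ == '__main__':
    p = Process(target=work, args=(4,))
    p.start()            # spawn the child process
    p.join()             # block until the child has finished
    print(p.exitcode)    # 0 on a clean exit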

Example 1: _get_output_shape

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
def _get_output_shape(model_fn):
    if K.backend() == 'cntk':
        # Create model in a subprocess so that
        # the memory consumed by InceptionResNetV2 will be
        # released back to the system after this test
        # (to deal with OOM error on CNTK backend).
        # TODO: remove the use of multiprocessing from these tests
        # once a memory clearing mechanism
        # is implemented in the CNTK backend.
        def target(queue):
            model = model_fn()
            queue.put(model.output_shape)
        queue = Queue()
        p = Process(target=target, args=(queue,))
        p.start()
        p.join()
        # The error in a subprocess won't propagate
        # to the main process, so we check if the model
        # is successfully created by checking if the output shape
        # has been put into the queue
        assert not queue.empty(), 'Model creation failed.'
        return queue.get_nowait()
    else:
        model = model_fn()
        return model.output_shape
Developer ID: 95vidhi, Project: keras, Lines of code: 27, Source file: applications_test.py
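
The same isolate-in-a-subprocess idea can be written as a small generic helper. The sketch below is not part of the Keras test suite; it assumes the fork start method (a nested function cannot be used as a target under the spawn start method) and a result small enough to fit in the queue's pipe buffer:

from multiprocessing import Process, Queue

def run_isolated(fn, *args):
    """Run fn(*args) in a child process; its memory is freed when the child exits."""
    def target(q):
        q.put(fn(*args))

    q = Queue()
    p = Process(target=target, args=(q,))
    p.start()
    p.join()
    # if the child crashed, nothing was put on the queue
    if q.empty():
        raise RuntimeError('child process produced no result')
    return q.get_nowait()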

Example 2: send_probe_requests

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
def send_probe_requests(interface=None, ssid=None):

    # initialize shared memory
    results = Queue()

    # start sniffer before sending out probe requests
    p = Process(target=sniffer, args=(interface, results,))
    p.start()

    # give sniffer a chance to initialize so that we don't miss
    # probe responses
    time.sleep(3)

    # send out probe requests... sniffer will catch any responses
    ProbeReq(ssid=ssid, interface=interface)

    # make sure to get results from shared memory before allowing 
    # sniffer to join with parent process 
    probe_responses = results.get()

    # join sniffer with its parent process
    p.join()

    # return results
    return probe_responses
Developer ID: BwRy, Project: sentrygun, Lines of code: 27, Source file: sniffer.py
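
The sniffer target is defined elsewhere in sentrygun and is not shown in this snippet. Purely to illustrate how results end up on the shared Queue, a scapy-based worker could look roughly like the sketch below; this is an assumption about its shape, not the project's actual code:

from scapy.all import sniff, Dot11ProbeResp

def sniffer(interface, results):
    responses = []

    def handle(pkt):
        # record the transmitter address of every probe response seen
        if pkt.haslayer(Dot11ProbeResp):
            responses.append(pkt.addr2)

    sniff(iface=interface, prn=handle, timeout=10, store=False)
    results.put(responses)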

Example 3: apply_update

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
def apply_update(fname, status):
    # As soon as python-apt closes its opened files on object deletion,
    # we can drop this fork workaround. As long as it keeps its files
    # open, we run the code in its own fork; the files are then closed on
    # process termination and we can remount the filesystem read-only
    # without errors.
    p = Process(target=_apply_update, args=(fname, status))
    with rw_access("/", status):
        try:
            t_ver = get_target_version(fname)
        except BaseException:
            status.log('Reading xml-file failed!')
            return

        try:
            c_ver = get_current_version()
        except IOError as e:
            status.log('get current version failed: ' + str(e))
            c_ver = ""

        pre_sh(c_ver, t_ver, status)
        p.start()
        p.join()
        status.log("cleanup /var/cache/apt/archives")
        # don't use execute() here, it results in an error that the apt-cache
        # is locked. We currently don't understand this behaviour :(
        os.system("apt-get clean")
        if p.exitcode != 0:
            raise Exception(
                "Applying update failed. See logfile for more information")
        post_sh(c_ver, t_ver, status)
Developer ID: Linutronix, Project: elbe, Lines of code: 33, Source file: updated.py

Example 4: start_schedulers

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
def start_schedulers(options):
    apps = [app.strip() for app in options.scheduler.split(',')]
    try:
        from multiprocessing import Process
    except:
        sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
        return
    processes = []
    code = "from gluon import current;current._scheduler.loop()"
    for app in apps:
        if not check_existent_app(options, app):
            print "Application '%s' doesn't exist, skipping" % (app)
            continue
        print 'starting scheduler for "%s"...' % app
        args = (app,True,True,None,False,code)
        logging.getLogger().setLevel(options.debuglevel)
        p = Process(target=run, args=args)
        processes.append(p)
        print "Currently running %s scheduler processes" % (len(processes))
        p.start()
        print "Processes started"
    for p in processes:
        try:
            p.join()
        except (KeyboardInterrupt, SystemExit):
            print "Processes stopped"
        except:
            p.terminate()
            p.join()
Developer ID: faridsanusi, Project: web2py, Lines of code: 31, Source file: widget.py

Example 5: main

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
def main():
    """
    Creates instances of the above methods and occasionally checks for crashed
    worker processes & relaunches.
    """
    worker_process = list()
    get_update_process = Process(target=get_updates)
    get_update_process.start()
    for i in range(0, int(CONFIG['BOT_CONFIG']['workers'])):
        worker_process.append(Process(target=process_updates))
        worker_process[i].start()
    time_worker = ThreadProcess(target=check_time_args)
    time_worker.start()
    while RUNNING.value:
        time.sleep(30)
        for index, worker in enumerate(worker_process):
            if not worker.is_alive():
                del worker_process[index]
                worker_process.append(Process(target=process_updates))
                worker_process[-1].start()
        if not time_worker.is_alive():
            time_worker = ThreadProcess(target=check_time_args)
            time_worker.start()
        if not get_update_process.is_alive():
            get_update_process = Process(target=get_updates)
            get_update_process.start()
    get_update_process.join()
    time_worker.join()
    for worker in worker_process:
        worker.join()
Developer ID: arcueidB, Project: hitagibot, Lines of code: 32, Source file: hitagi.py

Example 6: fn_with_timeout

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
        def fn_with_timeout(*args, **kwargs):
            conn1, conn2 = Pipe()
            kwargs['_conn'] = conn2
            th = Process(target=fn, args=args, kwargs=kwargs)
            th.start()
            if conn1.poll(self.trial_timeout):
                fn_rval = conn1.recv()
                th.join()
            else:
                print 'TERMINATING DUE TO TIMEOUT'
                th.terminate()
                th.join()
                fn_rval = 'return', {
                    'status': hyperopt.STATUS_FAIL,
                    'failure': 'TimeOut'
                }

            assert fn_rval[0] in ('raise', 'return')
            if fn_rval[0] == 'raise':
                raise fn_rval[1]

            # -- remove potentially large objects from the rval
            #    so that the Trials() object below stays small
            #    We can recompute them if necessary, and it's usually
            #    not necessary at all.
            if fn_rval[1]['status'] == hyperopt.STATUS_OK:
                fn_loss = float(fn_rval[1].get('loss'))
                fn_preprocs = fn_rval[1].pop('preprocs')
                fn_classif = fn_rval[1].pop('classifier')
                if fn_loss < self._best_loss:
                    self._best_preprocs = fn_preprocs
                    self._best_classif = fn_classif
                    self._best_loss = fn_loss
            return fn_rval[1]
Developer ID: richlewis42, Project: hyperopt-sklearn, Lines of code: 36, Source file: estimator.py
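
Stripped of the hyperopt bookkeeping, the Pipe-plus-poll timeout pattern used above reduces to roughly the following sketch (slow_task and the 5-second limit are illustrative):

import time
from multiprocessing import Process, Pipe

def slow_task(conn, seconds):
    time.sleep(seconds)
    conn.send('finished after %d s' % seconds)

if __name__ == '__main__':
    parent_conn, child_conn = Pipe()
    p = Process(target=slow_task, args=(child_conn, 10))
    p.start()
    if parent_conn.poll(5):      # wait at most 5 seconds for a message
        result = parent_conn.recv()
        p.join()
    else:
        p.terminate()            # nothing arrived in time: kill the child
        p.join()
        result = 'timed out'
    print(result)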

Example 7: New_Process_Actor

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
class New_Process_Actor(Actor):
    '''Create an Actor in a new process. Connected as usual with scipysim 
    channels. When this Actor is started, it launches a new process, creates
    an instance of the Actor class passed to it in a second thread, and starts
    that actor.
    '''
    def __init__(self, cls, *args, **kwargs):
        super(New_Process_Actor, self).__init__()
        self.cls = cls
        self.args = list(args)
        self.kwargs = kwargs
        self.mqueue = MQueue()
        self.mevent = MEvent()
        
        if 'input_channel' not in kwargs:
            kwargs['input_channel'] = self.args[0]
        
        chan = kwargs['input_channel']
        kwargs['input_channel'] = self.mqueue
        
        
        print 'chan: ', chan
        self.c2p = Channel2Process(chan, self.mevent, self.mqueue)
        
        self.c2p.start()


    def run(self):
        self.t = Process(target=target, args=(self.cls, self.args, self.kwargs))
        self.t.start()
        self.mevent.set() # signal that process is ready to receive
        self.c2p.join()
        self.t.join()
Developer ID: hardbyte, Project: scipy-sim, Lines of code: 35, Source file: plotter.py

Example 8: main

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
def main(): 
    parca =[]
    counter = 0
    fo = open("C:\Users\Toshiba-PC\Desktop\Dagitik\metin.txt","r")
    text = fo.read(l)
    while text != "":
       text = text.lower()
       parca.append(text)
       counter += l
       fo.seek(counter)
       text = fo.read(l)
    fo.close()
        
    #print parca

    work_queue = Queue()
    done_queue = Queue()
    processes = []
    
    for data in parca: 
        work_queue.put(data)
        
    for w in xrange(n): 
        p = Process(target=worker, args=(work_queue, done_queue)) 
        p.start() 
        processes.append(p) 
        work_queue.put('STOP')
    print processes
    
    for p in processes: 
        p.join()
    done_queue.put('STOP')
    for status in iter(done_queue.get, 'STOP'): 
        print status
Developer ID: izelataman, Project: dagitik, Lines of code: 36, Source file: caesar_fork.py
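
The worker function referenced above is not included in the snippet. A plausible minimal version, consuming chunks until it hits the 'STOP' sentinel, might look like this (the Caesar-shift body is an assumption based on the file name caesar_fork.py):

def worker(work_queue, done_queue):
    # consume text chunks until the 'STOP' sentinel is reached
    for chunk in iter(work_queue.get, 'STOP'):
        # illustrative Caesar shift of 3 applied to lowercase letters
        shifted = ''.join(
            chr((ord(c) - ord('a') + 3) % 26 + ord('a')) if c.islower() else c
            for c in chunk)
        done_queue.put(shifted)
    return True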

Example 9: make_time_series_plot_wrapper

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
def make_time_series_plot_wrapper(input_file='', prefix='temp'):
    ''' wrapper around make_time_series_plot '''
    from .audio_utils import make_time_series_plot
    tmp_ = Process(target=make_time_series_plot, args=(input_file, prefix,))
    tmp_.start()
    tmp_.join()
    return 'Done'
Developer ID: ddboline, Project: roku_app, Lines of code: 9, Source file: roku_utils.py

Example 10: wrapper

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
    def wrapper(*args, **kwargs):
        process = Process(None, func, None, args, kwargs)
        process.start()
        process.join(seconds)
        if process.is_alive():
            process.terminate()
            raise TimeoutError(error_message)
Developer ID: doitwrong, Project: daa-competition, Lines of code: 9, Source file: custom_decorators.py
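
Only the inner wrapper is shown above; the enclosing decorator presumably looks something like the sketch below (the timeout name and the surrounding plumbing are assumptions, not taken from the original repository, and the example assumes a fork-based start method):

import functools
from multiprocessing import Process, TimeoutError

def timeout(seconds, error_message='function call timed out'):
    """Kill the decorated call if it runs longer than `seconds`."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            process = Process(None, func, None, args, kwargs)
            process.start()
            process.join(seconds)        # wait at most `seconds`
            if process.is_alive():
                process.terminate()
                raise TimeoutError(error_message)
        return wrapper
    return decorator

@timeout(5)
def crunch():
    while True:
        pass                             # never returns on its own; terminated after 5 s

Note that process.join(seconds) returns once the timeout expires even if the child is still running, which is why is_alive() has to be checked afterwards.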

Example 11: start_schedulers

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
def start_schedulers(apps='w2p_tvseries'):
    try:
        from multiprocessing import Process
    except:
        sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
        return
    processes = []
    apps = [app.strip() for app in apps.split(',')]
    code = "from gluon import current; current._scheduler.max_empty_runs=10; current._scheduler.loop()"
    logging.getLogger().setLevel(logging.INFO)
    if len(apps) == 1:
        print 'starting single-scheduler for "%s"...' % apps[0]
        run(apps[0], True, True, None, False, code)
        return
    for app in apps:
        print 'starting scheduler for "%s"...' % app
        args = (app, True, True, None, False, code)
        p = Process(target=run, args=args)
        processes.append(p)
        print "Currently running %s scheduler processes" % (len(processes))
        p.start()
        print "Processes started"
    for p in processes:
        try:
            p.join()
        except (KeyboardInterrupt, SystemExit):
            print "Processes stopped"
        except:
            p.terminate()
            p.join()
Developer ID: imclab, Project: w2p_tvseries, Lines of code: 32, Source file: w2p_tvseries.py

Example 12: pullMusic

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
def pullMusic(folders):	
	""" 
		Walk through the music folders and create song objects.  
		Return an array
	"""
	print "Start Parsing Folders!"
	lock = Lock()
	dbQueue = Queue()
	
	# create a process for each music folder in the configuration file
	for folder in folders:
		walker = Process(target=worker.walker, args=(folder, dbQueue, lock,))
		walker.start()
	while dbQueue.empty():
		pass
	
	# create a process to enter files from the dbQueue into the database
	enterdb = Process(target=worker.enterDB, args=(dbQueue, lock))
	enterdb.start()

	# wait until enterDB is finished before starting
	# This can be taken out later.  I want complete information for testing
	enterdb.join()
	
	print "Done!"
Developer ID: clly, Project: strmr, Lines of code: 27, Source file: parse.py

Example 13: watcher

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
def watcher():
    """This little code snippet is from
    http://greenteapress.com/semaphores/threading_cleanup.py (2012-07-31)
    It's now possible to interrupt the testrunner via ctrl-c at any time
    in a platform neutral way."""
    if sys.platform == 'win32':
        p = Process(target=main, name="MainProcess")
        p.start()
        try:
            p.join()
            rc = p.exitcode
            if rc > 0:
                sys.exit(rc)
        except KeyboardInterrupt:
            print 'KeyBoardInterrupt'
            p.terminate()
    else:
        child = os.fork()
        if child == 0:
            main() # child runs test
        try:
            rc = os.waitpid(child, 0)[1] /256 # exit status is the high order byte of second member of the tuple
            if rc > 0:
                sys.exit( rc )
        except KeyboardInterrupt:
            print 'KeyBoardInterrupt'
            try:
                os.kill(child, signal.SIGKILL)
            except OSError:
                pass
        except OSError:
            pass

    sys.exit()
Developer ID: arod1987, Project: testrunner, Lines of code: 36, Source file: testrunner.py

Example 14: emailSubsystem

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
class emailSubsystem(object):
    def __init__(self):
        ### will move to Celery eventually; with Celery, the app would be able to periodically
        # wakeup and check on replyQueue to see which emails were send, which were not and
        # what to do ...

        self.emailQueue = JoinableQueue()
        self.replyQueue = JoinableQueue()

        self.worker = Process(target=sendEmailWorker, args=(self.emailQueue, self.replyQueue))

    def start(self):
        # temporarily comment out starting a new process as it seems to leave zombies
        # and causes app not to start as max process limit is reached.
        #self.worker.start()
        return

    def shutdown(self):
        # post poison pill
        # wait on the queue to be done; ie join on emailQueue
        # wait on the worker process to die; ie join on worker

        self.emailQueue.put(None)
        self.emailQueue.join()
        self.worker.join()
Developer ID: haribcva, Project: the_library_app, Lines of code: 27, Source file: library.py
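
The sendEmailWorker target is not part of the snippet. For the shutdown sequence above to work, it has to consume the None poison pill and call task_done() for every item it takes, roughly along the lines of this sketch (the send logic is a placeholder):

def sendEmailWorker(emailQueue, replyQueue):
    while True:
        item = emailQueue.get()
        if item is None:            # poison pill: acknowledge it and exit
            emailQueue.task_done()
            break
        # ... send the email described by `item` here ...
        replyQueue.put(('sent', item))
        emailQueue.task_done()      # lets emailQueue.join() return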

Example 15: __parseClub

# Required import: from multiprocessing import Process [as alias]
# Or: from multiprocessing.Process import join [as alias]
    def __parseClub(self):
        """
        get all posts urls in a club
        url - the root url of a  club
        initDeadline - the default timestamp when there is no file existed
        """

        while not self.__PageUrlQueue.empty() and \
            self.__parsePage(self.__PageUrlQueue.get()):
            if len(self.__PostUrlList) > 50:
                break

        print("Length of List:%d", len(self.__PostUrlList))

        processes = []
        while len(self.__PostUrlList):
            listLen=len(self.__PostUrlList)
            if MaxProcessNum > listLen:
                processNum = listLen
            else:
                processNum = MaxProcessNum
            for i in range(processNum):
                url = self.__PostUrlList.pop()
                postParser = PostParser(self.__url, url)
                subProcess = Process(target=postParser.parse)
                processes.append(subProcess)
                subProcess.start()
                subProcess.join()

        print('Done retrieving all posts of club url : %s' % self.__url)
        return True
Developer ID: Hearen, Project: Scrawlers, Lines of code: 33, Source file: ClubParser.py
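
Note that subProcess.join() is called inside the inner loop, immediately after each start(), so the posts are effectively parsed one at a time rather than in parallel. If concurrency is the goal, the usual arrangement is to start the whole batch first and join afterwards; a minimal self-contained sketch (parse and the example URLs are illustrative):

from multiprocessing import Process

def parse(url):
    print('parsing %s' % url)        # placeholder for PostParser(...).parse

if __name__ == '__main__':
    urls = ['http://example.com/post/%d' % i for i in range(8)]
    batch = [Process(target=parse, args=(u,)) for u in urls]
    for p in batch:
        p.start()                    # launch the whole batch first
    for p in batch:
        p.join()                     # then wait for all of them together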


Note: The multiprocessing.Process.join method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For redistribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.