

Python ThreadPoolExecutor.submit Method Code Examples

This article collects and summarizes typical usage examples of Python's concurrent.futures.thread.ThreadPoolExecutor.submit method. If you are wondering what ThreadPoolExecutor.submit does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class, concurrent.futures.thread.ThreadPoolExecutor.


The sections below present 15 code examples of the ThreadPoolExecutor.submit method, sorted by popularity by default.
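
Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: create a ThreadPoolExecutor, submit a callable together with its arguments, and read results from the returned Future objects. The fetch_length function and the sample inputs are illustrative assumptions, not taken from any project below.

from concurrent.futures import as_completed
from concurrent.futures.thread import ThreadPoolExecutor

def fetch_length(text):
    # Placeholder work unit standing in for real I/O or computation.
    return len(text)

executor = ThreadPoolExecutor(max_workers=4)
# submit() schedules the callable and immediately returns a Future.
futures = [executor.submit(fetch_length, word) for word in ("spam", "eggs", "ham")]

# as_completed() yields each Future as soon as its work has finished.
for future in as_completed(futures):
    print(future.result())

executor.shutdown(wait=True)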

Example 1: HttpService

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]
class HttpService(object):
    
    def __init__(self):
        self.__async_executor = ThreadPoolExecutor(max_workers=10)
        self.logger = logging.getLogger(__name__)
        self.__http = Http()
    
    def get(self, request):
        return self.make_request(request, 'GET')
    
    def post(self, request):
        return self.make_request(request, 'POST')
    
    def put(self, request):
        return self.make_request(request, 'PUT')
    
    def delete(self, request):
        return self.make_request(request, 'DELETE')
    
    def make_request(self, request, method):
        future = HttpFuture()
        self.__async_executor.submit(self.__do_request, request, method, future)
        return future

    def __do_request(self, request, method, future):
        try:
            uri = request.url + urllib.parse.urlencode(request.parameters)
            headers, content = self.__http.request(uri, method, request.data, request.headers)
            future.fulfill(headers, content)
        except Exception as ex:
            self.logger.exception("Http __do_request attempt failed with exception")
Developer: westonpace, Project: bowser, Lines: 33, Source: http.py
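
Example 1 hands results back through a project-specific HttpFuture that the worker fills in. It is worth noting that submit() itself already returns a concurrent.futures.Future carrying the callable's return value or exception, so a similar helper can often rely on that object directly. A hedged sketch under that assumption (do_request below is a hypothetical stand-in, not the bowser implementation):

from concurrent.futures.thread import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=10)

def do_request(url, method):
    # Hypothetical worker; a real implementation would perform the HTTP call.
    return {"url": url, "method": method, "status": 200}

# The Future returned by submit() carries either the return value or the exception.
future = executor.submit(do_request, "http://example.com/", "GET")
print(future.result(timeout=5))  # blocks until the worker finishes (or times out)
executor.shutdown()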

Example 2: ThreadedPoolExecutor

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]
class ThreadedPoolExecutor(PoolExecutor):
    '''
    Pooled executor implementation based on a wrapped
    ThreadPoolExecutor object.
    '''
    def __init__(self, context, max_workers=1):
        super(ThreadedPoolExecutor, self).__init__(context)
        self._pool = ThreadPoolExecutor(max_workers)
    
    def execute(self, task):
        self._pool.submit(task.processor)
Developer: shirdrn, Project: python, Lines: 13, Source: thread.py

Example 3: test_log_traceback_threaded

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]
def test_log_traceback_threaded(caplog):
    @log_traceback
    def f():
        raise Exception()

    e = ThreadPoolExecutor(max_workers=1)
    f = e.submit(f)
    while f.running():
        time.sleep(0.1)

    assert caplog.records()[0].message.endswith(" is about to be started")
    assert caplog.records()[1].message.startswith("Traceback")
    assert caplog.records()[1].message.endswith("Exception\n")
Developer: TomasTomecek, Project: sen, Lines: 15, Source: test_util.py
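
The test above polls f.running() in a sleep loop before asserting on the captured log records. A common alternative is to block until the future completes, either with Future.result() (which re-raises the task's exception) or with concurrent.futures.wait, as in this small sketch:

from concurrent.futures import wait
from concurrent.futures.thread import ThreadPoolExecutor

def failing_task():
    raise ValueError("boom")

executor = ThreadPoolExecutor(max_workers=1)
future = executor.submit(failing_task)

# wait() blocks until the future is done, without consuming its exception.
wait([future])
print(future.exception())  # ValueError('boom')
executor.shutdown()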

Example 4: prepareServer

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]
def prepareServer(RequestHandlerClass, pipe, threads, timeout):
    '''
    Prepare in a process the request handling.
    '''
    def process(request, address):
        RequestHandlerClass(request, address, None)
        try: request.shutdown(socket.SHUT_WR)
        except socket.error: pass  # some platforms may raise ENOTCONN here
        request.close()
    
    pool = ThreadPoolExecutor(threads)
    while True:
        if not pipe.poll(timeout): break
        else:
            data = pipe.recv()
            if data is None: break
            elif data is True: continue
            
            requestfd, address = data
            request = socket.fromfd(rebuild_handle(requestfd), socket.AF_INET, socket.SOCK_STREAM)
            
            pool.submit(process, request, address)
            
    pool.shutdown(False)
Developer: AtomLaw, Project: Ally-Py, Lines: 26, Source: server_production.py

Example 5: DispatcherHTTPServer

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]
class DispatcherHTTPServer(HTTPServer):
    def __init__(self, server_address, RequestHandlerClass,
                 bind_and_activate=True, handlers=[],
                 srv_path=".",
                 configuration={}):
        HTTPServer.__init__(self, server_address, RequestHandlerClass, bind_and_activate)
        self.handlers = sorted(handlers, key=lambda k: k["weight"])
        self.srv_path = srv_path
        self.configuration = configuration
        self.logger = self.setup_logger()
        self.executor = ThreadPoolExecutor(max_workers=20)
        self.initialize_server()
        
    def initialize_server(self):
        self.logger.info('Initializing server')
    
    def finish_request(self, request, client_address):
        def async_finish_request(server, request, client_address, logger):
            server.RequestHandlerClass(request, client_address, server, logger)
        # Pass the callable and its arguments separately; calling it inline would run it synchronously.
        self.executor.submit(async_finish_request, self, request, client_address, self.logger)
    
    def setup_logger(self):
        logger = None
        if self.configuration.get('log_config_file') is not None:
            logger = self.get_logger(self.configuration.get('log_config_file'))
        else:
            logger = self.get_default_logger()
        return logger
        
    def get_logger(self, config_file):
        logging.config.fileConfig(config_file)
        return logging.getLogger('srv')
    
    def get_default_logger(self):
        logging.basicConfig(level=logging.INFO)
        return logging.getLogger('srv')
Developer: natemago, Project: srv, Lines: 38, Source: srv.py
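
A detail worth spelling out about finish_request() above: submit() expects the callable and its arguments separately. submit(f, a, b) runs f(a, b) on a worker thread, whereas submit(f(a, b)) calls f immediately on the calling thread and hands the pool whatever it returned. A minimal sketch of the difference (the handle function is illustrative):

import threading
from concurrent.futures.thread import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=2)

def handle(name):
    return f"{name} handled on {threading.current_thread().name}"

# Correct: the pool invokes handle("request-1") on a worker thread.
future = executor.submit(handle, "request-1")
print(future.result())

# Incorrect: handle("request-2") would run right here, and submit() would receive
# a plain string, which the worker thread would then fail to call.
# executor.submit(handle("request-2"))
executor.shutdown()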

Example 6: Bender

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]
class Bender(object):

    def __init__(self, backbone, brain=None):
        self._backbone = backbone
        self._brain = brain if brain is not None else Brain()
        self._brain_lock = threading.Lock()
        self._regex_to_response = OrderedDict()
        self._scripts = OrderedDict()

        self._pool = ThreadPoolExecutor(max_workers=4)
        self._futures = []  # list of futures submitted to the pool
        self._stop_loop = threading.Event()

    def register_script(self, name, script):
        self._scripts[name] = script

    def register_builtin_scripts(self):
        for name, script in scripts.get_builtin_scripts():
            self.register_script(name, script)

    def register_setuptools_scripts(self):
        for p in pkg_resources.iter_entry_points('bender_script'):
            obj = p.load()
            if inspect.isclass(obj):
                obj = obj()
            self.register_script(p.name, obj)

    def get_script(self, name):
        return self._scripts[name]

    def iter_scripts(self):
        return iter(self._scripts.items())

    def start(self):
        self._brain.load()
        self._backbone.on_message_received = self.on_message_received

        self.register_builtin_scripts()
        self.register_setuptools_scripts()

        for script in self._scripts.values():
            hooks.call_unique_hook(script, 'script_initialize_hook',
                                   brain=self._brain)

        hooks.call_unique_hook(self._backbone, 'backbone_start_hook')

    def shutdown(self):
        self._pool.shutdown(wait=True)
        for name, script in list(self._scripts.items()):
            self._scripts.pop(name)
            hooks.call_unique_hook(script, 'script_shutdown_hook',
                                   brain=self._brain)

        hooks.call_unique_hook(self._backbone, 'backbone_shutdown_hook',
                               brain=self._brain)
        self._brain.dump()
        self._stop_loop.set()

    def request_shutdown(self):
        self._stop_loop.set()

    def loop(self):
        self.start()
        self._stop_loop.wait()
        self.shutdown()

    def on_message_received(self, msg):

        def thread_exec(hook, brain, msg, match):
            try:
                hooks.call(hook, brain=self._brain, msg=msg, match=match,
                           bender=self)
            except Exception as e:
                msg.reply('*BZZT* %s' % e)
            else:
                with self._brain_lock:
                    brain.dump()

        handled = False
        for script in self._scripts.values():
            for hook in hooks.find_hooks(script, 'respond_hook'):
                match = re.match(hook.inputs['regex'], msg.get_body(),
                                 re.IGNORECASE | re.DOTALL)
                if match:
                    f = self._pool.submit(thread_exec, hook, self._brain, msg,
                                          match)
                    self._futures.append(f)
                    handled = True

        if not handled:
            msg.reply('Command not recognized')

    def wait_all_messages(self):
        while self._futures:
            f = self._futures.pop()
            f.result()  # wait until future returns
Developer: bender-bot, Project: bender, Lines: 98, Source: _bender.py

Example 7: __init__

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]
class RuleService:


    def __init__(self):
        # Rule processors.
        self.__ruleController = RuleController()
        self.__methodController = MethodController()
        self.__triggerController = TriggerController.instance()
        
        self.__ruleUpdateThreadPool = ThreadPoolExecutor(max_workers=1)
        self.__ruleExecThreadPool = ThreadPoolExecutor(max_workers=AppConstants.MAX_RULE_EXEC_THREAD_SIZE)
        
        # Rule run workers.
        self.__ruleExecInfos = {}
        self.__condCallGroup = MethodCallGroup()
        self.__execCallGroup = MethodCallGroup()
        
        # Listeners.
        self.__ruleController.listen_to_rule_status_change(self.__on_rule_status_changed)
        GroupController.instance().listen_to_group_icon_change(self.__on_group_icon_changed)
        self.__methodController.listen_to_method_status_change(self.__on_method_status_changed)
        EventController.instance().listen_to_event_callback(self.__on_method_event_callback)
        self.__triggerController.listen_to_trigger_callback(self.__on_trigger_callback)
        
    def __on_rule_status_changed(self, ruleId, oldEnabled, newEnabled, oldStatusProcessed, newStatusProcessed):
        '''
        Trigger Source: RuleController --> This
        Callback when a rule is re-enabled OR statusProcessed changed to "updated".
        '''
        if newEnabled == True and newStatusProcessed == AppConstants.RULE_STATUS_UPDATED:
            if oldEnabled != newEnabled or oldStatusProcessed != newStatusProcessed:
                self.__ruleExecThreadPool.submit(self.__trigger_rule_implementation, ruleId=ruleId, checkCondition=True)
        
    def __on_group_icon_changed(self, kbxGroupId):
        '''
        Trigger Source: GroupController --> This
        Callback when kbxGroupIcon changed.
        '''
        ruleIdsFromCond = self.__ruleController.list_rule_ids_which_has_kbx_group_id_as_condition(kbxGroupId)
        ruleIdsFromExec = self.__ruleController.list_rule_ids_which_has_kbx_group_id_as_execution(kbxGroupId)
            
        # Broadcast rules updated messages.
        for ruleId in set(ruleIdsFromCond + ruleIdsFromExec):
            self.__broadcast_message__rule_updated(ruleId)

    def __on_method_status_changed(self, kbxMethodId, oldKBXMethodStatus, newKBXMethodStatus):
        '''
        Trigger Source: MethodController --> This
        Callback when kbxMethodStatus changed.
        '''
        if oldKBXMethodStatus != newKBXMethodStatus:
            ruleIdsFromCond = self.__ruleController.list_rule_ids_which_has_kbx_method_id_as_condition(kbxMethodId)
            ruleIdsFromExec = self.__ruleController.list_rule_ids_which_has_kbx_method_id_as_execution(kbxMethodId)
                
#             # Executes rules with conditions affected.
#             if newKBXMethodStatus == SharedMethod.METHOD_STATUS_ACTIVE:
#                 for ruleId in ruleIdsFromCond:
#                     self.__ruleExecThreadPool.submit(self.__trigger_rule_implementation, ruleId=ruleId, checkCondition=True)
            
            # Broadcast rules updated messages.
            for ruleId in set(ruleIdsFromCond + ruleIdsFromExec):
                self.__broadcast_message__rule_updated(ruleId)
                
    def __on_method_event_callback(self, kbxMethodId, eventTag, eventData):
        '''
        Trigger Source: EventController --> MethodController --> This
        Callback when a method with event broadcasted event.
        '''
        ruleIds = self.__ruleController.list_rule_ids_which_has_kbx_method_id_as_condition(kbxMethodId)
        for ruleId in ruleIds:
            self.__ruleExecThreadPool.submit(self.__trigger_rule_implementation, ruleId=ruleId, 
                                             checkCondition=True, eventTag=eventTag, 
                                             eventData=eventData, eventMethodId=kbxMethodId)
        
    def __on_trigger_callback(self, ruleId):
        '''
        Trigger Source: TriggerController --> This
        Callback when a rule is triggered.
        '''
        self.__ruleExecThreadPool.submit(self.__trigger_rule_implementation, ruleId=ruleId, checkCondition=True)
        
    def set_rule(self, trigger, condition, execution, ruleId=None, ruleName=None, ruleProtected=False, enabled=True):
        '''
        Create/Edit(with ruleId provided) an existing rule.
        
        trigger:Dictionary
        condition:List
        execution:List
        ruleId:Integer <Optional>
        ruleName:String <Optional>
        ruleProtected:Boolean <Optional>
        enabled:Boolean
        
        Returns "ruleId"
        '''
        def process_method_list(methodList):
            #===================================================================
            # Basic type validation
            #===================================================================
            if not isinstance(methodList, list):
#......... part of the code omitted here .........
Developer: TheStackBox, Project: xuansdk, Lines: 103, Source: ruleService.py

Example 8: submit

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]
    def submit(self, fn, *args, **kwargs):
        if isinstance(fn, Task):
            self._task_map[id(fn)] = fn

        return ThreadPoolExecutor.submit(self, fn, *args, **kwargs)
Developer: cathalgarvey, Project: bytestag, Lines: 7, Source: events.py
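
Example 8 overrides submit() in a ThreadPoolExecutor subclass to record Task objects in a map before delegating to the parent implementation. A self-contained sketch of the same idea (TrackingExecutor and this minimal Task class are illustrative, not the bytestag types):

from concurrent.futures.thread import ThreadPoolExecutor

class Task:
    def __init__(self, name):
        self.name = name

    def __call__(self):
        return f"ran {self.name}"

class TrackingExecutor(ThreadPoolExecutor):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._task_map = {}

    def submit(self, fn, *args, **kwargs):
        # Record Task instances before handing them to the pool.
        if isinstance(fn, Task):
            self._task_map[id(fn)] = fn
        return super().submit(fn, *args, **kwargs)

executor = TrackingExecutor(max_workers=2)
task = Task("demo")
print(executor.submit(task).result())  # "ran demo"
print(len(executor._task_map))         # 1
executor.shutdown()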

Example 9: OpticalPathManager

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]

#......... part of the code omitted here .........
        Update the acquisition quality expected. Depending on the quality,
        some hardware settings will be adjusted.
        quality (ACQ_QUALITY): the acquisition quality
        """
        assert quality in (ACQ_QUALITY_FAST, ACQ_QUALITY_BEST)

        if quality == self.quality:
            return
        self.quality = quality

        if self.microscope.role in ("secom", "delphi"):
            if quality == ACQ_QUALITY_FAST:
                # Restore the fan (if it was active before)
                self._setCCDFan(True)
            # Don't turn off the fan if BEST: first wait for setPath()

    def setPath(self, mode, detector=None):
        """
        Given a particular mode it sets all the necessary components of the
        optical path (found through the microscope component) to the
        corresponding positions.
        path (stream.Stream or str): The stream or the optical path mode
        detector (Component or None): The detector which will be targeted on this
          path. This can only be set if the path is a str (optical mode). That
          is useful in case the mode can be used with multiple detectors (eg,
          fiber-align on a SPARC with multiple spectrometers). When path is a
          Stream, the Stream.detector is always used.
        return (Future): a Future allowing to follow the status of the path
          update.
        raises (via the future):
            ValueError if the given mode does not exist
            IOError if a detector is missing
        """
        f = self._executor.submit(self._doSetPath, mode, detector)

        return f

    def _doSetPath(self, path, detector):
        """
        Actual implementation of setPath()
        """
        if isinstance(path, stream.Stream):
            if detector is not None:
                raise ValueError("Not possible to specify both a stream, and a detector")
            try:
                mode = self.guessMode(path)
            except LookupError:
                logging.debug("%s doesn't require optical path change", path)
                return
            target = self.getStreamDetector(path)  # target detector
        else:
            mode = path
            if mode not in self._modes:
                raise ValueError("Mode '%s' does not exist" % (mode,))
            comp_role = self._modes[mode][0]
            if detector is None:
                target = self._getComponent(comp_role)
            else:
                target = detector

        logging.debug("Going to optical path '%s', with target detector %s.", mode, target.name)

        # Special SECOM mode: just look at the fan and be done
        if self.microscope.role in ("secom", "delphi"):
            if self.quality == ACQ_QUALITY_FAST:
                self._setCCDFan(True)
Developer: delmic, Project: odemis, Lines: 70, Source: path.py

Example 10: RabbitManager

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]

#......... part of the code omitted here .........
        """
        self.log.info("Acknowledging message %s", delivery_tag)
        self._channel.basic_ack(delivery_tag)
        
    def requeue_message(self, delivery_tag):
        """
        
        :param delivery_tag: 
        :return:
        """
        self.log.info("Requeuing message %s", delivery_tag)
        self._channel.basic_nack(delivery_tag, requeue=True)

    def on_message(self, channel, basic_deliver, properties, body):
        """Invoked when message received from rabbit

        :param pika.channel.Channel channel:
        :param pika.spec.Basic.Deliver basic_deliver:
        :param pika.spec.BasicProperties properties:
        :param str body:
        :return:
        """

        self.log.info("Received messages # %s from %s",
                      basic_deliver.delivery_tag,
                      properties.app_id)
        
        try:
            if self._tasks_number >= self._max_tasks:
                raise RuntimeError("Max tasks limit reached")
            
            self._tasks_number += 1
            
            ftr = self._executor.submit(self.process_task, body)

            def process_done(future: Future):
                nonlocal self
                self._tasks_number -= 1
                if future.cancelled():
                    # process_task ended by cancel
                    self.requeue_message(basic_deliver.delivery_tag)
                else:
                    if future.exception():
                        exception = future.exception()
                        if not isinstance(exception, RequeueMessage):
                            self.log.exception(exception)
                        
                        self.requeue_message(
                            basic_deliver.delivery_tag
                        )
                    else:
                        self.acknowledge_message(basic_deliver.delivery_tag)

            ftr.add_done_callback(process_done)

            return ftr

        except RuntimeError:
            self.requeue_message(basic_deliver.delivery_tag)
            time.sleep(0.5)

        except Exception as e:
            self.log.exception(e)
            self.requeue_message(basic_deliver.delivery_tag)
Developer: ifrpl, Project: toddler, Lines: 70, Source: __init__.py

Example 11: do_test1

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]
def do_test1(workers):
    param = {"max_workers": workers}
    start = round(time.time() + _start_warm_up)
    input = input_generator(workers, start)
    loop = asyncio.new_event_loop()

    lock = threading.Lock()
    tresult = []
    presult = []
    cresult = []

    def result_checker(list, lock, fut):
        with lock:
            try:
                list.append(fut.result())
            except Exception as e:
                list.append(e)

    texec = ThreadPoolExecutor(**param)
    pexec = ProcessPoolExecutor(**param)
    cexec = CoroutinePoolExecutor(**param, loop=loop)

    for x in input:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))

        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))

        future = cexec.submit(async_wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, cresult, lock))

    texec.shutdown(False)
    pexec.shutdown(False)
    loop.run_until_complete(cexec.shutdown(False))

    try:
        loop.run_until_complete(cexec.shutdown(True))
        texec.shutdown(True)
        pexec.shutdown(True)
    finally:
        loop.close()

    tresult = [round((x - start) / _precision) for x in tresult]
    presult = [round((x - start) / _precision) for x in presult]
    cresult = [round((x - start) / _precision) for x in cresult]

    result = True
    for (t, p, c) in zip(tresult, presult, cresult):
        result = result and (t == p)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
        result = result and (p == c)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
        result = result and (c == t)
        if not result:
            print(tresult)
            print(presult)
            print(cresult)
            print(t, p, c)
            assert False
    return result
Developer: leeopop, Project: coexecutor, Lines: 76, Source: test_utils.py

Example 12: __init__

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]

#......... part of the code omitted here .........
        scene["sceneProtected"] = sceneProtected
        scene["sceneIcon"] = sceneIcon
        scene["execution"] = execution

        #=======================================================================
        # Append if its new scene
        #=======================================================================
        def __update_scene(scene):
            try:
                # Fire scene update start event
                sceneId = scene["sceneId"]
                sceneName = scene["sceneName"]
                    
                # Add methods to subscribe list
                methodIds = [kbxMethod["kbxMethodId"] for kbxMethod in scene["execution"]]
                self.__methodController.add(methodIds)
                    
                # Update "scene" base table
                self.__sceneController.update(scene)
                self.__sceneController.commit()

            except Exception as e:
                self.__sceneController.rollback()
                self.__broadcast_message__scene_update_failed(sceneId, sceneName)
                Logger.log_error("SceneService __update_scene failed:", e, "-- rolledback")
            else:
                # Broadcast message: completed updating a scene
                self.__broadcast_message__scene_updated(sceneId)

        #=======================================================================
        # Submit to a thread to process other info, and return... performance...
        #=======================================================================
        # Only 1 worker in the threadPool, it works as threading.Lock
        self.__sceneUpdateThreadPool.submit(__update_scene, scene)
        
        return sceneId

    @SCENE_PROCESS_SYNC
    def delete_scene(self, sceneId):
        self.__verify_scene_updated(sceneId)
        
        try:
            favSort = self.__sceneController.get_favsort_of(sceneId) # To determine should favorited_Scene_deleted broadcasted
            self.__sceneController.delete(sceneId)
            self.__sceneController.commit()
        except Exception as e:
            self.__sceneController.rollback()
            traceback.print_exc()
            Logger.log_error("SceneService delete_scene ex:", e, "-- rolled back")
            raise AutomationException(11906, "Unable to delete scene, problem - " + str(e))
        else:
            self.__broadcast_message__scene_deleted(sceneId)
            if favSort is not None:
                self.__broadcast_message__favorited_scene_deleted(sceneId)

    @SCENE_PROCESS_SYNC
    def execute_scene(self, sceneId, serUrl=None, language="en"):
        '''
        Execute a scene.
        Scene execution will only be recorded if serUrl is specified.
        '''
        self.__verify_scene_updated(sceneId)
        
        self.__sceneExecLocks.setdefault(sceneId, SceneExecLock())
        sceneExecLock = self.__sceneExecLocks.get(sceneId)
        isAcquired = sceneExecLock.acquire(False) # Raise error if failed to acquire.
Developer: TheStackBox, Project: xuansdk, Lines: 70, Source: sceneService.py
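
The scene service above funnels all updates through a pool with a single worker, and an inline comment notes that this "works as threading.Lock". That relies on the fact that a one-worker executor runs submitted callables strictly one at a time, in submission order. A minimal sketch of that serialization guarantee:

import time
from concurrent.futures.thread import ThreadPoolExecutor

results = []

def record(label):
    # With max_workers=1 these calls never overlap, so no extra locking is needed here.
    results.append(f"start {label}")
    time.sleep(0.1)
    results.append(f"end {label}")

with ThreadPoolExecutor(max_workers=1) as executor:
    for label in ("a", "b", "c"):
        executor.submit(record, label)

print(results)  # ['start a', 'end a', 'start b', 'end b', 'start c', 'end c']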

Example 13: do_test3

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]
def do_test3(workers):
    param = {"max_workers": workers}
    loop = asyncio.new_event_loop()

    lock = threading.Lock()
    tresult = []
    presult = []
    cresult = []

    pre_input1 = input_generator(workers, 0)
    pre_input2 = input_generator(workers, max(pre_input1))
    pre_input3 = input_generator(workers, max(pre_input2))

    def result_checker(list, lock, fut):
        with lock:
            try:
                list.append(fut.result())
            except Exception as e:
                list.append(e)

    texec = ThreadPoolExecutor(**param)
    pexec = ProcessPoolExecutor(**param)
    cexec = CoroutinePoolExecutor(**param, loop=loop)

    tstart = round(time.time()+1)
    input1 = [tstart + i for i in pre_input1]
    input2 = [tstart + i for i in pre_input2]
    input3 = [tstart + i for i in pre_input3]

    for x in input1:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))
    result_iter = texec.map(wake_at, input2)
    for x in input3:
        future = texec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, tresult, lock))
    for x in result_iter:
        with lock:
            tresult.append(x)

    texec.shutdown(True)

    pstart = round(time.time() + _start_warm_up)
    input1 = [pstart + i for i in pre_input1]
    input2 = [pstart + i for i in pre_input2]
    input3 = [pstart + i for i in pre_input3]

    for x in input1:
        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))
    result_iter = pexec.map(wake_at, input2)
    for x in input3:
        future = pexec.submit(wake_at, x)
        future.add_done_callback(
            functools.partial(result_checker, presult, lock))
    for x in result_iter:
        with lock:
            presult.append(x)

    pexec.shutdown(True)

    cstart = round(time.time() + _start_warm_up)
    input1 = [cstart + i for i in pre_input1]
    input2 = [cstart + i for i in pre_input2]
    input3 = [cstart + i for i in pre_input3]

    async def async_main():
        for x in input1:
            future = cexec.submit(async_wake_at, x)
            future.add_done_callback(
                functools.partial(result_checker, cresult, lock))
        result_iter = cexec.map(async_wake_at, input2)
        for x in input3:
            future = cexec.submit(async_wake_at, x)
            future.add_done_callback(
                functools.partial(result_checker, cresult, lock))
        async for x in result_iter:
            with lock:
                cresult.append(x)
        await cexec.shutdown(False)

    loop.run_until_complete(async_main())

    try:
        loop.run_until_complete(cexec.shutdown(True))
        texec.shutdown(True)
        pexec.shutdown(True)
    finally:
        loop.close()

    tresult = [round((x - tstart) / _precision) for x in tresult]
    presult = [round((x - pstart) / _precision) for x in presult]
    cresult = [round((x - cstart) / _precision) for x in cresult]

    result = True
    for (t, p, c) in zip(tresult, presult, cresult):
        result = result and (t == p)
#......... part of the code omitted here .........
Developer: leeopop, Project: coexecutor, Lines: 103, Source: test_utils.py

Example 14: square

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]
'''
Created on 16-Nov-2015

@author: Virendra
'''
from concurrent.futures.thread import ThreadPoolExecutor
import threading
import time

def square(n):
    print ("Calculating square of %d by thread name %s " % (n, threading.current_thread()))
    time.sleep(3)
    print ("Square of number %d is %d calculated by thread %s " % (n, n*n, threading.current_thread()))

executor = ThreadPoolExecutor(max_workers=5)
numbers = range(1,10)
for number in numbers:
    executor.submit(square, number)
Developer: cycorax12, Project: Python-Snippets, Lines: 20, Source: MulthithreadProcessor.py
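
Example 14 submits each number in a loop and never calls shutdown() explicitly; the queued work still completes because concurrent.futures waits for its worker threads at interpreter exit. A common variant, sketched below, uses the executor as a context manager (which calls shutdown(wait=True) on exit) and map() when every task runs the same function:

import threading
from concurrent.futures.thread import ThreadPoolExecutor

def square(n):
    return n * n, threading.current_thread().name

# The with-block shuts the pool down automatically once all results are consumed.
with ThreadPoolExecutor(max_workers=5) as executor:
    for value, worker in executor.map(square, range(1, 10)):
        print(value, worker)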

Example 15: UI

# Module to import: from concurrent.futures.thread import ThreadPoolExecutor [as alias]
# Or: from concurrent.futures.thread.ThreadPoolExecutor import submit [as alias]
class UI(urwid.MainLoop):
    def __init__(self):
        self.d = DockerBackend()

        # root widget
        self.mainframe = urwid.Frame(urwid.SolidFill())
        self.buffers = []
        self.footer = Footer(self)

        self.executor = ThreadPoolExecutor(max_workers=4)

        root_widget = urwid.AttrMap(self.mainframe, "root")
        self.main_list_buffer = None  # singleton

        screen = urwid.raw_display.Screen()
        screen.set_terminal_properties(256)
        screen.register_palette(PALLETE)

        super().__init__(root_widget, screen=screen)
        self.handle_mouse = False
        self.current_buffer = None

    def run_in_background(self, task, *args, **kwargs):
        logger.info("running task %r(%s, %s) in background", task, args, kwargs)
        self.executor.submit(task, *args, **kwargs)

    def refresh(self):
        try:
            self.draw_screen()
        except AssertionError:
            logger.warning("application is not running")
            pass

    def _set_main_widget(self, widget, redraw):
        """
        add provided widget to widget list and display it

        :param widget:
        :return:
        """
        self.mainframe.set_body(widget)
        self.reload_footer()
        if redraw:
            logger.debug("redraw main widget")
            self.refresh()

    def display_buffer(self, buffer, redraw=True):
        """
        display provided buffer

        :param buffer: Buffer
        :return:
        """
        self.current_buffer = buffer
        self._set_main_widget(buffer.widget, redraw=redraw)

    def add_and_display_buffer(self, buffer, redraw=True):
        """
        add provided buffer to buffer list and display it

        :param buffer:
        :return:
        """
        if buffer not in self.buffers:
            logger.debug("adding new buffer {!r}".format(buffer))
            self.buffers.append(buffer)
        self.display_buffer(buffer, redraw=redraw)

    def pick_and_display_buffer(self, i):
        """
        pick i-th buffer from list and display it

        :param i: int
        :return: None
        """
        if len(self.buffers) == 1:
            # we don't need to display anything
            # listing is already displayed
            return
        else:
            try:
                self.display_buffer(self.buffers[i])
            except IndexError:
                # i > len
                self.display_buffer(self.buffers[0])

    @property
    def current_buffer_index(self):
        return self.buffers.index(self.current_buffer)

    def remove_current_buffer(self):
        # don't allow removing main_list
        if isinstance(self.current_buffer, MainListBuffer):
            logger.warning("you can't remove main list widget")
            return
        self.buffers.remove(self.current_buffer)
        self.current_buffer.destroy()
        # FIXME: we should display last displayed widget here
        self.display_buffer(self.buffers[0], True)

#......... part of the code omitted here .........
Developer: bobiwembley, Project: sen, Lines: 103, Source: init.py
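
One caveat about fire-and-forget helpers like run_in_background above: if the Future returned by submit() is discarded and nobody calls result() or exception() on it, any exception raised inside the background task is silently dropped. A small sketch of attaching a done-callback that logs failures (the logger name and wrapper are illustrative, not part of sen):

import logging
from concurrent.futures.thread import ThreadPoolExecutor

logger = logging.getLogger("background")
executor = ThreadPoolExecutor(max_workers=4)

def log_failure(future):
    # Called when the task finishes; exception() is None if it succeeded.
    exc = future.exception()
    if exc is not None:
        logger.error("background task failed", exc_info=exc)

def run_in_background(task, *args, **kwargs):
    future = executor.submit(task, *args, **kwargs)
    future.add_done_callback(log_failure)
    return future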


Note: The concurrent.futures.thread.ThreadPoolExecutor.submit examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code belongs to the original authors. Please consult each project's License before distributing or using the code; do not reproduce without permission.