

Python multiprocessing.Manager Method Code Examples

This article collects typical usage examples of the multiprocessing.Manager method in Python. If you are wondering what multiprocessing.Manager does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples from the multiprocessing module.


The sections below present 15 code examples of the multiprocessing.Manager method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
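
Before looking at the collected examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) of the pattern most of them share: multiprocessing.Manager() starts a server process that hosts shared objects, and worker processes read and write them through proxies.

import multiprocessing

def square_worker(shared_dict, key):
    # Each worker writes its result through the manager proxy.
    shared_dict[key] = key * key

if __name__ == '__main__':
    with multiprocessing.Manager() as manager:
        results = manager.dict()
        workers = [multiprocessing.Process(target=square_worker, args=(results, i))
                   for i in range(4)]
        for w in workers:
            w.start()
        for w in workers:
            w.join()
        print(dict(results))  # {0: 0, 1: 1, 2: 4, 3: 9} (key order may vary)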

Example 1: main

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def main():
  m = multiprocessing.Manager()
  sharedQueue = m.Queue()
  sharedQueue.put(2)
  sharedQueue.put(3)
  sharedQueue.put(4)

  process1 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
  process1.start()

  process2 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
  process2.start()
  
  process3 = multiprocessing.Process(target=myTask, args=(sharedQueue,))
  process3.start()
  
  process2.join()
  process1.join()
  process3.join() 
Developer ID: PacktPublishing, Project: Learning-Concurrency-in-Python, Lines of code: 21, Source file: mpQueue.py
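
myTask is not included in this snippet; a plausible sketch of it (hypothetical, not the version from the original mpQueue.py) would simply consume one item from the shared queue:

def myTask(queue):
    # Pull a single value off the manager-backed queue and report it.
    value = queue.get()
    print("{} got {}".format(multiprocessing.current_process().name, value))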

Example 2: add_cmd_tasks

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def add_cmd_tasks(cmd_task_list, identifier=None, stdin_error_lock=mp.Manager().Lock()):
		"""
			Run several command line commands in parallel.

			@attention: use the Manager to get the lock as in this function definition !!!

			@type cmd_task_list: list of TaskCmd
			@param stdin_error_lock: acquiring the lock enables writing to the stdout and stderr

			@return: list of failed commands, dictionary (cmd, task process)
		"""
		assert isinstance(cmd_task_list, list)

		thread_task_list = []
		for cmdTask in cmd_task_list:
			assert isinstance(cmdTask, TaskCmd)
			thread_task_list.append(TaskThread(_runCmd, (cmdTask, stdin_error_lock)))

		return AsyncParallel.add_tasks(thread_task_list, identifier) 
Developer ID: CAMI-challenge, Project: CAMISIM, Lines of code: 21, Source file: parallel.py
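
The docstring's warning relies on how default arguments work: mp.Manager().Lock() is evaluated once, when the function is defined in the parent process, so every call that omits stdin_error_lock shares the same manager-backed lock proxy, which can safely be handed to worker threads or processes. A minimal illustration of the same idea (hypothetical names, not part of parallel.py; on spawn-based platforms, creating a Manager at import time like this is best avoided):

import multiprocessing as mp

def log_safely(message, lock=mp.Manager().Lock()):
    # The default lock is created once in the parent and shared by every caller.
    with lock:
        print(message)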

Example 3: trigger_request_process_and_return_response

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def trigger_request_process_and_return_response(rows_to_request):
    process_manager = Manager()
    shared_queue = process_manager.Queue()
    shared_queue_list = []
    list_process = []

    # Create one process per row
    for index, row in rows_to_request.iterrows():
        token, account = get_token_and_account_number_or_wait()
        p = Process(target=trigger_facebook_call, args=(index, row, token, account, shared_queue))
        list_process.append(p)

    # Start the processes
    for p in list_process:
        p.start()
    # Wait for the processes to finish
    for p in list_process:
        p.join()
    # Check for exceptions
    for p in list_process:
        check_exception(p)

    # Put things from shared list to normal list
    while shared_queue.qsize() != 0:
        shared_queue_list.append(shared_queue.get())
    return shared_queue_list 
Developer ID: maraujo, Project: pySocialWatcher, Lines of code: 25, Source file: utils.py

Example 4: use_virustotal

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def use_virustotal(args):
    """
    Use Virustotal to download the environment malware
    """
    m = multiprocessing.Manager()
    download_queue = m.JoinableQueue(args.nconcurrent)

    archive_procs = [
        multiprocessing.Process(
            target=download_worker_function,
            args=(download_queue, args.vtapikey))
        for i in range(args.nconcurrent)
    ]
    for w in archive_procs:
        w.start()

    for row in get_sample_hashes():
        download_queue.put(row["sha256"])

    for i in range(args.narchiveprocs):
        download_queue.put("STOP")

    download_queue.join()
    for w in archive_procs:
        w.join() 
Developer ID: endgameinc, Project: gym-malware, Lines of code: 27, Source file: download_samples.py
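
download_worker_function is defined elsewhere in download_samples.py. A hedged sketch of what the consumer side of this JoinableQueue pattern typically looks like (an assumed shape, not the project's actual code):

def download_worker_function(download_queue, vtapikey):
    # Consume sha256 hashes until the "STOP" sentinel, marking every item
    # as done so that download_queue.join() can eventually return.
    while True:
        sha256 = download_queue.get()
        if sha256 == "STOP":
            download_queue.task_done()
            break
        try:
            pass  # download the sample from VirusTotal here, using vtapikey
        finally:
            download_queue.task_done()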

Example 5: run_in_separate_process

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def run_in_separate_process(func, *args, **kwargs):
    """Runs function in separate process.

    This function is used instead of a decorator, since Python multiprocessing
    module can't serialize decorated function on all platforms.
    """
    manager = multiprocessing.Manager()
    manager_dict = manager.dict()
    process = ProcessWithException(
        manager_dict, target=func, args=args, kwargs=kwargs)
    process.start()
    process.join()
    exc = process.exception
    if exc:
        raise exc
    return process.output 
Developer ID: nvdv, Project: vprof, Lines of code: 18, Source file: base_profiler.py
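
ProcessWithException is defined elsewhere in base_profiler.py. A minimal sketch that is consistent with how it is used above (an assumption, not the vprof implementation) stores the target's return value or exception in the manager dict so the parent can re-raise it:

import multiprocessing

class ProcessWithException(multiprocessing.Process):
    """Process that reports the target's result or exception via a manager dict."""

    def __init__(self, manager_dict, target=None, args=(), kwargs=None):
        super().__init__()
        self._shared = manager_dict
        self._func, self._func_args, self._func_kwargs = target, args, kwargs or {}

    def run(self):
        try:
            self._shared['output'] = self._func(*self._func_args, **self._func_kwargs)
        except Exception as exc:
            self._shared['exception'] = exc

    @property
    def exception(self):
        return self._shared.get('exception')

    @property
    def output(self):
        return self._shared.get('output')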

Example 6: test_init

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def test_init():
    manager = Manager()
    return_dict = manager.dict()

    # make sure the server initializes before the workers start
    server_init = Value('i', 0)
    serv_p = Process(target=server_func, args=(2, 'test_graph1', server_init))
    serv_p.start()
    while server_init.value == 0:
        time.sleep(1)
    work_p1 = Process(target=check_init_func, args=(0, 'test_graph1', return_dict))
    work_p2 = Process(target=check_init_func, args=(1, 'test_graph1', return_dict))
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id 
Developer ID: dmlc, Project: dgl, Lines of code: 21, Source file: test_shared_mem_store.py
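
server_func and check_init_func come from dgl's shared-memory store test suite and are not shown here. The readiness handshake itself is generic, though: the server flips a shared Value once it is ready, and the parent spin-waits on it before launching workers. A standalone sketch of just that handshake (hypothetical, independent of dgl):

import time
from multiprocessing import Process, Value

def serve(ready_flag):
    # ... perform expensive server-side setup here ...
    ready_flag.value = 1  # tell the parent that workers may now connect
    time.sleep(1)         # stand-in for serving requests

if __name__ == '__main__':
    ready = Value('i', 0)
    server = Process(target=serve, args=(ready,))
    server.start()
    while ready.value == 0:
        time.sleep(0.1)   # wait until the server signals readiness
    # worker processes would be started here
    server.join()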

Example 7: test_compute

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def test_compute():
    manager = Manager()
    return_dict = manager.dict()

    # make sure the server initializes before the workers start
    server_init = Value('i', 0)
    serv_p = Process(target=server_func, args=(2, 'test_graph3', server_init))
    serv_p.start()
    while server_init.value == 0:
        time.sleep(1)
    work_p1 = Process(target=check_compute_func, args=(0, 'test_graph3', return_dict))
    work_p2 = Process(target=check_compute_func, args=(1, 'test_graph3', return_dict))
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id 
Developer ID: dmlc, Project: dgl, Lines of code: 21, Source file: test_shared_mem_store.py

Example 8: test_sync_barrier

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def test_sync_barrier():
    manager = Manager()
    return_dict = manager.dict()

    # make sure the server initializes before the workers start
    server_init = Value('i', 0)
    serv_p = Process(target=server_func, args=(2, 'test_graph4', server_init))
    serv_p.start()
    while server_init.value == 0:
        time.sleep(1)
    work_p1 = Process(target=check_sync_barrier, args=(0, 'test_graph4', return_dict))
    work_p2 = Process(target=check_sync_barrier, args=(1, 'test_graph4', return_dict))
    work_p1.start()
    work_p2.start()
    serv_p.join()
    work_p1.join()
    work_p2.join()
    for worker_id in return_dict.keys():
        assert return_dict[worker_id] == 0, "worker %d fails" % worker_id 
Developer ID: dmlc, Project: dgl, Lines of code: 21, Source file: test_shared_mem_store.py

Example 9: test_answer_challenge_auth_failure

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def test_answer_challenge_auth_failure(self):
        class _FakeConnection(object):
            def __init__(self):
                self.count = 0
            def recv_bytes(self, size):
                self.count += 1
                if self.count == 1:
                    return multiprocessing.connection.CHALLENGE
                elif self.count == 2:
                    return b'something bogus'
                return b''
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocessing.AuthenticationError,
                          multiprocessing.connection.answer_challenge,
                          _FakeConnection(), b'abc')

#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
# 
Developer ID: IronLanguages, Project: ironpython2, Lines of code: 22, Source file: test_multiprocessing.py

Example 10: initialize_unittest_logging

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def initialize_unittest_logging():
    # ACE is multi-process multi-threaded
    # so we use this special logging mechanism to keep a central repository of the log events generated
    # that the original process can access

    global test_log_manager
    global test_log_sync
    global test_log_messages
    global memory_log_handler

    test_log_manager = Manager()
    atexit.register(_atexit_callback)
    test_log_sync = RLock()
    test_log_messages = test_log_manager.list()

    log_format = logging.Formatter(datefmt='%(asctime)s')

    memory_log_handler = MemoryLogHandler()
    memory_log_handler.setLevel(logging.DEBUG)
    memory_log_handler.setFormatter(log_format)
    logging.getLogger().addHandler(memory_log_handler) 
Developer ID: IntegralDefense, Project: ACE, Lines of code: 23, Source file: test.py
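
MemoryLogHandler is ACE's own handler and is not shown in this snippet. A plausible sketch (hypothetical, but consistent with the globals set up above) appends each formatted record to the manager-backed list so that the original process can inspect log events from every process:

import logging

class MemoryLogHandler(logging.Handler):
    """Append formatted log records to the shared, manager-backed list."""

    def emit(self, record):
        try:
            with test_log_sync:
                test_log_messages.append(self.format(record))
        except Exception:
            self.handleError(record)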

Example 11: _import_mp

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def _import_mp():
    global Process, Queue, Pool, Event, Value, Array
    try:
        from multiprocessing import Manager, Process
        # Ignore SIGINT while the Manager is created so that the manager's
        # server process (which holds the shared Python objects and lets other
        # processes manipulate them through proxies) is not interrupted by
        # Ctrl+C; this keeps the communication channel between subprocesses
        # and the main process usable after Ctrl+C is received in the main
        # process.
        old = signal.signal(signal.SIGINT, signal.SIG_IGN)
        m = Manager()
        # Reset the handler so the main process will still receive a
        # KeyboardInterrupt exception on Ctrl+C.
        signal.signal(signal.SIGINT, old)
        Queue, Pool, Event, Value, Array = (
                m.Queue, m.Pool, m.Event, m.Value, m.Array
        )
    except ImportError:
        warn("multiprocessing module is not available, multiprocess plugin "
             "cannot be used", RuntimeWarning) 
Developer ID: singhj, Project: locality-sensitive-hashing, Lines of code: 22, Source file: multiprocess.py

Example 12: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def __init__(self, _id):
        self.manager = Manager()

        self.event_loop_id = _id
        self.target = Target.new_target(self.event_loop_id, self.__class__.__name__)
        self.__subscribers = {}
        self.__subscribers_lock = self.manager.Lock()
        self.__publishers = {}
        self.__client = None
        self.__main_loop = None
        self.__pid = os.getpid()

        self.__topicPub = Topic()
        self.__topicPub.set_targets(Target.new_target(self.event_loop_id, EventLoop.__name__))
        self.__topicPub.set_categories(EVENT_LOOP.TOPIC.CATEGORIES.RESPONSE)

        self.__topicSub = Topic()
        self.__topicSub.set_targets(None, Target.new_target(self.event_loop_id, EventLoop.__name__))
        self.__topicSub.set_categories(EVENT_LOOP.TOPIC.CATEGORIES.REQUEST)
        self.__topicSub.set_message(EventLoopMessage)
        self.set_subscriber(self.__topicSub, self.on_event_loop_message)

        self.__user_data = None
        self.__user_will = None 
Developer ID: CPFL, Project: AMS, Lines of code: 26, Source file: event_loop.py

Example 13: summary_multi_preprocess

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def summary_multi_preprocess(doc, num=None, fs=[text_pronouns, text_teaser, mmr, text_rank, lead3, lda, lsi, nmf]):
    """
        len(fs) 個進程
    :param doc: str
    :return: list
    """
    manager = Manager()
    return_dict = manager.dict()
    jobs = []
    for i in range(len(fs)):
        p = Process(target=worker, args=(i, doc, num, fs, return_dict))
        jobs.append(p)
        p.start()
    for proc in jobs:
        proc.join()
    return list(return_dict.values()) 
Developer ID: yongzhuo, Project: nlg-yongzhuo, Lines of code: 18, Source file: text_summary_merge.py
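
The worker function is defined elsewhere in text_summary_merge.py. A reasonable sketch of it (an assumption about its shape, including the fs[i](doc, num) signature) runs the i-th summarizer and stores its output under its index:

def worker(i, doc, num, fs, return_dict):
    # Run one summarization function in this process and collect its result.
    return_dict[i] = fs[i](doc, num)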

Example 14: main

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def main():
    try:
        parser = get_parser()
        args = parser.parse_args()
        if args.media_type == 'both':
            args.media_type = 'audio/video'
        globals()['media_type'] = REGEX_MAP[args.media_type]
        cache_ob = CacheUtil(args.path, args.media_type)
        manager = multiprocessing.Manager()
        queue = manager.Queue()
        consumer = multiprocessing.Process(target=store_in_cache, args=(queue, cache_ob))
        consumer.start()
        result = calculate_length(
            args.path, args.no_subdir, args.media_type, queue, cache_ob
        )
        consumer.join()
    except KeyboardInterrupt:
        sys.stdout.write('\nPlease wait... exiting gracefully!\n')
    else:
        sys.stdout.write('\n{}\n\n'.format(result))
    finally:
        sys.exit() 
Developer ID: karansthr, Project: Playlist-Length, Lines of code: 24, Source file: main.py

Example 15: __init__

# Required import: import multiprocessing [as alias]
# Or: from multiprocessing import Manager [as alias]
def __init__(self,
                 kube_config: Any,
                 task_queue: 'Queue[KubernetesJobType]',
                 result_queue: 'Queue[KubernetesResultsType]',
                 kube_client: client.CoreV1Api,
                 worker_uuid: str):
        super().__init__()
        self.log.debug("Creating Kubernetes executor")
        self.kube_config = kube_config
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.namespace = self.kube_config.kube_namespace
        self.log.debug("Kubernetes using namespace %s", self.namespace)
        self.kube_client = kube_client
        self.launcher = PodLauncher(kube_client=self.kube_client)
        self.worker_configuration_pod = WorkerConfiguration(kube_config=self.kube_config).as_pod()
        self._manager = multiprocessing.Manager()
        self.watcher_queue = self._manager.Queue()
        self.worker_uuid = worker_uuid
        self.kube_watcher = self._make_kube_watcher() 
Developer ID: apache, Project: airflow, Lines of code: 22, Source file: kubernetes_executor.py


Note: The multiprocessing.Manager method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors. Please refer to the corresponding project's license before redistributing or reusing the code, and do not republish this article without permission.