

Python multiprocessing.Manager Method Code Examples

This article collects typical usage examples of the Python torch.multiprocessing.Manager method. If you are unsure how exactly multiprocessing.Manager is used, or are looking for practical examples of it, the curated code samples below may help. You can also explore further usage examples from the torch.multiprocessing module.


The following presents 11 code examples of the multiprocessing.Manager method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
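
Before the examples, here is a minimal, self-contained sketch of the pattern they all build on: a Manager starts a server process that hosts shared objects (dicts, lists, queues), and worker processes read and write them through proxies. Everything below (the worker function, the keys) is illustrative and not taken from any of the projects that follow.

import torch.multiprocessing as mp

def square(idx, shared_dict):
    # Each worker writes its result through a proxy to the manager process.
    shared_dict[idx] = idx * idx

if __name__ == '__main__':
    shared_dict = mp.Manager().dict()
    procs = [mp.Process(target=square, args=(i, shared_dict)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(dict(shared_dict))  # {0: 0, 1: 1, 2: 4, 3: 9}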

Example 1: parallelize_sessions

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Manager [as alias]
def parallelize_sessions(self, global_nets=None):
        mp_dict = mp.Manager().dict()
        # spec_util.tick(self.spec, 'session')
        # mp_run_session(deepcopy(self.spec), global_nets, mp_dict)
        workers = []
        for _s in range(self.spec['meta']['max_session']):
            spec_util.tick(self.spec, 'session')
            w = mp.Process(target=mp_run_session, args=(deepcopy(self.spec), global_nets, mp_dict))
            w.start()
            workers.append(w)
        for w in workers:
            w.join()
        session_metrics_list = [mp_dict[idx] for idx in sorted(mp_dict.keys())]
        return session_metrics_list 
Developer: ConvLab, Project: ConvLab, Lines: 16, Source: control.py
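
mp_run_session is defined elsewhere in control.py and not shown here. For the final sorted(mp_dict.keys()) collection to work, each worker must write its metrics into the managed dict keyed by its session index; a hypothetical stand-in (not ConvLab's actual implementation, and the spec['meta']['session'] key is an assumption) might look like:

def mp_run_session(spec, global_nets, mp_dict):
    # Hypothetical sketch: run one session, then report its metrics back
    # through the managed dict, keyed by the session index ticked into spec.
    session_index = spec['meta']['session']  # assumed location of the index
    metrics = {'total_reward': 0.0}  # placeholder for real session metrics
    mp_dict[session_index] = metrics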

Example 2: __init__

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Manager [as alias]
def __init__(self, capacity, buffer_gpu):
		self.capacity = capacity; self.buffer_gpu = buffer_gpu; self.counter = 0
		self.manager = Manager()
		self.tuples = self.manager.list() #Temporary shared buffer to get experiences from processes
		self.s = []; self.ns = []; self.a = []; self.r = []; self.done = []

		# Temporary tensors that can be loaded onto the GPU for fast sampling during gradient updates (refreshed each generation) --> faster sampling, no need to cycle experiences in and out of the GPU thousands of times
		self.sT = None; self.nsT = None; self.aT = None; self.rT = None; self.doneT = None 
Developer: IntelAI, Project: cerl, Lines: 10, Source: buffer.py

Example 3: __init__

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Manager [as alias]
def __init__(self, shared: bool = False, data: dict = None):
        if data is None:
            data = {}

        if shared:
            # NOTE(kamo): Don't set the manager as a field, because Manager holds
            # a weakref object and causes the following error with method="spawn":
            # "TypeError: can't pickle weakref objects"
            self.cache = multiprocessing.Manager().dict(**data)
        else:
            self.manager = None
            self.cache = dict(**data)
        self.size = 0 
Developer: espnet, Project: espnet, Lines: 15, Source: sized_dict.py
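
The NOTE in this example flags a real gotcha: a Manager instance holds weak references and is not picklable, so any object that stores the Manager as an attribute cannot be sent to a child process started with the 'spawn' method, while the dict proxy alone pickles fine. A minimal sketch of the failure mode (illustrative, not espnet code; the exact error message varies across Python versions):

import multiprocessing

class Holder:
    def __init__(self):
        self.manager = multiprocessing.Manager()  # the problematic field
        self.cache = self.manager.dict()

def use(holder):
    holder.cache['ok'] = True

if __name__ == '__main__':
    multiprocessing.set_start_method('spawn')
    h = Holder()
    p = multiprocessing.Process(target=use, args=(h,))
    p.start()  # fails while pickling h: "TypeError: can't pickle weakref objects"
    p.join()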

Example 4: __call__

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Manager [as alias]
def __call__(self,
                 instances: Iterable[Instance],
                 num_epochs: int = None,
                 shuffle: bool = True) -> Iterator[TensorDict]:

        # If you run it forever, the worker processes won't shut down correctly.
        # TODO(joelgrus) find a solution for this
        if num_epochs is None:
            raise ConfigurationError("Multiprocess Iterator must be run for a fixed number of epochs")

        manager = Manager()
        output_queue = manager.Queue(self.output_queue_size)
        input_queue = manager.Queue(self.output_queue_size * self.batch_size)

        # Start process that populates the queue.
        self.queuer = Process(target=_queuer, args=(instances, input_queue, self.num_workers, num_epochs))
        self.queuer.start()

        # Start the tensor-dict workers.
        for i in range(self.num_workers):
            args = (input_queue, output_queue, self.iterator, shuffle, i)
            process = Process(target=_create_tensor_dicts, args=args)
            process.start()
            self.processes.append(process)

        num_finished = 0
        while num_finished < self.num_workers:
            item = output_queue.get()
            if isinstance(item, int):
                num_finished += 1
                logger.info(f"worker {item} finished ({num_finished} / {self.num_workers})")
            else:
                yield item

        for process in self.processes:
            process.join()
        self.processes.clear()

        if self.queuer is not None:
            self.queuer.join()
            self.queuer = None 
Developer: jcyk, Project: gtos, Lines: 43, Source: multiprocess_iterator.py
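
The shutdown protocol above deserves a note: each tensor-dict worker puts its integer worker id on the output queue when it runs out of input, and the consumer loop counts those bare ints as completion sentinels while yielding everything else as data. A stripped-down sketch of the same convention (illustrative, not gtos code):

import multiprocessing

def worker(worker_id, out_q):
    for item in range(3):
        out_q.put(('data', worker_id, item))
    out_q.put(worker_id)  # a bare int means "this worker is done"

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    out_q = manager.Queue()
    num_workers = 2
    procs = [multiprocessing.Process(target=worker, args=(i, out_q))
             for i in range(num_workers)]
    for p in procs:
        p.start()
    finished = 0
    while finished < num_workers:
        item = out_q.get()
        if isinstance(item, int):
            finished += 1
        else:
            print(item)
    for p in procs:
        p.join()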

Example 5: __init__

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Manager [as alias]
def __init__(self, env, pol, num_parallel=8, prepro=None, seed=256):
        self.env = env
        self.pol = copy.deepcopy(pol)
        self.pol.to('cpu')
        self.pol.share_memory()
        self.pol.eval()
        self.num_parallel = num_parallel

        self.n_steps_global = torch.tensor(0, dtype=torch.long).share_memory_()
        self.max_steps = torch.tensor(0, dtype=torch.long).share_memory_()
        self.n_epis_global = torch.tensor(
            0, dtype=torch.long).share_memory_()
        self.max_epis = torch.tensor(0, dtype=torch.long).share_memory_()

        self.exec_flags = [torch.tensor(
            0, dtype=torch.long).share_memory_() for _ in range(self.num_parallel)]
        self.deterministic_flag = torch.tensor(
            0, dtype=torch.uint8).share_memory_()

        self.epis = mp.Manager().list()
        self.processes = []
        for ind in range(self.num_parallel):
            p = mp.Process(target=mp_sample, args=(self.pol, env, self.max_steps, self.max_epis, self.n_steps_global,
                                                   self.n_epis_global, self.epis, self.exec_flags[ind], self.deterministic_flag, ind, prepro, seed))
            p.start()
            self.processes.append(p) 
Developer: DeepX-inc, Project: machina, Lines: 28, Source: epi_sampler.py
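
This sampler mixes two sharing mechanisms: counters and flags are torch tensors moved into shared memory with share_memory_(), so children update them in place with no round trip, while the episode list is a Manager proxy, where every operation goes through the manager's server process. A rough sketch of the distinction (hypothetical, not machina code):

import torch
import torch.multiprocessing as mp

def bump(counter, log):
    counter += 1            # in-place on shared storage, no IPC round trip
    log.append('bumped')    # proxied call routed through the manager process

if __name__ == '__main__':
    counter = torch.tensor(0, dtype=torch.long).share_memory_()
    log = mp.Manager().list()
    p = mp.Process(target=bump, args=(counter, log))
    p.start()
    p.join()
    print(counter.item(), list(log))  # 1 ['bumped']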

Example 6: colorize

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Manager [as alias]
def colorize(request, context):
        try:
            manager = Manager()
            return_dict = manager.dict()
            p = Process(target=mp_colorize, args=(request.img_input,
                                                  request.render_factor,
                                                  return_dict))
            p.start()
            p.join()

            response = return_dict.get("response", None)
            if not response or "error" in response:
                error_msg = response.get("error", None) if response else None
                log.error(error_msg)
                context.set_details(error_msg)
                context.set_code(grpc.StatusCode.INTERNAL)
                return Output()

            log.debug("colorize({})={}".format(request.img_input[:50], response["img_colorized"][:50]))
            return Output(img_colorized=response["img_colorized"])

        except Exception as e:
            traceback.print_exc()
            log.error(e)
            return Output()


# The gRPC serve function.
#
# Params:
# max_workers: pool of threads to execute calls asynchronously
# port: gRPC server port
#
# Add all your classes to the server here.
# (from the .py files generated by the protobuf compiler)
Developer: singnet, Project: dnn-model-services, Lines: 37, Source: colorization_service.py
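
This service (like Example 8 below) runs inference in a short-lived child process and collects the result through a managed dict. One plausible motivation, stated here as an assumption rather than anything the project documents: when the child exits, its entire CUDA context and GPU memory are reclaimed, which a long-lived gRPC server could not otherwise guarantee. The skeleton of the pattern, with a trivial stand-in for the model:

from torch.multiprocessing import Manager, Process

def run_model(payload, return_dict):
    # Hypothetical worker: any CUDA state would live and die with this process.
    return_dict['response'] = {'result': payload.upper()}

def handle_request(payload):
    return_dict = Manager().dict()
    p = Process(target=run_model, args=(payload, return_dict))
    p.start()
    p.join()
    return return_dict.get('response')

if __name__ == '__main__':
    print(handle_request('hello'))  # {'result': 'HELLO'}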

Example 7: parallelize_sessions

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Manager [as alias]
def parallelize_sessions(self, global_nets=None):
        mp_dict = mp.Manager().dict()
        workers = []
        spec = deepcopy(self.spec)
        for _s in range(spec['meta']['max_session']):
            spec_util.tick(spec, 'session')
            w = mp.Process(target=mp_run_session, args=(spec, global_nets, mp_dict))
            w.start()
            workers.append(w)
        for w in workers:
            w.join()
        session_metrics_list = [mp_dict[idx] for idx in sorted(mp_dict.keys())]
        return session_metrics_list 
Developer: kengz, Project: SLM-Lab, Lines: 15, Source: control.py

Example 8: recognize_scene

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Manager [as alias]
def recognize_scene(self, request, context):
        """Wraps the scene recognition model to make sure inputs and outputs match the service requirements."""

        # Store the names of the images to delete them afterwards
        created_images = []

        # Python command call arguments. Key = argument name, value = tuple(type, required?, default_value)
        arguments = {"input_image": ("image", True, None),
                     "predict": ("string", True, self.prediction_list)}

        # Treat inputs
        try:
            image_path, predict, file_index_str = self.treat_inputs(request, arguments, created_images)
        except HTTPError as e:
            error_message = "Error downloading the input image \n" + str(e)
            log.error(error_message)
            self.result.data = error_message
            context.set_details(error_message)
            context.set_code(grpc.StatusCode.INTERNAL)
            return self.result
        except Exception as e:
            log.error(e)
            self.result.data = str(e)  # coerce the exception to str for the protobuf field
            context.set_details(str(e))
            context.set_code(grpc.StatusCode.INTERNAL)
            return self.result

        # Get cam (color activation mappings) file path
        input_filename = os.path.split(created_images[0])[1]
        log.debug("Input file name: {}".format(input_filename))
        output_image_path = self.output_dir + '/' + input_filename
        log.debug("Output image path (cam_path): {}".format(output_image_path))
        created_images.append(output_image_path)

        manager = Manager()
        return_dict = manager.dict()
        p = Process(target=mp_recognize, args=(image_path,
                                               predict,
                                               output_image_path,
                                               return_dict))
        p.start()
        p.join()

        response = return_dict.get("response", None)
        if not response or "error" in response:
            error_msg = response.get("error", None) if response else None
            log.error(error_msg)
            context.set_details(error_msg)
            context.set_code(grpc.StatusCode.INTERNAL)
            return SceneRecognitionResult()

        # Prepare gRPC output message
        self.result = SceneRecognitionResult()
        log.debug("Got result.")
        self.result.data = json.dumps(response)
        log.debug("Output generated. Service successfully completed.")

        for image in created_images:
            service.serviceUtils.clear_file(image)

        return self.result 
Developer: singnet, Project: dnn-model-services, Lines: 63, Source: scene_recognition_service.py

Example 9: per_step

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Manager [as alias]
def per_step(valLoader,
             model,
             criterion,
             downsamplingFactor):

    model.eval()
    criterion.eval()

    avgPER = 0
    varPER = 0
    nItems = 0

    print("Starting the PER computation through beam search")
    bar = progressbar.ProgressBar(maxval=len(valLoader))
    bar.start()

    for index, data in enumerate(valLoader):

        bar.update(index)

        with torch.no_grad():
            seq, sizeSeq, phone, sizePhone = prepare_data(data)
            c_feature = model(seq)
            sizeSeq = sizeSeq / downsamplingFactor
            predictions = torch.nn.functional.softmax(criterion.getPrediction(c_feature),
                                                      dim=2).cpu()
            phone = phone.cpu()
            sizeSeq = sizeSeq.cpu()
            sizePhone = sizePhone.cpu()

            mutex = Lock()
            manager = Manager()
            poolData = manager.list()

            processes = []
            for b in range(sizeSeq.size(0)):
                l_ = min(sizeSeq[b] // 4, predictions.size(1))
                s_ = sizePhone[b]
                p = torch.multiprocessing.Process(target=get_local_per,
                                                  args=(poolData, mutex, predictions[b, :l_].view(l_, -1).numpy(),
                                                        phone[b, :s_].view(-1).numpy().astype(np.int32), criterion.BLANK_LABEL))
                p.start()
                processes.append(p)
            for p in processes:
                p.join()

            avgPER += sum([x for x in poolData])
            varPER += sum([x*x for x in poolData])
            nItems += len(poolData)

    bar.finish()

    avgPER /= nItems
    varPER /= nItems

    varPER -= avgPER**2
    print(f"Average PER {avgPER}")
    print(f"Standard deviation PER {math.sqrt(varPER)}") 
Developer: facebookresearch, Project: libri-light, Lines: 61, Source: seq_alignment.py
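
get_local_per is not shown; for the accumulation above to work, it must append one PER value per utterance to the managed list. Individual proxy operations such as append are already serialized by the manager, so the explicit Lock is presumably there to guard compound read-modify-write steps inside the worker. A hypothetical stand-in shaped like the real worker:

def get_local_per(pool_data, mutex, predictions, phones, blank_label):
    # Hypothetical sketch: decode one utterance and record its PER.
    per = 0.5  # placeholder for the real beam-search PER computation
    with mutex:
        pool_data.append(per)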

Example 10: propagate

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Manager [as alias]
def propagate(nnf, feat_A, feat_AP, feat_B, feat_BP, patch_size, iters=2, rand_search_radius=200):
    print("\tpatch_size:{}; num_iters:{}; rand_search_radius:{}".format(patch_size, iters, rand_search_radius))

    nnd = np.zeros(nnf.shape[:2])
    A_size = feat_A.shape[:2]
    B_size = feat_B.shape[:2]

    for ay in range(A_size[0]):
        for ax in range(A_size[1]):
            by, bx = nnf[ay, ax]
            nnd[ay, ax] = cal_dist(ay, ax, by, bx, feat_A, feat_AP, feat_B, feat_BP, A_size, B_size, patch_size)

    manager = mp.Manager()
    q = manager.Queue(A_size[1] * A_size[0])
    cpus = min(mp.cpu_count(), A_size[0] // 20 + 1)
    for i in range(iters):

        p = Pool(cpus)

        ay_start = 0

        while ay_start < A_size[0]:
            ax_start = 0
            while ax_start < A_size[1]:
                p.apply_async(pixelmatch, args=(q, ax_start, ay_start,
                                                cpus,
                                                nnf, nnd,
                                                A_size, B_size,
                                                feat_A, feat_AP,
                                                feat_B, feat_BP,
                                                patch_size,
                                                rand_search_radius,))

                ax_start += A_size[1] // cpus + 1
            ay_start += A_size[0] // cpus + 1

        p.close()
        p.join()

        while not q.empty():
            ax, ay, xbest, ybest, dbest = q.get()

            nnf[ay, ax] = np.array([ybest, xbest])
            nnd[ay, ax] = dbest

    return nnf, nnd 
Developer: Ben-Louis, Project: Deep-Image-Analogy-PyTorch, Lines: 48, Source: PatchMatchOrig.py
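
One detail worth calling out: a plain multiprocessing.Queue cannot be handed to Pool.apply_async; queues are meant to be shared only by inheritance, and pickling one into a pool task fails with a RuntimeError. Creating the queue through a Manager, as this example does, yields a picklable proxy. A minimal sketch of the working pattern (illustrative names):

import multiprocessing as mp

def produce(q, value):
    q.put(value * 2)  # the proxy queue pickles safely into pool workers

if __name__ == '__main__':
    manager = mp.Manager()
    q = manager.Queue()  # mp.Queue() here would fail with a RuntimeError
    pool = mp.Pool(4)
    for i in range(8):
        pool.apply_async(produce, args=(q, i))
    pool.close()
    pool.join()
    print(sorted(q.get() for _ in range(8)))  # [0, 2, 4, 6, 8, 10, 12, 14]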

Example 11: run_tracker

# Required import: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Manager [as alias]
def run_tracker(self):
        """
        Run self.pipeline on DAVIS
        """
        num_gpu = self._hyper_params["device_num"]
        all_devs = [torch.device("cuda:%d" % i) for i in range(num_gpu)]
        logger.info('running test on devices {}'.format(all_devs))
        davis_root = self._hyper_params["data_root"]
        logger.info('Using dataset %s at: %s' % (self.dataset_name, davis_root))
        # setup dataset
        dataset = davis_benchmark.load_dataset(davis_root, self.dataset_name)
        self.dataset = dataset
        keys = list(dataset.keys())
        keys.sort()
        nr_records = len(keys)
        pbar = tqdm(total=nr_records)
        mean_speed = -1
        speed_list = []
        manager = Manager()
        speed_queue = manager.Queue(500)
        # set worker
        if num_gpu == 0:
            self.worker(keys, all_devs[0], self.dataset, speed_queue)
            for i in range(nr_records):
                s = speed_queue.get()
                speed_list.append(s)
                pbar.update(1)
        else:
            nr_video = math.ceil(nr_records / num_gpu)
            procs = []
            for i in range(num_gpu):
                start = i * nr_video
                end = min(start + nr_video, nr_records)
                split_records = keys[start:end]
                proc = mp.Process(target=self.worker,
                                  args=(split_records, all_devs[i],
                                        self.dataset, speed_queue))
                logger.info('process:%d, start:%d, end:%d' % (i, start, end))
                proc.start()
                procs.append(proc)
            for i in range(nr_records):
                s = speed_queue.get()
                speed_list.append(s)
                pbar.update(1)
            for p in procs:
                p.join()
        # print result
        mean_speed = float(np.mean(speed_list))
        logger.info('Mean Speed: {:.2f} FPS'.format(mean_speed))
        self._state['speed'] = mean_speed 
Developer: MegviiDetection, Project: video_analyst, Lines: 52, Source: davis.py


Note: The torch.multiprocessing.Manager method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please follow the corresponding project's license. Do not reproduce without permission.