

Python multiprocessing.Queue Method Code Examples

This article collects typical usage examples of the torch.multiprocessing.Queue method in Python. If you are wondering what multiprocessing.Queue does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the torch.multiprocessing module.


Fifteen code examples of the multiprocessing.Queue method are shown below, ordered by popularity by default.

Example 1: _queuer

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def _queuer(instances: Iterable[Instance],
            input_queue: Queue,
            num_workers: int,
            num_epochs: Optional[int]) -> None:
    """
    Reads Instances from the iterable and puts them in the input_queue.
    """
    epoch = 0

    while num_epochs is None or epoch < num_epochs:
        epoch += 1
        for instance in instances:
            input_queue.put(instance)

    # Now put a None for each worker, since each needs to receive one
    # to know that it's done.
    for _ in range(num_workers):
        input_queue.put(None) 
Developer: jcyk, Project: gtos, Lines: 20, Source: multiprocess_iterator.py
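
A minimal usage sketch for _queuer (assumed, not from the gtos project): plain strings stand in for Instance objects, and the parent drains the queue itself for demonstration.

from torch.multiprocessing import Process, Queue

if __name__ == '__main__':
    instances = ['inst-a', 'inst-b', 'inst-c']  # stand-ins for Instance objects
    input_queue = Queue()
    num_workers = 2
    producer = Process(target=_queuer,
                       args=(instances, input_queue, num_workers, 1))
    producer.start()

    # consume until one None sentinel per worker has been seen
    seen_sentinels = 0
    while seen_sentinels < num_workers:
        item = input_queue.get()
        if item is None:
            seen_sentinels += 1
        else:
            print('got', item)
    producer.join()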

Example 2: thread_wrapped_func

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def thread_wrapped_func(func):
    """
    Wraps a process entry point to make it work with OpenMP.
    """
    @wraps(func)
    def decorated_function(*args, **kwargs):
        queue = Queue()
        def _queue_result():
            exception, trace, res = None, None, None
            try:
                res = func(*args, **kwargs)
            except Exception as e:
                exception = e
                trace = traceback.format_exc()
            queue.put((res, exception, trace))

        start_new_thread(_queue_result, ())
        result, exception, trace = queue.get()
        if exception is None:
            return result
        else:
            assert isinstance(exception, Exception)
            raise exception.__class__(trace)
    return decorated_function 
Developer: dmlc, Project: dgl, Lines: 26, Source: train_sampling.py
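
A hedged usage sketch of the decorator (the worker function below is a placeholder, not DGL code): each subprocess entry point runs its body in a fresh thread, and the wrapper re-raises any exception with the inner traceback text.

import torch.multiprocessing as mp

@thread_wrapped_func
def run_worker(rank, n_epochs):
    # a real worker would run sampling/training here
    return rank * n_epochs

if __name__ == '__main__':
    procs = [mp.Process(target=run_worker, args=(rank, 10)) for rank in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()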

Example 3: thread_wrapped_func

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def thread_wrapped_func(func):
    """
    Wraps a process entry point to make it work with OpenMP.
    """
    @wraps(func)
    def decorated_function(*args, **kwargs):
        queue = mp.Queue()
        def _queue_result():
            exception, trace, res = None, None, None
            try:
                res = func(*args, **kwargs)
            except Exception as e:
                exception = e
                trace = traceback.format_exc()
            queue.put((res, exception, trace))

        start_new_thread(_queue_result, ())
        result, exception, trace = queue.get()
        if exception is None:
            return result
        else:
            assert isinstance(exception, Exception)
            raise exception.__class__(trace)
    return decorated_function 
Developer: dmlc, Project: dgl, Lines: 26, Source: utils.py

Example 4: thread_wrapped_func

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def thread_wrapped_func(func):
    @wraps(func)
    def decorated_function(*args, **kwargs):
        queue = mp.Queue()
        def _queue_result():
            exception, trace, res = None, None, None
            try:
                res = func(*args, **kwargs)
            except Exception as e:
                exception = e
                trace = traceback.format_exc()
            queue.put((res, exception, trace))

        start_new_thread(_queue_result, ())
        result, exception, trace = queue.get()
        if exception is None:
            return result
        else:
            assert isinstance(exception, Exception)
            raise exception.__class__(trace)
    return decorated_function 
Developer: dmlc, Project: dgl, Lines: 23, Source: train_cv_multi_gpu.py

Example 5: __init__

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def __init__(self):

        # number of tasks trained in parallel
        self.N = 3
        # number of inner-loop update iterations
        self.K = 10

        # each task has its own concept and output network; we deploy them on
        # distinct GPUs and collect them in lists.
        self.concepts = []
        self.outlayers = []
        self.optimizer = None

        # queue for collecting the async tasks' loss and accuracy
        self.Q = multiprocessing.Queue()

        print('please call deploy() func to deploy networks. DO NOT call cuda() explicitly.') 
Developer: dragen1860, Project: MAML-Pytorch, Lines: 19, Source: csmlv0.py
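
The producer side of self.Q is not shown; a hypothetical worker (names assumed, not from MAML-Pytorch) would report its metrics back through the shared queue:

def train_one_task(task_id, Q):
    loss, acc = 0.42, 0.91       # placeholder metrics
    Q.put((task_id, loss, acc))  # the parent aggregates these asynchronously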

Example 6: _create_tensor_dicts

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def _create_tensor_dicts(input_queue: Queue,
                         output_queue: Queue,
                         iterator: DataIterator,
                         shuffle: bool,
                         index: int) -> None:
    """
    Pulls at most ``max_instances_in_memory`` from the input_queue,
    groups them into batches of size ``batch_size``, converts them
    to ``TensorDict`` s, and puts them on the ``output_queue``.
    """
    def instances() -> Iterator[Instance]:
        instance = input_queue.get()
        while instance is not None:
            yield instance
            instance = input_queue.get()

    for tensor_dict in iterator(instances(), num_epochs=1, shuffle=shuffle):
        output_queue.put(tensor_dict)

    output_queue.put(index) 
Developer: jcyk, Project: gtos, Lines: 22, Source: multiprocess_iterator.py
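
A hedged sketch of how _queuer (Example 1) and _create_tensor_dicts might be wired together; the queue capacities and worker count are assumptions, and instances, iterator (a configured DataIterator), and num_epochs must be supplied by the caller.

from torch.multiprocessing import Process, Queue

input_queue = Queue(1000)
output_queue = Queue(1000)

producer = Process(target=_queuer,
                   args=(instances, input_queue, num_workers, num_epochs))
workers = [Process(target=_create_tensor_dicts,
                   args=(input_queue, output_queue, iterator, True, i))
           for i in range(num_workers)]
producer.start()
for w in workers:
    w.start()
# the consumer reads TensorDicts from output_queue until it has seen
# every worker's index, which each worker puts last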

Example 7: __init__

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def __init__(self, cache, maxsize):
        self._cond = threading.Condition(threading.Lock())

        self._empty_sema = threading.Semaphore(maxsize)
        # self._full_sema = threading.Semaphore(0)

        self._job = job_counter.next()
        self._cache = cache
        # self._items = collections.deque()
        self._items = Queue(maxsize)
        # print self._items.maxsize

        self._index = 0
        # self._put_index = 0
        # self._get_index = 0
        self._length = None
        #
        # self._get_lock = threading.Lock()
        # self._put_lock = threading.Lock()

        self._unsorted = {}
        cache[self._job] = self 
Developer: CharlesShang, Project: Detectron-PYTORCH, Lines: 24, Source: pool.py

Example 8: async_mol2graph

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def async_mol2graph(q: Queue, 
                    data: MoleculeDataset, 
                    args: Namespace,
                    num_iters: int,
                    iter_size: int,
                    exit_q: Queue,
                    last_batch: bool=False):
    batches = []
    for i in range(0, num_iters, iter_size):  # will only go up to max size of queue, then yield
        if not last_batch and i + args.batch_size > len(data):
            break
        batch = MoleculeDataset(data[i:i + args.batch_size])
        batches.append(batch)
        if len(batches) == args.batches_per_queue_group:  # many at a time, since synchronization is expensive
            with Pool() as pool:
                processed_batches = pool.map(mol2graph_helper, [(batch, args) for batch in batches])
            q.put(processed_batches)
            batches = []
    if len(batches) > 0:
        with Pool() as pool:
            processed_batches = pool.map(mol2graph_helper, [(batch, args) for batch in batches])
        q.put(processed_batches)
    exit_q.get()  # block until the main process signals; otherwise the end of the queue apparently can't be read and we crash
Developer: wengong-jin, Project: chemprop, Lines: 25, Source: async_featurization.py
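
The main-process counterpart is not included; a hypothetical consumer (names assumed, not from chemprop) would drain q and then unblock the producer through exit_q:

for _ in range(num_queue_groups):   # assumed: the number of q.put calls expected
    for processed_batch in q.get():
        pass                        # train on processed_batch here
exit_q.put(None)                    # releases the producer's exit_q.get()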

Example 9: __init__

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def __init__(self, im_names, batchSize=1, format='yolo', queueSize=50):
        self.img_dir = opt.inputpath
        self.imglist = im_names
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])
        self.format = format

        self.batchSize = batchSize
        self.datalen = len(self.imglist)
        leftover = 0
        if (self.datalen) % batchSize:
            leftover = 1
        self.num_batches = self.datalen // batchSize + leftover

        # initialize the queue used to store data
        if opt.sp:
            self.Q = Queue(maxsize=queueSize)
        else:
            self.Q = mp.Queue(maxsize=queueSize) 
Developer: zh-plus, Project: video-to-pose3D, Lines: 23, Source: dataloader.py
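
Note the branch on opt.sp: in single-process mode an ordinary thread-safe queue is apparently sufficient, while the multi-process path needs a torch.multiprocessing.Queue whose contents can be shared across process boundaries.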

Example 10: __iter__

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def __iter__(self):
    queue = Queue(self.buffer_size)
    dl = iter(self.dataloader)
    def _worker():
      while True:
        try:
          queue.put(next(dl))
        except StopIteration:
          break
      queue.put(None)
    t = Thread(target=_worker)
    t.start()
    while True:
      d = queue.get()
      if d is None:
        break
      yield d
    del t
    del queue 
Developer: namisan, Project: mt-dnn, Lines: 21, Source: dataloader.py
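
Note that although the Queue here comes from torch.multiprocessing, the worker is a Thread rather than a process: the batches are produced by an existing dataloader in the same process, so a background thread is enough to overlap prefetching with consumption, with None as the exhaustion sentinel.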

Example 11: thread_wrapped_func

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def thread_wrapped_func(func):
    """Wrapped func for torch.multiprocessing.Process.
    With this wrapper we can use OMP threads in subprocesses
    otherwise, OMP_NUM_THREADS=1 is mandatory.
    How to use:
    @thread_wrapped_func
    def func_to_wrap(args ...):
    """
    @wraps(func)
    def decorated_function(*args, **kwargs):
        queue = mp.Queue()
        def _queue_result():
            exception, trace, res = None, None, None
            try:
                res = func(*args, **kwargs)
            except Exception as e:
                exception = e
                trace = traceback.format_exc()
            queue.put((res, exception, trace))

        start_new_thread(_queue_result, ())
        result, exception, trace = queue.get()
        if exception is None:
            return result
        else:
            assert isinstance(exception, Exception)
            raise exception.__class__(trace)
    return decorated_function 
Developer: dmlc, Project: dgl, Lines: 30, Source: utils.py

Example 12: create_async_update

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def create_async_update(self):
        """Set up the async update subprocess.
        """
        self.async_q = Queue(1)
        self.async_p = mp.Process(target=async_update, args=(self.args, self, self.async_q))
        self.async_p.start() 
Developer: dmlc, Project: dgl, Lines: 8, Source: tensor_models.py
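
The async_update target is defined elsewhere in the project; a hypothetical consumer loop of that shape (an illustration, not DGL-KE's actual implementation) might look like:

def async_update(args, emb, queue):
    while True:
        item = queue.get()
        if item is None:                    # assumed shutdown sentinel
            return
        grad_indices, grad_values = item    # assumed message format
        # ... apply the sparse gradient update to emb here ...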

Example 13: run_in_process_group

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def run_in_process_group(world_size, filename, fn, inputs):
    if torch.distributed.is_initialized():
        torch.distributed.destroy_process_group()
    processes = []
    q = Queue()
    wait_event = Event()

    # launch one process per rank
    for rank in range(world_size):
        p = Process(
            target=init_and_run_process,
            args=(rank, world_size, filename, fn, inputs[rank], q, wait_event),
        )
        p.start()
        processes.append(p)

    # fetch the results from the queue before joining, the background processes
    # need to be alive if the queue contains tensors. See
    # https://discuss.pytorch.org/t/using-torch-tensor-over-multiprocessing-queue-process-fails/2847/3  # noqa: B950
    results = []
    for _ in range(len(processes)):
        results.append(q.get())

    wait_event.set()

    for p in processes:
        p.join()
    return results 
Developer: facebookresearch, Project: ClassyVision, Lines: 31, Source: generic_distributed_util_test.py
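
A hedged usage sketch (the reduction function is a stand-in, and init_and_run_process is assumed to initialize the process group from filename before invoking fn):

import torch

def sum_across_ranks(x):
    t = torch.tensor([x])
    torch.distributed.all_reduce(t)
    return t.item()

results = run_in_process_group(
    world_size=2, filename='/tmp/shared_init_file',
    fn=sum_across_ranks, inputs=[1, 2])
# expected: [3, 3] (queue order is not guaranteed)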

Example 14: __init__

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def __init__(self, model, env,
               sampler_kind=Sampler,
               trajectory_kind=Trajectory,
               n_workers=4,
               max_items=64):
    self.n_workers = n_workers
    self.model = SharedModel(deepcopy(model))
    self.env = env
    self.done = Flag(0)
    self.max_items = max_items
    self.queue = mp.Queue(maxsize=2 * max_items)
    self.procs = []
    self.sampler_kind = sampler_kind
    self.trajectory_kind = trajectory_kind 
Developer: mjendrusch, Project: torchsupport, Lines: 16, Source: data.py

Example 15: parallel_evaluate

# Required imports: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Queue [as alias]
def parallel_evaluate(s, bufs, target, dev_id, number=10, timeout=10.0):
    proc = []
    q = multi.Queue()
    for i in range(number):
        p = multi.Process(target=_evaluate, args=(s, bufs, target, dev_id, 1, q))
        p.start()
        proc.append(p)
    beg = time.time()
    while time.time() - beg < timeout:
        if any(p.is_alive() for p in proc):
            time.sleep(.1)
        else:
            break
    else:
        for p in proc:
            p.terminate()
            p.join()
    count = 0
    total = 0
    while not q.empty():
        total += q.get()
        count += 1
    # charge any process that never reported the full timeout,
    # converted to milliseconds to match the measured values
    while count < number:
        total += timeout * 1e3
        count += 1
    return total / count
Developer: KnowingNothing, Project: FlexTensor, Lines: 28, Source: measure.py


Note: the torch.multiprocessing.Queue method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are drawn from open-source projects contributed by their developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.