

Python multiprocessing.Lock Method Code Examples

This article collects typical usage examples of the Python torch.multiprocessing.Lock method. If you are unsure what multiprocessing.Lock does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the torch.multiprocessing module.


The following presents 14 code examples of the multiprocessing.Lock method, sorted by popularity by default.
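Before the project examples, here is a minimal, self-contained sketch of the basic pattern they all share: a torch.multiprocessing.Lock serializing access to a shared value across worker processes. This sketch is illustrative only and is not taken from any of the projects below.

# Minimal sketch (hypothetical): a Lock guarding a shared counter.
import torch.multiprocessing as mp

def worker(counter, lock, n_steps):
    for _ in range(n_steps):
        with lock:              # acquire/release around the critical section
            counter.value += 1  # the read-modify-write must not interleave

if __name__ == '__main__':
    counter = mp.Value('i', 0)  # integer living in shared memory
    lock = mp.Lock()
    procs = [mp.Process(target=worker, args=(counter, lock, 1000)) for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(counter.value)        # 4000 with the lock; possibly less without it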

Example 1: __init__

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def __init__(self, data_scheme, n_bs, n_t, n_agents, batch_size, is_cuda=True, is_shared_mem=True, logging_struct=None):
        self.buffer = BatchEpisodeBuffer(data_scheme=data_scheme,
                                             n_bs=n_bs,
                                             n_t=n_t,
                                             n_agents=n_agents,
                                             is_cuda=is_cuda,
                                             is_shared_mem=True)


        if is_cuda:
            self._to_cuda()
        if is_shared_mem:
            self._to_shared_mem()
        self.queue_head_pos = 0
        self.lock = mp.Lock() # TODO: could make locks more granular!
        self.len = 0
        pass 
Developer: schroederdewitt, Project: mackrl, Lines: 19, Source file: replay_buffer.py

Example 2: create

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def create(cls):
        """Singleton factory."""
        if not hasattr(cls, 'length_to_eps'):
            # Maps episode length to list of episodes
            cls.length_to_eps = {}
            # Set of episode indices already in the cache
            cls.ep_indices = set()
            # List of batches if popping batches
            cls.batches = []
            # If all episodes have been loaded into memory
            cls.load_complete = Value(ctypes.c_bool, False)
            # Lock to access batches
            cls.batches_lock = Lock()
            # Lock to access length_to_eps
            cls.cache_lock = Lock()
            # Lock for condition variables
            cls.fill_cache_lock = RLock()
            # Condition notifying Loader to add to cache
            cls.add_to_cache_cv = Condition(lock=cls.fill_cache_lock)
            # Condition notifying teacher that cache has episodes
            cls.cache_filled_cv = Condition(lock=cls.fill_cache_lock) 
Developer: natashamjaques, Project: neural_chat, Lines: 23, Source file: pytorch_data_teacher.py
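The two Condition objects created above share fill_cache_lock and implement a handshake between a loader process and the teacher. A minimal, hypothetical sketch of that handshake is shown below (the function names are illustrative, this is not the neural_chat implementation, and real code would also re-check a shared predicate around each wait()):

# Hypothetical sketch of the loader/teacher handshake built on the conditions above.
def loader_side(cache):
    with cache.add_to_cache_cv:
        cache.add_to_cache_cv.wait()      # sleep until the teacher asks for more episodes
    # ... fill cache.length_to_eps while holding cache.cache_lock ...
    with cache.cache_filled_cv:
        cache.cache_filled_cv.notify()    # tell the teacher that episodes are available

def teacher_side(cache):
    with cache.add_to_cache_cv:
        cache.add_to_cache_cv.notify()    # request that the cache be filled
    with cache.cache_filled_cv:
        cache.cache_filled_cv.wait()      # block until the loader signals completion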

Example 3: __init__

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def __init__(self, val=True):
        self.val = mp.Value("b", False)
        self.lock = mp.Lock() 
Developer: llSourcell, Project: OpenAI_Five_vs_Dota2_Explained, Lines: 5, Source file: utils.py

Example 4: __init__

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def __init__(self):
    self.val = mp.Value('i', 0)
    self.lock = mp.Lock() 
Developer: Kaixhin, Project: Dist-A3C, Lines: 5, Source file: utils.py
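Constructor-only snippets like Examples 4 and 10 omit the methods that actually take the lock. A hypothetical completion of the shared-counter pattern (illustrative, not copied from the Dist-A3C source) looks like this:

# Hypothetical companion methods for a shared counter such as the one above.
def increment(self):
    with self.lock:                 # serialize the read-modify-write across processes
        self.val.value += 1

def value(self):
    with self.lock:
        return self.val.value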

Example 5: __init__

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def __init__(self, args):
        super(SharedMemory, self).__init__(args)

        # params for this memory

        # setup
        self.pos = mp.Value('l', 0)
        self.full = mp.Value('b', False)

        if self.tensortype == torch.FloatTensor:
            self.state0s = torch.zeros((self.memory_size, ) + tuple(self.state_shape), dtype=torch.float32)
            self.state1s = torch.zeros((self.memory_size, ) + tuple(self.state_shape), dtype=torch.float32)
        elif self.tensortype == torch.ByteTensor:
            self.state0s = torch.zeros((self.memory_size, ) + tuple(self.state_shape), dtype=torch.uint8)
            self.state1s = torch.zeros((self.memory_size, ) + tuple(self.state_shape), dtype=torch.uint8)
        self.actions = torch.zeros( self.memory_size, self.action_shape)
        self.rewards = torch.zeros( self.memory_size, self.reward_shape)
        self.gamma1s = torch.zeros( self.memory_size, self.gamma_shape)
        self.terminal1s = torch.zeros(self.memory_size, self.terminal_shape)

        self.state0s.share_memory_()
        self.actions.share_memory_()
        self.rewards.share_memory_()
        self.gamma1s.share_memory_()
        self.state1s.share_memory_()
        self.terminal1s.share_memory_()

        self.memory_lock = mp.Lock() 
Developer: jingweiz, Project: pytorch-distributed, Lines: 30, Source file: shared_memory.py

Example 6: __init__

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def __init__(self, data):
    self.lock = mp.Lock()
    self.data = mp.Value("i", data) 
Developer: mjendrusch, Project: torchsupport, Lines: 5, Source file: data.py

Example 7: __init__

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def __init__(self, x, y):
    self.ctrl = ReadWriteControl(self)
    self.ctrl_flick = mp.Lock()
    self.which_buffer = mp.Value("l", 0)
    self.buffers = [x, y] 
Developer: mjendrusch, Project: torchsupport, Lines: 7, Source file: buffer.py

Example 8: __init__

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def __init__(self, owner):
    self.owner = owner
    self.read_lock = mp.Lock()
    self.write_lock = mp.Lock()
    self.read_count = mp.Value("l", 0)
    self.read_count.value = 0

    self.timestamp = mp.Value("l", 0)
    self.local_timestamp = 0 
Developer: mjendrusch, Project: torchsupport, Lines: 11, Source file: control.py

Example 9: __init__

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def __init__(self, grad_norm, optimizer, scheduler):
        self.optimizer : torch.optim.Optimizer = optimizer
        self.scheduler = scheduler
        self.grad_norm = grad_norm
        self.global_step = torch.tensor(0)
        self.lock = mp.Lock() 
Developer: jkulhanek, Project: visual-navigation-agent-pytorch, Lines: 8, Source file: training.py

Example 10: __init__

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def __init__(self):
        self.val = mp.Value('i', 0)
        self.lock = mp.Lock() 
Developer: Feryal, Project: a3c-mujoco, Lines: 5, Source file: utils.py

Example 11: create

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def create(cls):
        if not hasattr(cls, 'length_to_eps'):
            # Maps episode length to list of episodes
            cls.length_to_eps = {}
        if not hasattr(cls, 'ep_indices'):
            # Set of episode indices already in the cache
            cls.ep_indices = set()
        if not hasattr(cls, 'batches'):
            # List of batches if popping batches
            cls.batches = []
        if not hasattr(cls, 'load_complete'):
            # If all episodes have been loaded into memory
            cls.load_complete = Value(ctypes.c_bool, False)
        if not hasattr(cls, 'batches_lock'):
            # Lock to access batches
            cls.batches_lock = Lock()
        if not hasattr(cls, 'cache_lock'):
            # Lock to access length_to_eps
            cls.cache_lock = Lock()
        if not hasattr(cls, 'fill_cache_lock'):
            # Lock for condition variables
            cls.fill_cache_lock = RLock()
        if not hasattr(cls, 'add_to_cache_cv'):
            # Condition notifying Loader to add to cache
            cls.add_to_cache_cv = Condition(lock=cls.fill_cache_lock)
        if not hasattr(cls, 'cache_filled_cv'):
            # Condition notifying teacher that cache has episodes
            cls.cache_filled_cv = Condition(lock=cls.fill_cache_lock) 
Developer: THUDM, Project: KBRD, Lines: 30, Source file: pytorch_data_teacher.py

Example 12: __init__

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def __init__(self):
        self.val = mp.Value("b", False)
        self.lock = mp.Lock() 
Developer: TianhongDai, Project: distributed-ppo, Lines: 5, Source file: utils.py

Example 13: __init__

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def __init__(self,
                 env_name,
                 env_kwargs,
                 batch_size,
                 policy,
                 baseline,
                 env=None,
                 seed=None,
                 num_workers=1):
        super(MultiTaskSampler, self).__init__(env_name,
                                               env_kwargs,
                                               batch_size,
                                               policy,
                                               seed=seed,
                                               env=env)

        self.num_workers = num_workers

        self.task_queue = mp.JoinableQueue()
        self.train_episodes_queue = mp.Queue()
        self.valid_episodes_queue = mp.Queue()
        policy_lock = mp.Lock()

        self.workers = [SamplerWorker(index,
                                      env_name,
                                      env_kwargs,
                                      batch_size,
                                      self.env.observation_space,
                                      self.env.action_space,
                                      self.policy,
                                      deepcopy(baseline),
                                      self.seed,
                                      self.task_queue,
                                      self.train_episodes_queue,
                                      self.valid_episodes_queue,
                                      policy_lock)
            for index in range(num_workers)]

        for worker in self.workers:
            worker.daemon = True
            worker.start()

        self._waiting_sample = False
        self._event_loop = asyncio.get_event_loop()
        self._train_consumer_thread = None
        self._valid_consumer_thread = None 
Developer: tristandeleu, Project: pytorch-maml-rl, Lines: 48, Source file: multi_task_sampler.py

Example 14: per_step

# Required module: from torch import multiprocessing [as alias]
# Or: from torch.multiprocessing import Lock [as alias]
def per_step(valLoader,
             model,
             criterion,
             downsamplingFactor):

    model.eval()
    criterion.eval()

    avgPER = 0
    varPER = 0
    nItems = 0

    print("Starting the PER computation through beam search")
    bar = progressbar.ProgressBar(maxval=len(valLoader))
    bar.start()

    for index, data in enumerate(valLoader):

        bar.update(index)

        with torch.no_grad():
            seq, sizeSeq, phone, sizePhone = prepare_data(data)
            c_feature = model(seq)
            sizeSeq = sizeSeq / downsamplingFactor
            predictions = torch.nn.functional.softmax(criterion.getPrediction(c_feature),
                                                      dim=2).cpu()
            c_feature = c_feature
            phone = phone.cpu()
            sizeSeq = sizeSeq.cpu()
            sizePhone = sizePhone.cpu()

            mutex = Lock()
            manager = Manager()
            poolData = manager.list()

            processes = []
            for b in range(sizeSeq.size(0)):
                l_ = min(sizeSeq[b] // 4, predictions.size(1))
                s_ = sizePhone[b]
                p = torch.multiprocessing.Process(target=get_local_per,
                                                  args=(poolData, mutex, predictions[b, :l_].view(l_, -1).numpy(),
                                                        phone[b, :s_].view(-1).numpy().astype(np.int32), criterion.BLANK_LABEL))
                p.start()
                processes.append(p)
            for p in processes:
                p.join()

            avgPER += sum([x for x in poolData])
            varPER += sum([x*x for x in poolData])
            nItems += len(poolData)

    bar.finish()

    avgPER /= nItems
    varPER /= nItems

    varPER -= avgPER**2
    print(f"Average PER {avgPER}")
    print(f"Standard deviation PER {math.sqrt(varPER)}") 
Developer: facebookresearch, Project: libri-light, Lines: 61, Source file: seq_alignment.py


Note: The torch.multiprocessing.Lock examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.