

Python parallel.data_parallel Method Code Examples

This article collects typical usage examples of the Python method torch.nn.parallel.data_parallel. If you are wondering what parallel.data_parallel does, how to call it, or where to find working examples, the curated snippets below should help. You can also browse further usage examples from the torch.nn.parallel module.


The following presents 9 code examples of the parallel.data_parallel method, sorted by popularity by default.
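Before the examples, a quick orientation: torch.nn.parallel.data_parallel is the functional, single-call counterpart of nn.DataParallel. It replicates a module across the given device ordinals, scatters the inputs along a batch dimension, runs the replicas in parallel, and gathers the outputs on one device. A minimal sketch (the module and sizes are illustrative, not taken from any example below):

import torch
import torch.nn as nn
from torch.nn.parallel import data_parallel

model = nn.Linear(16, 4)
x = torch.randn(8, 16)
if torch.cuda.device_count() > 1:
    model = model.cuda()
    # Scatter the batch of 8 across all visible GPUs, replicate the
    # module, run forward in parallel, and gather results on device 0.
    y = data_parallel(model, x.cuda(),
                      device_ids=list(range(torch.cuda.device_count())))
else:
    y = model(x)  # single-device fallback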

Example 1: apply_model

# Required import: from torch.nn import parallel [as alias]
# Alternatively: from torch.nn.parallel import data_parallel [as alias]
def apply_model(self, inputs: Dict[str, torch.Tensor]) -> torch.Tensor:
        r"""Apply model forward.

        Args:
            inputs (Dict[str, T]): Dictionary to input to sequential.
        
        Returns:
            torch.Tensor: output of sequential.
        """
        if hasattr(self, "_base_device_ordinal"):
            base_device_ordinal = self._base_device_ordinal
        else:
            base_device_ordinal = None
        
        if self._devices is not None:
            return nn_parallel.data_parallel(self._sequential, inputs, list(self._devices), output_device=base_device_ordinal)
        else:
            return self._sequential(inputs) 
Author: p768lwy3, Project: torecsys, Lines: 20, File: trainer.py
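A note on the output_device=base_device_ordinal argument above: it controls where data_parallel gathers the concatenated outputs (the default is device_ids[0]). A minimal sketch, assuming two visible GPUs (the module and sizes are illustrative):

import torch
import torch.nn as nn
from torch.nn.parallel import data_parallel

if torch.cuda.device_count() >= 2:
    model = nn.Linear(16, 4).cuda(0)
    x = torch.randn(8, 16, device="cuda:0")
    # Replicas run on GPUs 0 and 1, but the gathered output is
    # placed on GPU 1 instead of the default device_ids[0].
    y = data_parallel(model, x, device_ids=[0, 1], output_device=1)
    assert y.device == torch.device("cuda:1")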

Example 2: forward

# Required import: from torch.nn import parallel [as alias]
# Alternatively: from torch.nn.parallel import data_parallel [as alias]
def forward(self, x, idx_scale):
        self.idx_scale = idx_scale
        if hasattr(self.model, 'set_scale'):
            self.model.set_scale(idx_scale)

        if self.training:
            if self.n_GPUs > 1:
                return P.data_parallel(self.model, x, range(self.n_GPUs))
            else:
                return self.model(x)
        else:
            if self.chop:
                forward_function = self.forward_chop
            else:
                forward_function = self.model.forward

            if self.self_ensemble:
                return self.forward_x8(x, forward_function=forward_function)
            else:
                return forward_function(x) 
Author: thstkdgus35, Project: EDSR-PyTorch, Lines: 22, File: __init__.py
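This snippet assumes import torch.nn.parallel as P, and parallelizes only the training path; at test time it prefers the chopped or self-ensembled forwards. Note that range(self.n_GPUs) is accepted anywhere a list of device ordinals is. A hedged standalone sketch of the same pattern (module and sizes are illustrative):

import torch
import torch.nn as nn
import torch.nn.parallel as P

model = nn.Conv2d(3, 8, kernel_size=3, padding=1)
x = torch.randn(4, 3, 32, 32)
n_GPUs = torch.cuda.device_count()
if n_GPUs > 1:
    model = model.cuda()
    # range(n_GPUs) enumerates device ordinals 0 .. n_GPUs-1.
    y = P.data_parallel(model, x.cuda(), range(n_GPUs))
else:
    y = model(x)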

Example 3: update_core

# Required import: from torch.nn import parallel [as alias]
# Alternatively: from torch.nn.parallel import data_parallel [as alias]
def update_core(self):
        """Update the model."""
        # When we pass one iterator and optimizer to StandardUpdater.__init__,
        # they are automatically named 'main'.
        train_iter = self.get_iterator("main")
        optimizer = self.get_optimizer("main")
        # Progress the dataset iterator for sentences at each iteration.
        self.model.zero_grad()  # Clear the parameter gradients
        accum = {"loss": 0.0, "nll": 0.0, "count": 0}
        for _ in range(self.accum_grad):
            batch = train_iter.__next__()
            # Concatenate the token IDs to matrices and send them to the device
            # self.converter does this job
            # (it is chainer.dataset.concat_examples by default)
            x, t = concat_examples(batch, device=self.device[0], padding=(0, -100))
            if self.device[0] == -1:
                loss, nll, count = self.model(x, t)
            else:
                # apex does not support torch.nn.DataParallel
                loss, nll, count = data_parallel(self.model, (x, t), self.device)

            # backward
            loss = loss.mean() / self.accum_grad
            if self.use_apex:
                from apex import amp

                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()  # Backprop
            # accumulate stats
            accum["loss"] += float(loss)
            accum["nll"] += float(nll.sum())
            accum["count"] += int(count.sum())

        for k, v in accum.items():
            reporter.report({k: v}, optimizer.target)
        if self.gradclip is not None:
            nn.utils.clip_grad_norm_(self.model.parameters(), self.gradclip)
        optimizer.step()  # Update the parameters
        self.scheduler.step(n_iter=self.iteration) 
Author: espnet, Project: espnet, Lines: 43, File: lm.py
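Because data_parallel gathers per-replica outputs by concatenating them along dim 0, the loss above arrives as a vector and is averaged with loss.mean() before backprop. A minimal sketch of the same gather-then-average pattern, with a toy loss-returning module (all names and sizes are illustrative):

import torch
import torch.nn as nn
from torch.nn.parallel import data_parallel

class ToyLM(nn.Module):
    """Returns one loss value per sample so gathered outputs average cleanly."""
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(16, 1)

    def forward(self, x, t):
        # One scalar per sample; replicas' outputs concatenate along dim 0.
        return (self.proj(x).squeeze(-1) - t) ** 2

model, x, t = ToyLM(), torch.randn(8, 16), torch.randn(8)
if torch.cuda.device_count() > 1:
    model = model.cuda()
    losses = data_parallel(model, (x.cuda(), t.cuda()),
                           list(range(torch.cuda.device_count())))
else:
    losses = model(x, t)
losses.mean().backward()  # average over samples (and hence over replicas)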

Example 4: evaluate

# Required import: from torch.nn import parallel [as alias]
# Alternatively: from torch.nn.parallel import data_parallel [as alias]
def evaluate(self):
        """Evaluate the model."""
        val_iter = self.get_iterator("main")
        loss = 0
        nll = 0
        count = 0
        self.model.eval()
        with torch.no_grad():
            for batch in copy.copy(val_iter):
                x, t = concat_examples(batch, device=self.device[0], padding=(0, -100))
                if self.device[0] == -1:
                    l, n, c = self.model(x, t)
                else:
                    # apex does not support torch.nn.DataParallel
                    l, n, c = data_parallel(self.model, (x, t), self.device)
                loss += float(l.sum())
                nll += float(n.sum())
                count += int(c.sum())
        self.model.train()
        # report validation loss
        observation = {}
        with reporter.report_scope(observation):
            reporter.report({"loss": loss}, self.model.reporter)
            reporter.report({"nll": nll}, self.model.reporter)
            reporter.report({"count": count}, self.model.reporter)
        return observation 
Author: espnet, Project: espnet, Lines: 28, File: lm.py

Example 5: encode

# Required import: from torch.nn import parallel [as alias]
# Alternatively: from torch.nn.parallel import data_parallel [as alias]
def encode(self, x):  
        mu, logvar = data_parallel(self.encoder, x)
        return mu, logvar 
Author: hhb072, Project: IntroVAE, Lines: 5, File: networks.py

Example 6: decode

# Required import: from torch.nn import parallel [as alias]
# Alternatively: from torch.nn.parallel import data_parallel [as alias]
def decode(self, z):        
        y = data_parallel(self.decoder, z)
        return y 
Author: hhb072, Project: IntroVAE, Lines: 5, File: networks.py
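In these two snippets device_ids is omitted entirely, in which case data_parallel defaults to every visible CUDA device. A hedged sketch of how such an encoder/decoder pair might be wired up (the class and layer sizes are illustrative stand-ins, not the IntroVAE architecture):

import torch
import torch.nn as nn
from torch.nn.parallel import data_parallel

class ToyVAE(nn.Module):
    def __init__(self, dim=32, zdim=8):
        super().__init__()
        self.encoder = nn.Linear(dim, 2 * zdim)  # emits mu and logvar
        self.decoder = nn.Linear(zdim, dim)

    def encode(self, x):
        if next(self.encoder.parameters()).is_cuda:
            h = data_parallel(self.encoder, x)  # all visible GPUs by default
        else:
            h = self.encoder(x)
        mu, logvar = h.chunk(2, dim=-1)
        return mu, logvar

    def decode(self, z):
        if next(self.decoder.parameters()).is_cuda:
            return data_parallel(self.decoder, z)
        return self.decoder(z)

vae = ToyVAE()
x = torch.randn(4, 32)
if torch.cuda.is_available():
    vae, x = vae.cuda(), x.cuda()
mu, logvar = vae.encode(x)
recon = vae.decode(mu)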

Example 7: encode

# Required import: from torch.nn import parallel [as alias]
# Alternatively: from torch.nn.parallel import data_parallel [as alias]
def encode(self, inputs, hidden=None, device_ids=None):
        if isinstance(device_ids, tuple):
            return data_parallel(self.encoder, (inputs, hidden),
                                 device_ids=device_ids,
                                 dim=0 if self.encoder.batch_first else 1)
        else:
            return self.encoder(inputs, hidden) 
Author: eladhoffer, Project: seq2seq.pytorch, Lines: 9, File: seq2seq_base.py
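The dim argument here is worth highlighting: data_parallel scatters inputs and gathers outputs along dim, so for a time-major RNN (batch_first=False) the batch axis is dim 1. A minimal sketch (sizes are illustrative):

import torch
import torch.nn as nn
from torch.nn.parallel import data_parallel

rnn = nn.GRU(input_size=16, hidden_size=32, batch_first=False)
x = torch.randn(10, 8, 16)  # time-major: (seq_len, batch, features)
if torch.cuda.device_count() > 1:
    rnn = rnn.cuda()
    # Scatter along dim=1 (the batch axis) so each replica sees
    # full-length sequences for its slice of the batch; dim=1 is also
    # the batch axis of the returned hidden state, so gathering is safe.
    out, h = data_parallel(rnn, x.cuda(), device_ids=[0, 1], dim=1)
else:
    out, h = rnn(x)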

Example 8: decode

# Required import: from torch.nn import parallel [as alias]
# Alternatively: from torch.nn.parallel import data_parallel [as alias]
def decode(self, *kargs, **kwargs):
        device_ids = kwargs.pop('device_ids', None)
        if isinstance(device_ids, tuple):
            return data_parallel(self.decoder, *kargs, **kwargs,
                                 device_ids=device_ids,
                                 dim=0 if self.decoder.batch_first else 1)
        else:
            return self.decoder(*kargs, **kwargs) 
Author: eladhoffer, Project: seq2seq.pytorch, Lines: 10, File: seq2seq_base.py
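The isinstance(device_ids, tuple) guard makes parallelism strictly opt-in at the call site. A hedged, self-contained sketch of the same pattern (the ToyDecoder and ToySeq2Seq stand-ins and their sizes are illustrative, not the original model):

import torch
import torch.nn as nn
from torch.nn.parallel import data_parallel

class ToyDecoder(nn.Module):
    batch_first = True  # batch axis is dim 0, as the wrapper checks

    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(16, 32)

    def forward(self, x):
        return torch.relu(self.proj(x))

class ToySeq2Seq(nn.Module):
    def __init__(self):
        super().__init__()
        self.decoder = ToyDecoder()

    def decode(self, *kargs, **kwargs):
        device_ids = kwargs.pop('device_ids', None)
        if isinstance(device_ids, tuple):
            # Positional args become the scattered `inputs` tuple.
            return data_parallel(self.decoder, kargs,
                                 device_ids=list(device_ids),
                                 dim=0 if self.decoder.batch_first else 1)
        return self.decoder(*kargs, **kwargs)

model = ToySeq2Seq()
x = torch.randn(8, 16)
y = model.decode(x)  # single-device path
if torch.cuda.device_count() > 1:
    model = model.cuda()
    y = model.decode(x.cuda(), device_ids=(0, 1))  # opt-in parallel path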

Example 9: forward_chop

# Required import: from torch.nn import parallel [as alias]
# Alternatively: from torch.nn.parallel import data_parallel [as alias]
def forward_chop(self, *args, shave=10, min_size=160000):
        scale = 1 if self.input_large else self.scale[self.idx_scale]
        n_GPUs = min(self.n_GPUs, 4)
        # height, width
        h, w = args[0].size()[-2:]

        top = slice(0, h//2 + shave)
        bottom = slice(h - h//2 - shave, h)
        left = slice(0, w//2 + shave)
        right = slice(w - w//2 - shave, w)
        x_chops = [torch.cat([
            a[..., top, left],
            a[..., top, right],
            a[..., bottom, left],
            a[..., bottom, right]
        ]) for a in args]

        y_chops = []
        if h * w < 4 * min_size:
            for i in range(0, 4, n_GPUs):
                x = [x_chop[i:(i + n_GPUs)] for x_chop in x_chops]
                y = P.data_parallel(self.model, *x, range(n_GPUs))
                if not isinstance(y, list): y = [y]
                if not y_chops:
                    y_chops = [[c for c in _y.chunk(n_GPUs, dim=0)] for _y in y]
                else:
                    for y_chop, _y in zip(y_chops, y):
                        y_chop.extend(_y.chunk(n_GPUs, dim=0))
        else:
            for p in zip(*x_chops):
                y = self.forward_chop(*p, shave=shave, min_size=min_size)
                if not isinstance(y, list): y = [y]
                if not y_chops:
                    y_chops = [[_y] for _y in y]
                else:
                    for y_chop, _y in zip(y_chops, y): y_chop.append(_y)

        h *= scale
        w *= scale
        top = slice(0, h//2)
        bottom = slice(h - h//2, h)
        bottom_r = slice(h//2 - h, None)
        left = slice(0, w//2)
        right = slice(w - w//2, w)
        right_r = slice(w//2 - w, None)

        # batch size, number of color channels
        b, c = y_chops[0][0].size()[:-2]
        y = [y_chop[0].new(b, c, h, w) for y_chop in y_chops]
        for y_chop, _y in zip(y_chops, y):
            _y[..., top, left] = y_chop[0][..., top, left]
            _y[..., top, right] = y_chop[1][..., top, right_r]
            _y[..., bottom, left] = y_chop[2][..., bottom_r, left]
            _y[..., bottom, right] = y_chop[3][..., bottom_r, right_r]

        if len(y) == 1: y = y[0]

        return y 
Author: thstkdgus35, Project: EDSR-PyTorch, Lines: 60, File: __init__.py
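To make the chopping arithmetic concrete: each quadrant is extended shave pixels past the midline so the overlapping seam can be discarded when the upscaled quadrants are stitched back together. A standalone sketch of just the slicing step (sizes and shave are illustrative):

import torch

h, w, shave = 100, 100, 10
x = torch.randn(1, 3, h, w)

# Four overlapping quadrants, each reaching `shave` pixels past the
# midline; the overlap is cropped away again when stitching the output.
top    = slice(0, h // 2 + shave)       # rows 0..59
bottom = slice(h - h // 2 - shave, h)   # rows 40..99
left   = slice(0, w // 2 + shave)
right  = slice(w - w // 2 - shave, w)

chops = torch.cat([x[..., top, left], x[..., top, right],
                   x[..., bottom, left], x[..., bottom, right]])
assert chops.shape == (4, 3, 60, 60)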


Note: The torch.nn.parallel.data_parallel method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.