

Python torch.ceil Method Code Examples

This article collects typical usage examples of the torch.ceil method in Python. If you are wondering how to call torch.ceil, what its arguments look like, or how it is used in real code, the curated method examples below may help. You can also explore further usage examples from the torch module that this method belongs to.


The following presents 15 code examples of the torch.ceil method, sorted by popularity by default.
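
Before diving into the collected examples, here is a minimal, self-contained sketch (with made-up tensor values) of what torch.ceil does and of the length-reduction pattern that recurs in several examples below: each element is rounded up to the nearest integer, and combining ceil with a division and an integer cast yields downsampled sequence lengths.

import torch

x = torch.tensor([-1.5, 0.4, 1.0, 2.3])
print(torch.ceil(x))  # tensor([-1., 1., 1., 3.])

# Recurring pattern in the examples below: shrink per-utterance frame lengths
# after a time-reduction step (a reduction factor of 2 is only an illustration).
frame_lens = torch.tensor([7, 10, 13])
reduced = torch.ceil(frame_lens.float() / 2).int()
print(reduced)  # tensor([4, 5, 7], dtype=torch.int32)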

Example 1: decode

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def decode(self, data_loader):
        self.model.eval()
        with torch.no_grad():
            for xs, frame_lens, filenames in data_loader:
                # predict phones using AM
                if self.use_cuda:
                    xs = xs.cuda(non_blocking=True)
                ys_hat, frame_lens = self.model(xs, frame_lens)
                #frame_lens = torch.ceil(frame_lens.float() / FRAME_REDUCE_FACTOR).int()
                # decode using Kaldi's latgen decoder
                # no need to normalize posteriors with state priors when we use CTC
                # https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43908.pdf
                if self.use_cuda:
                    ys_hat = ys_hat.cpu()
                words, alignment, w_sizes, a_sizes = self.decoder(ys_hat, frame_lens)
                # print results
                ys_hat = [y[:s] for y, s in zip(ys_hat, frame_lens)]
                words = [w[:s] for w, s in zip(words, w_sizes)]
                for results in zip(filenames, ys_hat, words):
                    self.print_result(*results) 
Developer: jinserk, Project: pytorch-asr, Lines: 22, Source: predictor.py

Example 2: decode

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def decode(self, data_loader):
        self.model.eval()
        with torch.no_grad():
            for i, (data) in enumerate(data_loader):
                # predict phones using AM
                xs, frame_lens, filenames = data
                if self.use_cuda:
                    xs = xs.cuda()
                ys_hat = self.model(xs)
                frame_lens = torch.ceil(frame_lens.float() / FRAME_REDUCE_FACTOR).int()
                # decode using Kaldi's latgen decoder
                # no need to normalize posteriors with state priors when we use CTC
                # https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/43908.pdf
                loglikes = torch.log(ys_hat)
                if self.use_cuda:
                    loglikes = loglikes.cpu()
                words, alignment, w_sizes, a_sizes = self.decoder(loglikes, frame_lens)
                # print results
                loglikes = [l[:s] for l, s in zip(loglikes, frame_lens)]
                words = [w[:s] for w, s in zip(words, w_sizes)]
                for results in zip(filenames, loglikes, words):
                    self.print_result(*results) 
Developer: jinserk, Project: pytorch-asr, Lines: 24, Source: predict.py

Example 3: validate

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def validate(self, data_loader):
        "validate with label error rate by the edit distance between hyps and refs"
        self.model.eval()
        with torch.no_grad():
            N, D = 0, 0
            t = tqdm(enumerate(data_loader), total=len(data_loader), desc="validating")
            for i, (data) in t:
                xs, ys, frame_lens, label_lens, filenames, texts = data
                if self.use_cuda:
                    xs = xs.cuda()
                ys_hat = self.model(xs)
                # convert likes to ctc labels
                frame_lens = torch.ceil(frame_lens.float() / FRAME_REDUCE_FACTOR).int()
                hyps = [onehot2int(yh[:s]).squeeze() for yh, s in zip(ys_hat, frame_lens)]
                hyps = [remove_duplicates(h, blank=0) for h in hyps]
                # slice the targets
                pos = torch.cat((torch.zeros((1, ), dtype=torch.long), torch.cumsum(label_lens, dim=0)))
                refs = [ys[s:l] for s, l in zip(pos[:-1], pos[1:])]
                # calculate ler
                N += self.edit_distance(refs, hyps)
                D += sum(len(r) for r in refs)
                ler = N * 100. / D
                t.set_description(f"validating (LER: {ler:.2f} %)")
                t.refresh()
            logger.info(f"validating at epoch {self.epoch:03d}: LER {ler:.2f} %") 
Developer: jinserk, Project: pytorch-asr, Lines: 27, Source: train.py

Example 4: _grid_constructor_from_step_size

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def _grid_constructor_from_step_size(self, step_size):

        def _grid_constructor(func, y0, t):
            start_time = t[0]
            end_time = t[-1]

            niters = torch.ceil((end_time - start_time) / step_size + 1).item()
            t_infer = torch.arange(0, niters).to(t) * step_size + start_time
            '''
            if t_infer[-1] > t[-1]:
                t_infer[-1] = t[-1]
            '''
            if t_infer[-1] != t[-1]:
                t_infer[-1] = t[-1]

            return t_infer

        return _grid_constructor 
Developer: autonomousvision, Project: occupancy_flow, Lines: 20, Source: solvers.py

Example 5: _project_old

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def _project_old(self, img_feats, xs, ys):
        out = []
        for i in range(list(img_feats.shape)[0]):
            x, y, img_feat = xs[i], ys[i], img_feats[i]
            x1, y1 = T.floor(x), T.floor(y)
            x2, y2 = T.ceil(x), T.ceil(y)
            q11 = img_feat[..., x1.long(), y1.long()].cuda()
            q12 = img_feat[..., x1.long(), y2.long()].cuda()
            q21 = img_feat[..., x2.long(), y1.long()].cuda()
            q22 = img_feat[..., x2.long(), y2.long()].cuda()

            weights = ((x2 - x) * (y2 - y)).unsqueeze(0)
            q11 *= weights

            weights = ((x - x1) * (y2 - y)).unsqueeze(0)
            q21 *= weights

            weights = ((x2 - x) * (y - y1)).unsqueeze(0)
            q12 *= weights

            weights = ((x - x1) * (y - y1)).unsqueeze(0)
            q22 *= weights
            out.append(q11 + q12 + q21 + q22)
        return T.stack(out).transpose(2, 1) 
Developer: justanhduc, Project: graphx-conv, Lines: 26, Source: networks.py

Example 6: _project

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def _project(self, img_feats, xs, ys):
        x, y = xs.flatten(), ys.flatten()
        idb = T.arange(img_feats.shape[0], device=img_feats.device)
        idb = idb[None].repeat(xs.shape[1], 1).t().flatten().long()

        x1, y1 = T.floor(x), T.floor(y)
        x2, y2 = T.ceil(x), T.ceil(y)
        q11 = img_feats[idb, :, x1.long(), y1.long()].to(img_feats.device)
        q12 = img_feats[idb, :, x1.long(), y2.long()].to(img_feats.device)
        q21 = img_feats[idb, :, x2.long(), y1.long()].to(img_feats.device)
        q22 = img_feats[idb, :, x2.long(), y2.long()].to(img_feats.device)

        weights = ((x2 - x) * (y2 - y)).unsqueeze(1)
        q11 *= weights

        weights = ((x - x1) * (y2 - y)).unsqueeze(1)
        q21 *= weights

        weights = ((x2 - x) * (y - y1)).unsqueeze(1)
        q12 *= weights

        weights = ((x - x1) * (y - y1)).unsqueeze(1)
        q22 *= weights
        out = q11 + q12 + q21 + q22
        return out.view(img_feats.shape[0], -1, img_feats.shape[1]) 
Developer: justanhduc, Project: graphx-conv, Lines: 27, Source: networks.py

Example 7: one_hot

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def one_hot(x, levels):
    """
    Output: One hot Encoding of the input.
    """

    batch_size, channel, H, W = x.size()
    x = x.unsqueeze_(4)
    x = torch.ceil(x * (LEVELS-1)).long()  # LEVELS is a module-level constant in the source project; presumably it matches the levels argument
    onehot = torch.zeros(batch_size, channel, H, W, levels).float().to('cuda').scatter_(4, x, 1)
    #print(onehot)

    return onehot 
Developer: DSE-MSU, Project: DeepRobust, Lines: 14, Source: TherEncoding.py

Example 8: _grid_constructor_from_step_size

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def _grid_constructor_from_step_size(self, step_size):

        def _grid_constructor(func, y0, t):
            start_time = t[0]
            end_time = t[-1]

            niters = torch.ceil((end_time - start_time) / step_size + 1).item()
            t_infer = torch.arange(0, niters).to(t) * step_size + start_time
            if t_infer[-1] > t[-1]:
                t_infer[-1] = t[-1]

            return t_infer

        return _grid_constructor 
Developer: rtqichen, Project: torchdiffeq, Lines: 16, Source: solvers.py

Example 9: rank_order

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def rank_order(
    datum: torch.Tensor, time: int, dt: float = 1.0, **kwargs
) -> torch.Tensor:
    # language=rst
    """
    Encodes data via a rank order coding-like representation. One spike per neuron,
    temporally ordered by decreasing intensity. Inputs must be non-negative.

    :param datum: Tensor of shape ``[n_samples, n_1, ..., n_k]``.
    :param time: Length of rank order-encoded spike train per input variable.
    :param dt: Simulation time step.
    :return: Tensor of shape ``[time, n_1, ..., n_k]`` of rank order-encoded spikes.
    """
    assert (datum >= 0).all(), "Inputs must be non-negative"

    shape, size = datum.shape, datum.numel()
    datum = datum.flatten()
    time = int(time / dt)

    # Create spike times in order of decreasing intensity.
    datum /= datum.max()
    times = torch.zeros(size)
    times[datum != 0] = 1 / datum[datum != 0]
    times *= time / times.max()  # Extended through simulation time.
    times = torch.ceil(times).long()

    # Create spike times tensor.
    spikes = torch.zeros(time, size).byte()
    for i in range(size):
        if 0 < times[i] < time:
            spikes[times[i] - 1, i] = 1

    return spikes.reshape(time, *shape) 
Developer: BindsNET, Project: bindsnet, Lines: 35, Source: encodings.py
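
A minimal usage sketch of the rank_order encoder defined above (the input intensities are made up for illustration): brighter inputs spike earlier, and an input whose encoded spike time lands outside the simulation window produces no spike.

import torch

datum = torch.tensor([1.0, 0.5, 0.25])  # hypothetical input intensities
spikes = rank_order(datum, time=10)     # function from the example above
print(spikes.shape)                     # torch.Size([10, 3])
print(spikes.sum(dim=0))                # at most one spike per input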

Example 10: map_coordinates

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def map_coordinates(input, coordinates):
    ''' PyTorch version of scipy.ndimage.interpolation.map_coordinates
    input: (H, W)
    coordinates: (2, ...)
    '''
    h = input.shape[0]
    w = input.shape[1]

    def _coordinates_pad_wrap(h, w, coordinates):
        coordinates[0] = coordinates[0] % h
        coordinates[1] = coordinates[1] % w
        return coordinates

    co_floor = torch.floor(coordinates).long()
    co_ceil = torch.ceil(coordinates).long()
    d1 = (coordinates[1] - co_floor[1].float())
    d2 = (coordinates[0] - co_floor[0].float())
    co_floor = _coordinates_pad_wrap(h, w, co_floor)
    co_ceil = _coordinates_pad_wrap(h, w, co_ceil)
    f00 = input[co_floor[0], co_floor[1]]
    f10 = input[co_floor[0], co_ceil[1]]
    f01 = input[co_ceil[0], co_floor[1]]
    f11 = input[co_ceil[0], co_ceil[1]]
    fx1 = f00 + d1 * (f10 - f00)
    fx2 = f01 + d1 * (f11 - f01)
    return fx1 + d2 * (fx2 - fx1) 
Developer: zouchuhang, Project: LayoutNetv2, Lines: 28, Source: pano_opt_gen.py

Example 11: gaussian_filter_1d

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def gaussian_filter_1d(tensor, dim, sigma, truncate=4, kernel_size=None, padding_mode='replicate', padding_value=0.0):
    sigma = torch.as_tensor(sigma, device=tensor.device, dtype=tensor.dtype)

    if kernel_size is not None:
        kernel_size = torch.as_tensor(kernel_size, device=tensor.device, dtype=torch.int64)
    else:
        kernel_size = torch.as_tensor(2 * torch.ceil(truncate * sigma) + 1, device=tensor.device, dtype=torch.int64)

    kernel_size = kernel_size.detach()

    kernel_size_int = kernel_size.detach().cpu().numpy()

    mean = (torch.as_tensor(kernel_size, dtype=tensor.dtype) - 1) / 2

    grid = torch.arange(kernel_size, device=tensor.device) - mean

    # reshape the grid so that it can be used as a kernel for F.conv1d
    kernel_shape = [1] * len(tensor.shape)
    kernel_shape[dim] = kernel_size_int
    grid = grid.view(kernel_shape)

    grid = grid.detach()

    padding = [0] * (2 * len(tensor.shape))
    padding[dim * 2 + 1] = math.ceil((kernel_size_int - 1) / 2)
    padding[dim * 2] = math.ceil((kernel_size_int - 1) / 2)
    padding = tuple(reversed(padding))

    if padding_mode in ['replicate']:
        # replication padding has some strange constraints...
        assert len(tensor.shape) - dim <= 2
        padding = padding[:(len(tensor.shape) - 2) * 2]

    tensor_ = F.pad(tensor, padding, padding_mode, padding_value)

    # create gaussian kernel from grid using current sigma
    kernel = torch.exp(-0.5 * (grid / sigma) ** 2)
    kernel = kernel / kernel.sum()

    # convolve input with gaussian kernel
    return F.conv1d(tensor_, kernel) 
Developer: matthias-k, Project: pysaliency, Lines: 43, Source: torch_utils.py

Example 12: forward

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def forward(self, x, x_lens):
        # T, B, U
        seq = [x]
        for i in range(1, self.factor):
            # This doesn't seem to make much sense...
            tmp = torch.zeros_like(x)
            tmp[:-i, :, :] = x[i:, :, :]
            seq.append(tmp)
        x_lens = torch.ceil(x_lens.float() / self.factor).int()
        # Gross, this is horrible. What a waste of memory...
        return torch.cat(seq, dim=2)[::self.factor, :, :], x_lens 
Developer: mlperf, Project: inference, Lines: 13, Source: rnn.py

Example 13: ceil

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def ceil(x, out=None):
    """
    Return the ceil of the input, element-wise.

    The ceil of the scalar x is the smallest integer i, such that i >= x. It is often denoted as :math:`\\lceil x \\rceil`.

    Parameters
    ----------
    x : ht.DNDarray
        The value for which to compute the ceiled values.
    out : ht.DNDarray or None, optional
        A location in which to store the results. If provided, it must have a broadcastable shape. If not provided
        or set to None, a fresh tensor is allocated.

    Returns
    -------
    ceiled : ht.DNDarray
        A tensor of the same shape as x, containing the ceiled value of each element in this tensor. If out was
        provided, ceiled is a reference to it.

    Examples
    --------
    >>> ht.ceil(ht.arange(-2.0, 2.0, 0.4))
    tensor([-2., -1., -1., -0., -0., -0.,  1.,  1.,  2.,  2.])
    """
    return operations.__local_op(torch.ceil, x, out) 
Developer: helmholtz-analytics, Project: heat, Lines: 28, Source: rounding.py

Example 14: test

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def test(self, data_loader):
        "test with word error rate by the edit distance between hyps and refs"
        self.model.eval()
        with torch.no_grad():
            N, D = 0, 0
            t = tqdm(enumerate(data_loader), total=len(data_loader), desc="testing")
            for i, (data) in t:
                xs, ys, frame_lens, label_lens, filenames, texts = data
                if self.use_cuda:
                    xs = xs.cuda()
                ys_hat = self.model(xs)
                frame_lens = torch.ceil(frame_lens.float() / FRAME_REDUCE_FACTOR).int()
                # latgen decoding
                loglikes = torch.log(ys_hat)
                if self.use_cuda:
                    loglikes = loglikes.cpu()
                words, alignment, w_sizes, a_sizes = self.decoder(loglikes, frame_lens)
                hyps = [w[:s] for w, s in zip(words, w_sizes)]
                # convert target texts to word indices
                w2i = lambda w: self.decoder.wordi[w] if w in self.decoder.wordi else self.decoder.wordi['<unk>']
                refs = [[w2i(w) for w in t.strip().split()] for t in texts]
                # calculate wer
                N += self.edit_distance(refs, hyps)
                D += sum(len(r) for r in refs)
                wer = N * 100. / D
                t.set_description(f"testing (WER: {wer:.2f} %)")
                t.refresh()
            logger.info(f"testing at epoch {self.epoch:03d}: WER {wer:.2f} %") 
Developer: jinserk, Project: pytorch-asr, Lines: 30, Source: train.py

Example 15: forward

# Required import: import torch [as alias]
# Or: from torch import ceil [as alias]
def forward(self, x, input_lengths):
        '''
        x  [batch_size, mel_bins, T]

        return [batch_size, T, channels]
        '''
        x = x.transpose(1, 2)

        x_sorted, sorted_lengths, initial_index = sort_batch(x, input_lengths)

        x_packed = nn.utils.rnn.pack_padded_sequence(
            x_sorted, sorted_lengths.cpu().numpy(), batch_first=True)

        self.lstm1.flatten_parameters()
        outputs, _ = self.lstm1(x_packed)

        outputs, _ = nn.utils.rnn.pad_packed_sequence(
            outputs, batch_first=True, total_length=x.size(1))  # use total_length to make sure the recovered sequence length is unchanged

        outputs = outputs.reshape(x.size(0), -1, self.concat_hidden_dim)

        output_lengths = torch.ceil(sorted_lengths.float() / self.n_frames_per_step).long()
        outputs = nn.utils.rnn.pack_padded_sequence(
            outputs, output_lengths.cpu().numpy(), batch_first=True)

        self.lstm2.flatten_parameters()
        outputs, _ = self.lstm2(outputs)

        outputs, _ = nn.utils.rnn.pad_packed_sequence(
            outputs, batch_first=True)

        return outputs[initial_index], output_lengths[initial_index] 
Developer: jxzhanggg, Project: nonparaSeq2seqVC_code, Lines: 34, Source: layers.py


Note: The torch.ceil method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code; do not reproduce this article without permission.