

Python utils.fill_with_neg_inf Method Code Examples

This article collects typical usage examples of the Python method fairseq.utils.fill_with_neg_inf. If you have been wondering what exactly utils.fill_with_neg_inf does, how to call it, or what real usage looks like, the curated examples below should help. You can also explore further usage examples from the containing module, fairseq.utils.


The sections below present 15 code examples of the utils.fill_with_neg_inf method, sorted by popularity by default.
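
For context, fill_with_neg_inf itself is a small helper: it fills a tensor with -inf while preserving its original dtype, which is what makes it safe to use with fp16 models. Below is a minimal sketch of the idea and of the typical causal-mask pattern that recurs in the examples that follow (assumed behaviour; check your fairseq version for the exact implementation):

import torch

def fill_with_neg_inf(t):
    # FP16-friendly: fill in float32, then cast back to the tensor's own dtype.
    return t.float().fill_(float("-inf")).type_as(t)

# Typical use: build a causal (future) mask for decoder self-attention.
dim = 4
mask = torch.triu(fill_with_neg_inf(torch.zeros(dim, dim)), 1)
# mask[i, j] == -inf for j > i and 0 elsewhere, so position i cannot attend to future positions.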

Example 1: buffered_future_mask

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def buffered_future_mask(self, tensor):
        """attend all surounding words except itself
           [[0, -inf, 0]
            [0,  0, -inf]
            [0,  0,   0]]
        The attention mask is not the usual causal (triangular) mask, since we predict y_{t+1} at time-step t
        """
        dim = tensor.size(0)
        if (
            not hasattr(self, "_future_mask")
            or self._future_mask is None
            or self._future_mask.device != tensor.device
        ):
            self._future_mask = torch.triu(
                utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
            )
            self._future_mask = torch.tril(self._future_mask, 1)
        if self._future_mask.size(0) < dim:
            self._future_mask = torch.triu(
                utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
            )
            self._future_mask = torch.tril(self._future_mask, 1)
        return self._future_mask[:dim, :dim] 
Developer: pytorch, Project: translate, Lines: 25, Source file: cloze_transformer_model.py
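
As a quick sanity check of the mask described in the docstring above, here is a standalone snippet (illustrative only, not part of the original model) that reproduces the 3x3 case:

import torch
from fairseq import utils

dim = 3
m = torch.triu(utils.fill_with_neg_inf(torch.zeros(dim, dim)), 1)
m = torch.tril(m, 1)
# tensor([[0., -inf, 0.],
#         [0., 0., -inf],
#         [0., 0., 0.]])
# Only the first super-diagonal is -inf: at step t the model may attend everywhere
# except the very next position, which holds the token y_{t+1} it is trying to predict.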

Example 2: get_attention_mask

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def get_attention_mask(self, x, src_len, waitk=None):
        if waitk is None:
            if self.multi_waitk:
                assert self.min_waitk <= self.max_waitk
                waitk = random.randint(min(self.min_waitk, src_len),
                                       min(src_len, self.max_waitk))
            else:
                waitk = self.waitk

        if waitk < src_len:
            encoder_attn_mask = torch.triu(
                utils.fill_with_neg_inf(
                    x.new(x.size(0), src_len)
                ), waitk
            )
            if waitk <= 0:
                encoder_attn_mask[:, 0] = 0

        else:
            encoder_attn_mask = None
        return encoder_attn_mask 
Developer: elbayadm, Project: attn2d, Lines: 23, Source file: waitk_transformer.py
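
For intuition, the wait-k mask produced above looks like this for a small, hypothetical case (tgt_len=3, src_len=4, waitk=2); standalone snippet, not part of the original class:

import torch
from fairseq import utils

tgt_len, src_len, waitk = 3, 4, 2
mask = torch.triu(utils.fill_with_neg_inf(torch.zeros(tgt_len, src_len)), waitk)
# tensor([[0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])
# When writing target token t (row t), the decoder may only attend to the first t + waitk
# source positions; later source tokens have not been "read" yet.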

Example 3: get_transitions

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def get_transitions(self, controls):
        """
        Inputs:
            controls:  log(rho) & log(1-rho)  read/write probabilities: (Tt, B, Ts, 2)
        Returns the log-transition matrix (Tt, B, Ts, Ts)
            k->j :  p(z_t+1 = j | z_t = k) = (1 - rho_tj) * prod_{l=k}^{j-1} rho_tl
        """
        Tt, N, Ts, _ = controls.size()
        # force rho_tTx = 0
        controls[:, :, -1, 0] = - float('inf')
        controls[:, :, -1, 1] = 0
        M = utils.fill_with_neg_inf(controls.new_empty((Tt, N, Ts, Ts)))
        for k in range(Ts):
            for j in range(k, Ts):
                M[:, :, k, j] = controls[:, :, j, 1] + torch.sum(controls[:, :, k:j, 0], dim=-1)
        print('Controls p(read)', torch.exp(controls[:,:,:,0]).round().data)
        print('M(t=0)', torch.exp(M[0,0]).round().data)
        print('M(t=2)', torch.exp(M[2,0]).round().data)

        return M 
Developer: elbayadm, Project: attn2d, Lines: 22, Source file: hmm_controls.py
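
The nested loop above fills only the upper triangle k <= j, since the model can never move back to an earlier source position. A tiny standalone check with made-up numbers (Ts = 3, uniform read probability rho = 0.5) shows the resulting transition matrix in probability space:

import torch

rho = torch.full((3,), 0.5)                     # hypothetical read probabilities
p_read, p_write = rho.log(), (1 - rho).log()    # log(rho), log(1 - rho)
M = torch.full((3, 3), float('-inf'))
for k in range(3):
    for j in range(k, 3):
        M[k, j] = p_write[j] + p_read[k:j].sum()
print(M.exp())
# tensor([[0.5000, 0.2500, 0.1250],
#         [0.0000, 0.5000, 0.2500],
#         [0.0000, 0.0000, 0.5000]])
# Row k is the distribution over the next source position j >= k. Rows do not sum to 1 here
# because this sketch skips the method's trick of forcing a write at the last source position.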

Example 4: _forward_alpha

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def _forward_alpha(self, emissions, M):
        Tt, B, Ts = emissions.size()
        alpha = utils.fill_with_neg_inf(torch.empty_like(emissions))  # Tt, B, Ts
        # initialization  t=1
        initial = torch.empty_like(alpha[0]).fill_(-math.log(Ts))  # log(1/Ts)
        # initial = utils.fill_with_neg_inf(torch.empty_like(alpha[0])) 
        # initial[:, 0] = 0
        alpha[0] = emissions[0] + initial
        # print('Initialize alpha:', alpha[0])
        # induction
        for i in range(1, Tt):
            alpha[i] = torch.logsumexp(alpha[i-1].unsqueeze(-1) + M[i-1], dim=1)
            alpha[i] = alpha[i] + emissions[i]
            # print('Emissions@', i, emissions[i])
            # print('alpha@',i, alpha[i])
        return alpha 
Developer: elbayadm, Project: attn2d, Lines: 18, Source file: hmm_controls.py
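
This is the standard HMM forward recursion carried out in log space, alpha_t(j) = e_t(j) + logsumexp_k(alpha_{t-1}(k) + M_{t-1}(k, j)), initialized here with a uniform distribution over the Ts source positions. One induction step in isolation, with hypothetical sizes (B=1, Ts=3):

import torch

alpha_prev = torch.log(torch.tensor([[0.2, 0.5, 0.3]]))       # B, Ts
M_prev = torch.log(torch.tensor([[[0.6, 0.3, 0.1],
                                  [0.0, 0.7, 0.3],
                                  [0.0, 0.0, 1.0]]]))          # B, Ts, Ts  (k -> j)
emission = torch.log(torch.tensor([[0.9, 0.5, 0.1]]))          # B, Ts
alpha = torch.logsumexp(alpha_prev.unsqueeze(-1) + M_prev, dim=1) + emission
# alpha[b, j] = log sum_k exp(alpha_prev[b, k] + M_prev[b, k, j]) + emission[b, j]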

Example 5: get_transitions

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def get_transitions(self, controls):
        """
        Inputs:
            controls:  log(rho) & log(1-rho)  read/write probabilities: (Tt, B, Ts, 2)
        Returns the log-transition matrix (Tt, B, Ts, Ts)
            k->j :  p(z_t+1 = j | z_t = k) = (1 - rho_tj) * prod_{l=k}^{j-1} rho_tl
        """
        Tt, N, Ts, _ = controls.size()
        # force rho_tTx = 0
        controls[:, :, -1, 0] = - float('inf')
        controls[:, :, -1, 1] = 0
        M = utils.fill_with_neg_inf(controls.new_empty((Tt, N, Ts, Ts)))
        for k in range(Ts):
            for j in range(k, Ts):
                M[:, :, k, j] = controls[:, :, j, 1] + torch.sum(controls[:, :, k:j, 0], dim=-1)
        return M 
Developer: elbayadm, Project: attn2d, Lines: 18, Source file: hmm_controls2.py

Example 6: get_transitions

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def get_transitions(self, controls):
        """
        Inputs:
            controls:  log(rho) & log(1-rho)  read/write probabilities: (Tt, N, Ts, 2)
        Returns the log-transition matrix (Tt, N, Ts, Ts)
            k->j :  p(z_t+1 = j | z_t = k) = (1 - rho_tj) * prod_{l=k}^{j-1} rho_tl
        """
        Tt, N, Ts, _ = controls.size()
        # force rho_tTx = 0
        controls[:, :, -1, 0] = - float('inf')
        controls[:, :, -1, 1] = 0
        M = utils.fill_with_neg_inf(controls.new_empty((Tt, N, Ts, Ts)))
        for k in range(Ts):
            for j in range(k, Ts):
                M[:, :, k, j] = controls[:, :, j, 1] + torch.sum(controls[:, :, k:j, 0], dim=-1)
        return M 
Developer: elbayadm, Project: attn2d, Lines: 18, Source file: dynamic_controls.py

Example 7: fill_controls_emissions_grid

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def fill_controls_emissions_grid(self, controls, emissions, indices, src_length):
        """
        Return controls (C) and emissions (E) covering the whole grid
        C : Tt, N, Ts, 2
        E : Tt, N, Ts
        """
        N = controls[0].size(0)
        tgt_length = len(controls)
        Cread = controls[0].new_zeros((tgt_length, src_length, N, 1))
        Cwrite = utils.fill_with_neg_inf(torch.empty_like(Cread))
        triu_mask = torch.triu(controls[0].new_ones(tgt_length, src_length), 1).byte()
        triu_mask = triu_mask.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, N, 1)
        Cwrite.masked_fill_(triu_mask, 0)
        C = torch.cat((Cread, Cwrite), dim=-1)
        E = utils.fill_with_neg_inf(emissions[0].new(tgt_length, src_length, N))
        for t, (subC, subE) in enumerate(zip(controls, emissions)):
            select = [indices[t]]
            C[t].index_put_(select, subC.transpose(0, 1))
            E[t].index_put_(select, subE.transpose(0, 1))
        return C.transpose(1, 2), E.transpose(1, 2) 
Developer: elbayadm, Project: attn2d, Lines: 22, Source file: dynamic_controls.py

Example 8: _forward_alpha

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def _forward_alpha(self, emissions, M):
        Tt, B, Ts = emissions.size()
        alpha = utils.fill_with_neg_inf(torch.empty_like(emissions))  # Tt, B, Ts
        # initialization  t=1
        # initial = torch.empty_like(alpha[0]).fill_(-math.log(Ts))  # log(1/Ts)
        initial = utils.fill_with_neg_inf(torch.empty_like(alpha[0])) 
        initial[:, 0] = 0
        alpha[0] = emissions[0] + initial
        # print('Initialize alpha:', alpha[0])
        # induction
        for i in range(1, Tt):
            alpha[i] = torch.logsumexp(alpha[i-1].unsqueeze(-1) + M[i-1], dim=1)
            alpha[i] = alpha[i] + emissions[i]
            # print('Emissions@', i, emissions[i])
            # print('alpha@',i, alpha[i])
        return alpha 
Developer: elbayadm, Project: attn2d, Lines: 18, Source file: hmm_controls3.py

Example 9: fill_controls_emissions_grid

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def fill_controls_emissions_grid(self, controls, emissions, indices, src_length):
        """
        Return controls (C), emissions (E), and grid occupancy weights (gamma) covering the whole grid
        C : Tt, N, Ts, 2
        E : Tt, N, Ts
        gamma : Tt, N, Ts
        """
        N = controls[0].size(0)
        tgt_length = len(controls)
        gamma = controls[0].new_zeros((tgt_length, src_length, N))
        Cread = controls[0].new_zeros((tgt_length, src_length, N, 1))
        Cwrite = utils.fill_with_neg_inf(torch.empty_like(Cread))
        triu_mask = torch.triu(controls[0].new_ones(tgt_length, src_length), 1).byte()
        triu_mask = triu_mask.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, N, 1)
        Cwrite.masked_fill_(triu_mask, 0)
        C = torch.cat((Cread, Cwrite), dim=-1)
        E = utils.fill_with_neg_inf(emissions[0].new(tgt_length, src_length, N))
        for t, (subC, subE) in enumerate(zip(controls, emissions)):
            select = [indices[t].to(C.device)]
            C[t].index_put_(select, subC.transpose(0, 1))
            E[t].index_put_(select, subE.transpose(0, 1))
            gamma[t].index_fill_(0, select[0], 1)
        # Normalize gamma:
        gamma = gamma / gamma.sum(dim=1, keepdim=True)
        return C.transpose(1, 2), E.transpose(1, 2), gamma.transpose(1, 2) 
Developer: elbayadm, Project: attn2d, Lines: 26, Source file: oracle_controls.py

Example 10: forward

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def forward(self, x, need_attention_weights=False):
        x = F.glu(self.linear(x), dim=-1) # B, Tt, Ts, C
        if not need_attention_weights:
            # Maxpool 
            B, Tt, Ts, C = x.size()
            mask = torch.triu(utils.fill_with_neg_inf(x.new(Tt, Ts)), self.waitk)
            x, _ = (
                x + mask.unsqueeze(0).unsqueeze(-1)
            ).max(dim=2)  # B, Tt, C
            return x, None
        # Output attention weights:
        if need_attention_weights:
            # x in B, Tt, Ts, C
            B, Tt, Ts, C = x.size()
            x, indices = x.max(dim=2)
            # indices in B, Tt, C with each channel selecting a source position
            # Terrible but will do:
            attn = x.new_zeros(B, Tt, Ts)
            for i in range(Ts):
                attn[:,:,i] = indices.eq(i).sum(dim=-1)
            # Normalize
            attn = attn / attn.sum(dim=-1, keepdim=True)
        return x, attn 
Developer: elbayadm, Project: attn2d, Lines: 25, Source file: aggregators.py
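
The trick in the max-pool branch is that adding -inf to a position before taking the max removes it from consideration entirely, so target step t only pools over the source prefix it is allowed to see. A reduced standalone sketch with hypothetical sizes:

import torch
from fairseq import utils

B, Tt, Ts, C, waitk = 1, 3, 4, 2, 1
x = torch.randn(B, Tt, Ts, C)
mask = torch.triu(utils.fill_with_neg_inf(x.new(Tt, Ts)), waitk)     # Tt, Ts
pooled, _ = (x + mask.unsqueeze(0).unsqueeze(-1)).max(dim=2)         # B, Tt, C
# Row t of the mask hides all source positions beyond t + waitk, so the max at target
# step t is taken only over the readable source prefix.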

Example 11: buffered_mask

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def buffered_mask(self, tensor):
        dim = tensor.size(-1)
        if self._mask is None:
            self._mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
        if self._mask.size(0) < dim:
            self._mask = torch.triu(utils.fill_with_neg_inf(self._mask.resize_(dim, dim)), 1)
        return self._mask[:dim, :dim] 
Developer: nusnlp, Project: crosentgec, Lines: 9, Source file: multihead_attention.py

Example 12: buffered_future_mask

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def buffered_future_mask(self, tensor):
        dim = tensor.size(0)
        if (
            not hasattr(self, "_future_mask")
            or self._future_mask is None
            or self._future_mask.device != tensor.device
        ):
            self._future_mask = torch.triu(
                utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
            )
        if self._future_mask.size(0) < dim:
            self._future_mask = torch.triu(
                utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
            )
        return self._future_mask[:dim, :dim] 
Developer: pytorch, Project: fairseq, Lines: 17, Source file: vggtransformer.py

Example 13: buffered_future_mask

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def buffered_future_mask(self, tensor):
        dim = tensor.size(0)
        # self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
        if (
            self._future_mask.size(0) == 0
            or (not self._future_mask.device == tensor.device)
            or self._future_mask.size(0) < dim
        ):
            self._future_mask = torch.triu(
                utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
            )
        self._future_mask = self._future_mask.to(tensor)
        return self._future_mask[:dim, :dim] 
Developer: pytorch, Project: fairseq, Lines: 15, Source file: transformer.py
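
All of these buffered_future_mask variants are consumed the same way: the cached upper-triangular -inf matrix is sliced to the current length and added to the raw attention scores before the softmax, which zeroes out attention to future positions. A hedged standalone illustration (not the original decoder code):

import torch
import torch.nn.functional as F

dim = 4
mask = torch.triu(torch.full((dim, dim), float('-inf')), 1)
scores = torch.randn(dim, dim)                  # query x key attention logits
probs = F.softmax(scores + mask, dim=-1)
# probs[i, j] == 0 for j > i: position i never attends to later positions.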

Example 14: buffered_future_mask

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def buffered_future_mask(self, tensor):
        dim = tensor.size(0)
        if not hasattr(self, '_future_mask') or self._future_mask is None or self._future_mask.device != tensor.device:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(tensor.new(dim, dim)), 1)
        if self._future_mask.size(0) < dim:
            self._future_mask = torch.triu(utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1)
        return self._future_mask[:dim, :dim] 
Developer: pytorch, Project: fairseq, Lines: 9, Source file: lightconv.py

Example 15: forward

# Required import: from fairseq import utils [as alias]
# Or: from fairseq.utils import fill_with_neg_inf [as alias]
def forward(self, src_tokens, src_lengths=None, mask=None, **kwargs):
        """
        Args:
            src_tokens (batch, src_len)
            src_lengths (batch)
        Returns:
            dict:
                - **encoder_out** (src_len, batch, embed_dim)
                - **encoder_padding_mask** (batch, src_len)
        """
        # embed tokens and positions
        x = self.embed_scale * self.embed_tokens(src_tokens)
        if self.embed_positions is not None:
            x += self.embed_positions(src_tokens)
        x = F.dropout(x, p=self.dropout, training=self.training)

        # B x T x C -> T x B x C
        x = x.transpose(0, 1)
        # compute padding mask
        encoder_padding_mask = src_tokens.eq(self.padding_idx)
        if not encoder_padding_mask.any():
            encoder_padding_mask = None

        # encoder layers
        if mask is None:
            mask = torch.triu(utils.fill_with_neg_inf(x.new(x.size(0), x.size(0))), 1)
        for layer in self.layers:
            # Make the encoder unidirectional
            x = layer(
                x, encoder_padding_mask,
                self_attn_mask=mask,
            )

        if self.normalize:
            x = self.layer_norm(x)

        return {
            'encoder_out': x,  # T x B x C
            'encoder_padding_mask': encoder_padding_mask,  # B x T
        } 
Developer: elbayadm, Project: attn2d, Lines: 40, Source file: waitk_transformer.py


Note: The fairseq.utils.fill_with_neg_inf examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and the source code remains the copyright of its original authors; please refer to the license of the corresponding project before distributing or using it. Do not reproduce without permission.