

Python torch.sigmoid Method Code Examples

This article collects typical usage examples of the torch.sigmoid method in Python. If you are wondering what torch.sigmoid does or how to use it in practice, the curated code examples below should help. You can also explore further usage examples from the torch module it belongs to.


The sections below present 15 code examples of the torch.sigmoid method, sorted by popularity by default.
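
Before diving into the examples, here is a minimal sketch of what torch.sigmoid computes: it applies the element-wise logistic function 1 / (1 + exp(-x)), mapping any real-valued tensor into the open interval (0, 1).

import torch

x = torch.tensor([-2.0, 0.0, 2.0])
probs = torch.sigmoid(x)  # element-wise 1 / (1 + exp(-x))
print(probs)  # tensor([0.1192, 0.5000, 0.8808])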

Example 1: forward

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def forward(self, src, tgt, src_mask, tgt_mask):
        """
        Take in and process masked src and target sequences.
        """
        memory = self.encode(src, src_mask)  # (batch_size, max_src_seq, d_model)
        # attented_mem=self.attention(memory,memory,memory,src_mask)
        # memory=attented_mem
        score = self.attention(memory, memory, src_mask)
        attent_memory = score.bmm(memory)
        # memory=self.linear(torch.cat([memory,attent_memory],dim=-1))

        memory, _ = self.gru(attent_memory)  # feed the attention-weighted memory computed above
        '''
        score=torch.sigmoid(self.linear(memory))
        memory=memory*score
        '''
        latent = torch.sum(memory, dim=1)  # (batch_size, d_model)
        logit = self.decode(latent.unsqueeze(1), tgt, tgt_mask)  # (batch_size, max_tgt_seq, d_model)
        # logit,_=self.gru_decoder(logit)
        prob = self.generator(logit)  # (batch_size, max_seq, vocab_size)
        return latent, prob 
Author ID: Nrgeup, Project: controllable-text-attribute-transfer, Lines: 23, Source: model2.py

Example 2: forward

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def forward(self, inputs):
        inputs = list(inputs)
        assert len(inputs) == len(self.in_channels) + 1  # +1 for input image
        img = inputs.pop(0)
        # FPN forward
        x = super().forward(tuple(inputs))
        for rfp_idx in range(self.rfp_steps - 1):
            rfp_feats = [x[0]] + list(
                self.rfp_aspp(x[i]) for i in range(1, len(x)))
            x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)
            # FPN forward
            x_idx = super().forward(x_idx)
            x_new = []
            for ft_idx in range(len(x_idx)):
                add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))
                x_new.append(add_weight * x_idx[ft_idx] +
                             (1 - add_weight) * x[ft_idx])
            x = x_new
        return x 
Author ID: open-mmlab, Project: mmdetection, Lines: 21, Source: rfp.py
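
Example 2 uses the sigmoid output as a per-position gate that blends refined features back into the originals. A standalone sketch of that gating pattern (the tensors here are illustrative stand-ins, not mmdetection code):

import torch

old = torch.randn(1, 8, 4, 4)                  # original FPN feature map
new = torch.randn(1, 8, 4, 4)                  # recursively refined feature map
gate = torch.sigmoid(torch.randn(1, 1, 4, 4))  # stands in for self.rfp_weight(x)
blended = gate * new + (1 - gate) * old        # convex combination, gate in (0, 1)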

Example 3: forward

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def forward(self, iput):
        bin_a = None
        level1_rep = None
        [batch_size, _, _] = iput.size()

        for hm, hm_encdr in enumerate(self.rnn_hms):
            hmod = iput[:, :, hm].contiguous()
            hmod = torch.t(hmod).unsqueeze(2)

            op, a = hm_encdr(hmod)
            if level1_rep is None:
                level1_rep = op
                bin_a = a
            else:
                level1_rep = torch.cat((level1_rep, op), 1)
                bin_a = torch.cat((bin_a, a), 1)
        level1_rep = level1_rep.permute(1, 0, 2)
        final_rep_1, hm_level_attention_1 = self.hm_level_rnn_1(level1_rep)
        final_rep_1 = final_rep_1.squeeze(1)
        prediction_m = self.fdiff1_1(final_rep_1)

        return torch.sigmoid(prediction_m)
Author ID: kipoi, Project: models, Lines: 25, Source: models.py

Example 4: plot_wh_methods

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
import matplotlib.pyplot as plt
import numpy as np
import torch

def plot_wh_methods():  # from utils.utils import *; plot_wh_methods()
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2

    fig = plt.figure(figsize=(6, 3), dpi=150)
    plt.plot(x, ya, '.-', label='yolo method')
    plt.plot(x, yb ** 2, '.-', label='^2 power method')
    plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200) 
Author ID: zbyuan, Project: pruning_yolov3, Lines: 20, Source: utils.py

Example 5: fuse_prob

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def fuse_prob(self, x_emb, dec_logit):
        ''' Takes context and decoder logit to perform word embedding fusion '''
        # Compute distribution for dec/emb
        if self.fuse_normalize:
            emb_logit = nn.functional.linear(nn.functional.normalize(x_emb, dim=-1),
                                             nn.functional.normalize(self.emb_table.weight, dim=-1))
        else:
            emb_logit = nn.functional.linear(x_emb, self.emb_table.weight)
        emb_prob = (nn.functional.relu(self.temp)*emb_logit).softmax(dim=-1)
        dec_prob = dec_logit.softmax(dim=-1)
        # Mix distribution
        if self.fuse_learnable:
            fused_prob = (1-torch.sigmoid(self.fuse_lambda))*dec_prob +\
                torch.sigmoid(self.fuse_lambda)*emb_prob
        else:
            fused_prob = (1-self.fuse_lambda)*dec_prob + \
                self.fuse_lambda*emb_prob
        # Log-prob
        log_fused_prob = (fused_prob+self.eps).log()

        return log_fused_prob 
Author ID: Alexander-H-Liu, Project: End-to-end-ASR-Pytorch, Lines: 23, Source: plugin.py
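
The learnable branch above keeps the raw parameter fuse_lambda unconstrained for the optimizer and squashes it with sigmoid so the mixing weight stays in (0, 1). A minimal sketch of that idea (the tensors are made up, not from the original repo):

import torch

fuse_lambda = torch.nn.Parameter(torch.tensor(0.0))  # optimizer sees an unconstrained scalar
dec_prob = torch.softmax(torch.randn(2, 5), dim=-1)
emb_prob = torch.softmax(torch.randn(2, 5), dim=-1)
lam = torch.sigmoid(fuse_lambda)                      # mixing weight in (0, 1)
fused = (1 - lam) * dec_prob + lam * emb_prob         # each row still sums to 1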

Example 6: logits_nll_loss

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def logits_nll_loss(input, target, weight=None, reduction='mean'):
    """logits_nll_loss
    Different from nll loss, this is for sigmoid based loss.
    The difference is this will add along C(class) dim.
    """

    assert input.dim() == 2, 'Input shape should be (B, C).'
    if input.size(0) != target.size(0):
        raise ValueError(
            'Expected input batch_size ({}) to match target batch_size ({}).' .format(
                input.size(0), target.size(0)))

    ret = input.sum(dim=-1)
    if weight is not None:
        ret = _batch_weight(weight, target) * ret
    return reducing(ret, reduction) 
Author ID: PistonY, Project: torch-toolbox, Lines: 18, Source: functional.py

Example 7: evo_norm

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def evo_norm(x, prefix, running_var, v, weight, bias,
             training, momentum, eps=0.1, groups=32):
    if prefix == 'b0':
        if training:
            var = torch.var(x, dim=(0, 2, 3), keepdim=True)
            running_var.mul_(momentum)
            running_var.add_((1 - momentum) * var)
        else:
            var = running_var
        if v is not None:
            den = torch.max((var + eps).sqrt(), v * x + instance_std(x, eps))
            x = x / den * weight + bias
        else:
            x = x * weight + bias
    else:
        if v is not None:
            x = x * torch.sigmoid(v * x) / group_std(x,
                                                     groups, eps) * weight + bias
        else:
            x = x * weight + bias

    return x 
Author ID: PistonY, Project: torch-toolbox, Lines: 24, Source: functional.py
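
The helpers instance_std and group_std are defined elsewhere in the same source file. A plausible sketch of them, assuming the usual EvoNorm definitions rather than the verbatim torch-toolbox implementations:

import torch

def instance_std(x, eps=1e-5):
    # per-sample, per-channel std over the spatial dims of an NCHW tensor
    var = torch.var(x, dim=(2, 3), keepdim=True)
    return (var + eps).sqrt()

def group_std(x, groups=32, eps=1e-5):
    # std over channel groups (and spatial dims), broadcast back to NCHW
    n, c, h, w = x.size()
    g = x.view(n, groups, -1)
    std = (torch.var(g, dim=2, keepdim=True) + eps).sqrt()
    return std.expand(-1, -1, g.size(2)).reshape(n, c, h, w)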

Example 8: forward

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def forward(self):
        """ Calculate loss:
            $L(sigma) = (||Phi(embed + epsilon) - Phi(embed)||_2^2)
            // (regularization^2) - rate * log(sigma)$
        :return: a scalar, the target loss.
        :rtype: torch.FloatTensor
        """
        ratios = torch.sigmoid(self.ratio)  # S * 1
        x = self.input_embeddings + 0.0
        x_tilde = (
            x
            + ratios
            * torch.randn(self.input_size, self.input_dimension).to(x.device)
            * self.scale
        )  # S * D
        s = self.Phi(x)  # D or S * D
        s_tilde = self.Phi(x_tilde)
        loss = (s_tilde - s) ** 2
        if self.regular is not None:
            loss = torch.mean(loss / self.regular ** 2)
        else:
            loss = torch.mean(loss) / torch.mean(s ** 2)

        return loss - torch.mean(torch.log(ratios)) * self.rate 
Author ID: interpretml, Project: interpret-text, Lines: 26, Source: unified_information.py

Example 9: predict

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def predict(self, state):
        
        example, kb = self.gen_example(state)
        feature = self.gen_feature(example)
        
        input_ids = torch.tensor([feature.input_ids], dtype=torch.long).to(self.device)
        input_masks = torch.tensor([feature.input_mask], dtype=torch.long).to(self.device)
        segment_ids = torch.tensor([feature.segment_ids], dtype=torch.long).to(self.device)

        with torch.no_grad():
            logits = self.model(input_ids, segment_ids, input_masks, labels=None)
            logits = torch.sigmoid(logits)
        preds = (logits > 0.4).float()
        preds_numpy = preds.cpu().nonzero().squeeze().numpy()
        
#        for i in preds_numpy:
#            if i < 10:
#                print(Constants.domains[i], end=' ')
#            elif i < 17:
#                print(Constants.functions[i-10], end=' ')
#            else:
#                print(Constants.arguments[i-17], end=' ')
#        print()
        
        return preds, kb 
Author ID: ConvLab, Project: ConvLab, Lines: 27, Source: predictor.py
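
Because sigmoid treats every logit as an independent probability, thresholding the result (here at 0.4) turns one score vector into a multi-label prediction. A standalone illustration of that step with made-up logits:

import torch

logits = torch.tensor([[2.1, -1.5, 0.0]])
preds = (torch.sigmoid(logits) > 0.4).float()  # tensor([[1., 0., 1.]])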

Example 10: forward

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def forward(self, src_seq, src_pos, act_vocab_id):
        # -- Prepare masks
        slf_attn_mask = get_attn_key_pad_mask(seq_k=src_seq, seq_q=src_seq)
        non_pad_mask = get_non_pad_mask(src_seq)

        # -- Forward Word Embedding
        enc_output = self.src_word_emb(src_seq) + self.position_enc(src_pos)
        # -- Forward Ontology Embedding
        ontology_embedding = self.src_word_emb(act_vocab_id)

        for enc_layer in self.layer_stack:
            enc_output, enc_slf_attn = enc_layer(
                enc_output,
                non_pad_mask=non_pad_mask,
                slf_attn_mask=slf_attn_mask)

        dot_prod = torch.sum(enc_output[:, :, None, :] * ontology_embedding[None, None, :, :], -1)
        #index = length[:, None, None].repeat(1, 1, dot_prod.size(-1))
        #pooled_dot_prod = dot_prod.gather(1, index).squeeze()
        pooled_dot_prod = dot_prod[:, 0, :]
        pooling_likelihood = torch.sigmoid(pooled_dot_prod)
        return pooling_likelihood, enc_output 
Author ID: ConvLab, Project: ConvLab, Lines: 24, Source: Transformer.py

Example 11: forward

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def forward(self, grad_norm, grad_sign, param_norm, param_sign, loss_norm, hx):
            batch_size = grad_norm.size(0)
            inputs = torch.stack((grad_norm, grad_sign, param_norm, param_sign, loss_norm.expand(grad_norm.size(0))),
                                 dim=1)
            if hx is None:
                self.lrs = []
                if self.forget_gate:
                    self.fgs = []
                hx = (self.h_0.expand((batch_size, -1)), self.c_0.expand((batch_size, -1)))
            h, c = self.lstm(inputs, hx)
            if self.layer_norm is not None:
                h = self.layer_norm(h)
            if self.input_gate:
                lr = torch.sigmoid(self.lr_layer(h))
            else:
                lr = self.output_layer(h)
            self.lrs.append(lr.mean().item())
            if self.forget_gate:
                fg = torch.sigmoid(self.fg_layer(h))
                self.fgs.append(fg.mean().item())
                return lr, fg, (h, c)
            else:
                return lr, (h, c) 
Author ID: THUDM, Project: ScenarioMeta, Lines: 25, Source: meta.py

Example 12: forward

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def forward(self, h, emb):
        sbatch, nsq, lchunk = h.size()
        h = h.contiguous()
        """
        # Slower version
        ws = list(self.adapt_w(emb).view(sbatch, self.ncha, 1, self.kw))
        bs = list(self.adapt_b(emb))
        hs = list(torch.chunk(h, sbatch, dim=0))
        out = []
        for hi, wi, bi in zip(hs, ws, bs):
            out.append(torch.nn.functional.conv1d(hi, wi, bias=bi, padding=self.kw // 2, groups=nsq))
        h = torch.cat(out, dim=0)
        """
        # Faster version, fully using group convolution
        w = self.adapt_w(emb).view(-1, 1, self.kw)
        b = self.adapt_b(emb).view(-1)
        h = torch.nn.functional.conv1d(h.view(1, -1, lchunk), w, bias=b,
                                       padding=self.kw // 2,
                                       groups=sbatch * nsq).view(sbatch, self.ncha, lchunk)
        h = self.net.forward(h)
        s, m = torch.chunk(h, 2, dim=1)
        s = torch.sigmoid(s + 2) + 1e-7  # strictly positive scale; the +2 biases it toward 1 at init
        return s, m

Author ID: joansj, Project: blow, Lines: 27, Source: blow.py

Example 13: forward

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def forward(self, x):
        """
        :param x: Long tensor of size ``(batch_size, num_fields)``
        """
        embed_x = self.embedding(x)
        atten_x = self.atten_embedding(embed_x)
        cross_term = atten_x.transpose(0, 1)
        for self_attn in self.self_attns:
            cross_term, _ = self_attn(cross_term, cross_term, cross_term)
        cross_term = cross_term.transpose(0, 1)
        if self.has_residual:
            V_res = self.V_res_embedding(embed_x)
            cross_term += V_res
        cross_term = F.relu(cross_term).contiguous().view(-1, self.atten_output_dim)
        x = self.linear(x) + self.attn_fc(cross_term) + self.mlp(embed_x.view(-1, self.embed_output_dim))
        return torch.sigmoid(x.squeeze(1)) 
Author ID: rixwew, Project: pytorch-fm, Lines: 18, Source: afi.py

Example 14: swish

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def swish(x):
    return x * torch.sigmoid(x) 
Author ID: ymcui, Project: cmrc2019, Lines: 4, Source: modeling.py
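
A quick usage note: swish (also known as SiLU) is smooth and non-monotonic near zero, and recent PyTorch versions ship it built in as torch.nn.functional.silu, which should match the one-liner above.

import torch
import torch.nn.functional as F

x = torch.linspace(-3, 3, 7)
assert torch.allclose(x * torch.sigmoid(x), F.silu(x))  # same definition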

Example 15: BCE_bootstrap_with_logits

# Required module: import torch [as alias]
# Or: from torch import sigmoid [as alias]
def BCE_bootstrap_with_logits(input, target, ishard=False, beta=0.95, weight=None, size_average=True):
    r"""Function that measures Binary Cross Entropy between target and output
    logits with prediction consistency(bootstrap)

    Args:
        input: Variable of arbitrary shape
        target: Variable of the same shape as input
        ishard: Choose soft/hard bootstrap mode
        beta: Weight between ``gt`` label and prediction. In paper, 0.8 for hard and 0.95 for soft
        weight (Variable, optional): a manual rescaling weight
                if provided it's repeated to match input tensor shape
        size_average (bool, optional): By default, the losses are averaged
                over observations for each minibatch. However, if the field
                sizeAverage is set to False, the losses are instead summed
                for each minibatch. Default: ``True``

    Examples::

         >>> input = autograd.Variable(torch.randn(3), requires_grad=True)
         >>> target = autograd.Variable(torch.FloatTensor(3).random_(2))
         >>> loss = BCE_bootstrap_with_logits(input, target)
         >>> loss.backward()
    """
    if not (target.size() == input.size()):
        raise ValueError("Target size ({}) must be the same as input size ({})".format(target.size(), input.size()))
    input_prob = torch.sigmoid(input)
    if ishard:
        target = target * beta + (input_prob>0.5) * (1-beta)
    else:
        target = target * beta + input_prob * (1-beta)
    print(target)
    max_val = (-input).clamp(min=0)
    loss = input - input * target + max_val + ((-max_val).exp() + (-input - max_val).exp()).log()

    if weight is not None:
        loss = loss * weight

    if size_average:
        return loss.mean()
    else:
        return loss.sum() 
Author ID: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines: 43, Source: loss_function.py
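
As a sanity sketch (not from the original repo): with beta=1.0 the bootstrap target reduces to the ground-truth labels, so the result should match PyTorch's built-in numerically stable BCE-with-logits.

import torch
import torch.nn.functional as F

inp = torch.randn(4)
tgt = torch.empty(4).random_(2)
ours = BCE_bootstrap_with_logits(inp, tgt, beta=1.0)  # note: also prints the target
ref = F.binary_cross_entropy_with_logits(inp, tgt)
assert torch.allclose(ours, ref, atol=1e-6)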


Note: the torch.sigmoid method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult each project's license before distributing or reusing the code, and do not repost without permission.