

Python torch.normal Method Code Examples

This article collects typical usage examples of the Python method torch.normal. If you are wondering how to call torch.normal, or what it can be used for, the curated code examples below may help. You can also explore usage examples of other methods in the torch package.


Fifteen code examples of the torch.normal method are shown below, sorted by popularity by default.
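Before diving into the project-level examples, here is a minimal sketch of the main torch.normal call patterns; the shapes are illustrative choices, not taken from any of the projects below.

import torch

# per-element mean and std tensors (shapes must match or broadcast)
a = torch.normal(mean=torch.zeros(2, 3), std=torch.ones(2, 3))

# shared scalar mean with a per-element std tensor
b = torch.normal(mean=0.0, std=torch.ones(2, 3))

# scalar mean and std with an explicit output size (recent PyTorch versions)
c = torch.normal(0.0, 1.0, size=(2, 3))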

Example 1: get_sparse_input

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def get_sparse_input(x_input):
    """
    Build a sparse one-hot-style tensor from x_input: an output of shape [T, B, V]
    where x_sparse[t][b][x_input[t][b]] = 1 and every other entry stays at zero.
    :param x_input: *Tensor* of shape [T, B]
    :return: *Tensor* of shape [B, T, V]
    """
    # indexes that will have no effect in copying
    sw = time.time()  # `time` and `cfg` are module-level imports in the source project
    print('sparse input start: %s' % sw)
    ignore_index = [0]
    # std is an all-zeros tensor, so torch.normal here just allocates a zero [T, B, V] tensor
    result = torch.normal(mean=0, std=torch.zeros(x_input.size(0), x_input.size(1), cfg.vocab_size))
    for t in range(x_input.size(0)):
        for b in range(x_input.size(1)):
            if x_input[t][b] not in ignore_index:
                result[t][b][x_input[t][b]] = 1.0
    print('sparse input end %s' % time.time())
    return result.transpose(0, 1) 
Developer: AuCson, Project: SEDST, Lines: 19, Source: semi_sup_net.py
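
As a side note (an untested sketch, not code from the SEDST project): the nested Python loops above can be replaced by a single scatter_ call, assuming the same [T, B] integer index input and a vocabulary size such as cfg.vocab_size:

import torch

def get_sparse_input_fast(x_input, vocab_size):
    # allocate the zero [T, B, V] tensor directly instead of via torch.normal with a zero std
    result = torch.zeros(x_input.size(0), x_input.size(1), vocab_size)
    result.scatter_(2, x_input.unsqueeze(2), 1.0)  # one-hot along the vocab axis
    result[:, :, 0] = 0.0                          # re-zero the ignored index 0
    return result.transpose(0, 1)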

Example 2: _sqrt_hessian_sampled

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def _sqrt_hessian_sampled(self, module, g_inp, g_out, mc_samples=1):
        """A Monte-Carlo estimate of the square-root of the Hessian.

        Args:
            module: (torch.nn.MSELoss) module.
            g_inp: Gradient of the loss w.r.t. the input.
            g_out: Gradient of the loss w.r.t. the output.
            mc_samples: (int, optional) Number of MC samples to use. Default: 1.

        Returns:
            tensor: Sampled square-root-Hessian factors of shape [mc_samples, N, D].
        """
        N, D = module.input0.shape
        # `normal` here is torch.normal; `sqrt` is presumably math.sqrt in the source module
        samples = normal(0, 1, size=[mc_samples, N, D], device=module.input0.device)
        samples *= sqrt(2) / sqrt(mc_samples)

        if module.reduction == "mean":
            samples /= sqrt(module.input0.numel())

        return samples 
Developer: f-dangel, Project: backpack, Lines: 22, Source: mseloss.py
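
For context (my own derivation, not text from the backpack project): for the unreduced MSE loss sum_i (x_i - y_i)^2, the Hessian w.r.t. the input is 2I, so sqrt(2)*I is an exact matrix square root. Samples s_k = sqrt(2/mc_samples) * eps_k with eps_k ~ N(0, I) then satisfy E[sum_k s_k s_k^T] = 2I, which a quick numerical check confirms:

import torch

mc_samples, d = 100000, 3
eps = torch.normal(0.0, 1.0, size=(mc_samples, d))
s = eps * (2.0 / mc_samples) ** 0.5
approx_hessian = s.t() @ s       # sums the rank-1 terms s_k s_k^T
print(approx_hessian)            # close to 2 * torch.eye(3) for large mc_samples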

Example 3: construct_edge_mask

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def construct_edge_mask(self, num_nodes, init_strategy="normal", const_val=1.0):
        mask = nn.Parameter(torch.FloatTensor(num_nodes, num_nodes))
        if init_strategy == "normal":
            std = nn.init.calculate_gain("relu") * math.sqrt(
                2.0 / (num_nodes + num_nodes)
            )
            with torch.no_grad():
                mask.normal_(1.0, std)
                # mask.clamp_(0.0, 1.0)
        elif init_strategy == "const":
            nn.init.constant_(mask, const_val)

        if self.args.mask_bias:
            mask_bias = nn.Parameter(torch.FloatTensor(num_nodes, num_nodes))
            nn.init.constant_(mask_bias, 0.0)
        else:
            mask_bias = None

        return mask, mask_bias 
Developer: RexYing, Project: gnn-model-explainer, Lines: 21, Source: explain.py

Example 4: inverse

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def inverse(self, melspectrogram, iters=1000):
        x = torch.normal(0, 1e-6, size=((melspectrogram.size(1) - 1) * self.hp.audio.hop_length, )).cuda().requires_grad_()
        optimizer = torch.optim.LBFGS([x], tolerance_change=1e-16)
        melspectrogram = self.post_spec(melspectrogram)

        def closure():
            optimizer.zero_grad()
            mel = self.get_mel(x)
            loss = self.criterion(mel, melspectrogram)
            loss.backward()
            return loss

        with tqdm(range(iters)) as pbar:
            for i in pbar:
                optimizer.step(closure=closure)
                pbar.set_postfix(loss=self.criterion(self.get_mel(x), melspectrogram).item())

        return x, self.pre_spec(self.get_mel(x)) 
Developer: Deepest-Project, Project: MelNet, Lines: 20, Source: reconstruct.py

Example 5: random_masking

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def random_masking(self, batch_images, batch_mask, device):
        """
        With probability 10% we keep the image unchanged;
        with probability 10% we fill the masked region with normally distributed noise;
        and with probability 80% we zero out the masked region.
        :param batch_images: image to be masked
        :param batch_mask: mask region
        :param device:
        :return: masked image
        """
        # TODO: masking is currently disabled; this early return skips all the logic below
        return batch_images
        temp = random.random()
        if temp > 0.1:
            batch_images = batch_images * batch_mask.unsqueeze(1).float()
            if temp < 0.2:
                batch_images = batch_images + (
                    ((-batch_mask.unsqueeze(1).float()) + 1)
                    * torch.normal(mean=0.5, std=torch.ones(batch_images.shape)).to(device)
                )
        return batch_images 
Developer: epfml, Project: attention-cnn, Lines: 23, Source: transformer.py

Example 6: sample

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def sample(self, n):
        self.generator.eval()

        output_info = self.transformer.output_info
        steps = n // self.batch_size + 1
        data = []
        for i in range(steps):
            mean = torch.zeros(self.batch_size, self.embedding_dim)
            std = mean + 1
            noise = torch.normal(mean=mean, std=std).to(self.device)
            fake = self.generator(noise, output_info)
            data.append(fake.detach().cpu().numpy())

        data = np.concatenate(data, axis=0)
        data = data[:n]
        return self.transformer.inverse_transform(data) 
Developer: sdv-dev, Project: SDGym, Lines: 18, Source: veegan.py
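
A side note, not part of the SDGym code: building a zero mean tensor and a ones std tensor just to draw standard-normal noise, as this sampler and the two that follow do, is equivalent to a single torch.randn call (batch_size and embedding_dim below are placeholder values):

import torch

batch_size, embedding_dim = 64, 128
mean = torch.zeros(batch_size, embedding_dim)
noise_a = torch.normal(mean=mean, std=mean + 1)
noise_b = torch.randn(batch_size, embedding_dim)  # same distribution, one call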

Example 7: sample

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def sample(self, samples):
        self.decoder.eval()

        steps = samples // self.batch_size + 1
        data = []
        for _ in range(steps):
            mean = torch.zeros(self.batch_size, self.embedding_dim)
            std = mean + 1
            noise = torch.normal(mean=mean, std=std).to(self.device)
            fake, sigmas = self.decoder(noise)
            fake = torch.tanh(fake)
            data.append(fake.detach().cpu().numpy())

        data = np.concatenate(data, axis=0)
        data = data[:samples]
        return self.transformer.inverse_transform(data, sigmas.detach().cpu().numpy()) 
Developer: sdv-dev, Project: SDGym, Lines: 18, Source: tvae.py

Example 8: sample

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def sample(self, n):
        self.generator.eval()
        self.decoder.eval()

        steps = n // self.batch_size + 1
        data = []
        for i in range(steps):
            mean = torch.zeros(self.batch_size, self.random_dim)
            std = mean + 1
            noise = torch.normal(mean=mean, std=std).to(self.device)
            emb = self.generator(noise)
            fake = self.decoder(emb, self.transformer.output_info)
            fake = torch.sigmoid(fake)
            data.append(fake.detach().cpu().numpy())
        data = np.concatenate(data, axis=0)
        data = data[:n]
        return self.transformer.inverse_transform(data) 
Developer: sdv-dev, Project: SDGym, Lines: 19, Source: medgan.py

Example 9: train_advreg_mmd

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def train_advreg_mmd(iter_cnt, encoder, gan_g, gan_d, corpus_loader, args, optimizer_reg):
    encoder.train()
    gan_g.train()
    gan_d.train()

    # train gan_disc
    for batch, labels in corpus_loader:
        optimizer_reg.zero_grad()

        batch = Variable(batch.cuda())
        z_real_hidden = encoder(batch)
        # the source used the pre-PyTorch-0.4 keyword `means=`; modern PyTorch expects `mean=`
        z_gauss = torch.normal(mean=torch.zeros(batch.size()),
                               std=args.noise_radius)
        z_gauss = Variable(z_gauss.cuda())
        z_gauss_hidden = gan_g(z_gauss)

        loss_ar = gan_d(z_real_hidden, z_gauss_hidden)

        loss_ar.backward()
        optimizer_reg.step() 
Developer: jiangfeng1124, Project: transfer, Lines: 22, Source: senti_unified.py

Example 10: z2dec

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def z2dec(self, last_h, requires_grad):
        p_mu, p_logvar = self.c2z(last_h)
        if requires_grad:
            sample_z = self.gauss_connector(p_mu, p_logvar)
            joint_logpz = None
        else:
            sample_z = th.normal(p_mu, th.sqrt(th.exp(p_logvar))).detach()
            logprob_sample_z = self.gaussian_logprob(p_mu, p_logvar, sample_z)
            joint_logpz = th.sum(logprob_sample_z.squeeze(0), dim=1)

        dec_init_state = self.z_embedding(sample_z)
        attn_context = None

        if self.config.dec_rnn_cell == 'lstm':
            dec_init_state = tuple([dec_init_state, dec_init_state])

        return dec_init_state, attn_context, joint_logpz 
Developer: snakeztc, Project: NeuralDialog-LaRL, Lines: 19, Source: models_deal.py
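
For context (an illustrative sketch, not code from the LaRL project): the z drawn with th.normal above is detached, so no gradient flows back into p_mu and p_logvar; when a differentiable sample is required, the requires_grad branch goes through self.gauss_connector, whose usual job is the reparameterization trick:

import torch

def reparameterized_sample(mu, logvar):
    # z = mu + sigma * eps stays differentiable w.r.t. mu and logvar
    eps = torch.randn_like(mu)
    return mu + torch.exp(0.5 * logvar) * eps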

Example 11: encode

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def encode(self, indices, lengths, noise): 
        embeddings = self.embedding(indices)
        packed_embeddings = pack_padded_sequence(input=embeddings,
                                                 lengths=lengths,
                                                 batch_first=True)

        packed_output, state = self.encoder(packed_embeddings)
        hidden = state[0][-1]
        hidden = hidden / torch.norm(hidden, p=2, dim=1, keepdim=True)
        
        if noise and self.noise_r > 0:
            # the source used the pre-PyTorch-0.4 keyword `means=`; modern PyTorch expects `mean=`
            gauss_noise = torch.normal(mean=torch.zeros(hidden.size()),
                                       std=self.noise_r)
            hidden = hidden + Variable(gauss_noise.cuda())

        return hidden 
Developer: jakezhaojb, Project: ARAE, Lines: 18, Source: models.py

Example 12: forward

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def forward(self, input):
        """
        Forward pass with all regularized connections and random activations (Bayesian mode).
        Typically used during training.
        """
        if not self.training:
            return F.linear(input, self.weights_clipped, self.bias)

        clip_mask = self.get_clip_mask()
        W = self.weight
        zeros = torch.zeros_like(W)
        mu = input.matmul(W.t())
        eps = 1e-8
        log_alpha = self.clip(self.log_alpha)
        si = torch.sqrt((input * input) \
                        .matmul(((torch.exp(log_alpha) * self.weight * self.weight)+eps).t()))
        activation = mu + torch.normal(torch.zeros_like(mu), torch.ones_like(mu)) * si
        return activation + self.bias 
Developer: HolyBayes, Project: pytorch_ard, Lines: 18, Source: torch_ard.py
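
For context (my summary, not text from the pytorch_ard project): the forward pass above uses the local reparameterization trick. Instead of sampling a noisy weight matrix and then multiplying, it samples the pre-activations directly from N(mu, si^2), with mu = x W^T and si^2 = (x*x) (exp(log_alpha) * W * W)^T. The last two lines are equivalent to this small sketch:

import torch

def local_reparameterization(mu, si):
    # torch.normal(zeros_like(mu), ones_like(mu)) is just a standard-normal draw
    eps = torch.randn_like(mu)
    return mu + eps * si  # pre-activations distributed as N(mu, si^2), element-wise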

Example 13: setUp

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def setUp(self):
        batch_size = 2
        rv_dimension = 5
        p = torch.normal(torch.zeros(batch_size, rv_dimension), torch.ones(batch_size, rv_dimension))
        p_pos = torch.abs(torch.normal(torch.zeros(batch_size, rv_dimension), torch.ones(batch_size, rv_dimension)))
        p_pos = torch.clamp(p_pos, 0.1, 0.9)
        if cuda:
            p = p.cuda()
            p_pos = p_pos.cuda()
        p = Variable(p)
        p_pos = Variable(p_pos)
        self.rv = [
            stat.Normal(size=(batch_size, rv_dimension), cuda=cuda),
            stat.Normal(p, p_pos),
            stat.Categorical(size=(batch_size, rv_dimension), cuda=cuda),
            stat.Categorical(p_pos / torch.sum(p_pos, 1).expand_as(p_pos)),
            stat.Bernoulli(size=(batch_size, rv_dimension), cuda=cuda),
            stat.Bernoulli(p_pos),
            stat.Uniform(size=(batch_size, rv_dimension), cuda=cuda)
        ] 
Developer: stepelu, Project: ptstat, Lines: 22, Source: test_core.py

Example 14: truncated_normal

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def truncated_normal(model):
    std = math.sqrt(2./(model.in_features + model.out_features))
    if model.bias is not None:
        model.bias.data.zero_()
    model.weight.data.normal_(std=std)
    truncate_me = (model.weight.data > 2.*std) | (model.weight.data < -2.*std)
    while truncate_me.sum() > 0:
        model.weight.data[truncate_me] = torch.normal(std=std*torch.ones(truncate_me.sum()))
        truncate_me = (model.weight.data > 2.*std) | (model.weight.data < -2.*std)
    return model 
Developer: kibok90, Project: cvpr2018-hnd, Lines: 12, Source: models.py
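
A note not found in the cvpr2018-hnd code: recent PyTorch versions provide torch.nn.init.trunc_normal_, which truncates at the requested bounds directly and avoids the resampling loop above; a rough equivalent of the helper (assuming an nn.Linear layer) might look like this:

import math
from torch import nn

def truncated_normal_init(layer: nn.Linear) -> nn.Linear:
    std = math.sqrt(2.0 / (layer.in_features + layer.out_features))
    if layer.bias is not None:
        nn.init.zeros_(layer.bias)
    # truncate at +/- 2*std around the zero mean, matching the rejection loop
    nn.init.trunc_normal_(layer.weight, mean=0.0, std=std, a=-2.0 * std, b=2.0 * std)
    return layer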

Example 15: forward_rl

# Required module: import torch [as alias]
# Or: from torch import normal [as alias]
def forward_rl(self, data_feed, max_words, temp=0.1):
        ctx_lens = data_feed['context_lens']  # (batch_size, )
        short_ctx_utts = self.np2var(self.extract_short_ctx(data_feed['contexts'], ctx_lens), LONG)
        bs_label = self.np2var(data_feed['bs'], FLOAT)  # (batch_size, max_ctx_len, max_utt_len)
        db_label = self.np2var(data_feed['db'], FLOAT)  # (batch_size, max_ctx_len, max_utt_len)
        batch_size = len(ctx_lens)

        utt_summary, _, enc_outs = self.utt_encoder(short_ctx_utts.unsqueeze(1))

        # create decoder initial states
        enc_last = th.cat([bs_label, db_label, utt_summary.squeeze(1)], dim=1)
        p_mu, p_logvar = self.c2z(enc_last)

        sample_z = th.normal(p_mu, th.sqrt(th.exp(p_logvar))).detach()
        logprob_sample_z = self.gaussian_logprob(p_mu, self.zero, sample_z)
        joint_logpz = th.sum(logprob_sample_z, dim=1)

        # pack attention context
        dec_init_state = self.z_embedding(sample_z.unsqueeze(0))
        attn_context = None

        # decode
        if self.config.dec_rnn_cell == 'lstm':
            dec_init_state = tuple([dec_init_state, dec_init_state])

        logprobs, outs = self.decoder.forward_rl(batch_size=batch_size,
                                                 dec_init_state=dec_init_state,
                                                 attn_context=attn_context,
                                                 vocab=self.vocab,
                                                 max_words=max_words,
                                                 temp=temp)  # the source hard-coded 0.1 here, ignoring the `temp` argument
        return logprobs, outs, joint_logpz, sample_z 
Developer: ConvLab, Project: ConvLab, Lines: 36, Source: models_task.py


Note: the torch.normal examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects by various contributors, and copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce this compilation without permission.