

Python torch.bernoulli Method Code Examples

This article collects typical usage examples of Python's torch.bernoulli method. If you are wondering what torch.bernoulli does, how to call it, or what real-world usage looks like, then congratulations: the curated code examples here may help. You can also explore further usage examples from the torch module.


The following presents 15 code examples of the torch.bernoulli method, ordered by popularity by default. Feel free to rate the examples you find useful; your feedback helps the system recommend better Python code samples.
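Before diving into the examples, here is a minimal illustration of the API itself: torch.bernoulli(input) treats every element of input as an independent probability in [0, 1] and returns a tensor of the same shape filled with 0s and 1s.

import torch

probs = torch.tensor([0.1, 0.5, 0.9])
samples = torch.bernoulli(probs)            # e.g. tensor([0., 1., 1.])

# Common idiom: sample a {0, 1} mask of a given shape with probability p
p = 0.3
mask = torch.bernoulli(torch.full((2, 4), p))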

Example 1: forward

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def forward(self, x0, x1, x2, x3):
        if self.p > 0 and self.training:
            coef = torch.bernoulli((1.0 - self.p) * torch.ones(8))
            out1 = coef[0] * self.block01(x0) + coef[1] * self.block11(x1) + coef[2] * self.block21(x2)
            out2 = coef[3] * self.block12(x1) + coef[4] * self.block22(x2) + coef[5] * self.block32(x3)
            out3 = coef[6] * self.block23(x2) + coef[7] * self.block33(x3)
        else:
            out1 = (1 - self.p) * (self.block01(x0) + self.block11(x1) + self.block21(x2))
            out2 = (1 - self.p) * (self.block12(x1) + self.block22(x2) + self.block32(x3))
            out3 = (1 - self.p) * (self.block23(x2) + self.block33(x3))

        if self.integrate:
            out1 += x1
            out2 += x2
            out3 += x3

        return x0, self.relu(out1), self.relu(out2), self.relu(out3) 
Author: uci-cbcl, Project: DeepLung, Lines: 19, Source: layers.py
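To see the gating pattern above in isolation, here is a minimal standalone sketch (names and shapes are illustrative, not from DeepLung): during training each branch is kept with probability 1 - p via a Bernoulli coefficient, while at inference every branch is scaled by 1 - p so the expected output matches.

import torch

p = 0.2
training = True
branch_outputs = [torch.randn(4) for _ in range(3)]   # stand-ins for block outputs
if training and p > 0:
    coef = torch.bernoulli((1.0 - p) * torch.ones(len(branch_outputs)))
    out = sum(c * b for c, b in zip(coef, branch_outputs))
else:
    out = (1 - p) * sum(branch_outputs)   # match the training-time expectation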

Example 2: forward

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def forward(self, x):
        if not self.training or self.keep_prob == 1:  # set keep_prob=1 to turn off DropBlock
            return x
        if self.gamma is None:
            self.gamma = self.calculate_gamma(x)
        if x.type() == 'torch.cuda.HalfTensor':  # TODO: FP16 is not fully supported yet
            FP16 = True
            x = x.float()
        else:
            FP16 = False
        p = torch.ones_like(x) * (self.gamma)
        mask = 1 - torch.nn.functional.max_pool2d(torch.bernoulli(p),
                                                  self.kernel_size,
                                                  self.stride,
                                                  self.padding)

        out = mask * x * (mask.numel() / mask.sum())

        if FP16:
            out = out.half()
        return out 
Author: ruinmessi, Project: ASFF, Lines: 23, Source: network_blocks.py
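The example depends on self.calculate_gamma, which is not shown. Below is a sketch of the usual DropBlock formulation (Ghiasi et al., 2018); the method and attribute names are assumptions carried over from the code above.

def calculate_gamma(self, x):
    # Seed probability chosen so that, after max-pooling expands each seed
    # into a kernel_size x kernel_size block, roughly (1 - keep_prob) of
    # the activations end up dropped.
    feat_size = x.shape[-1]
    return ((1.0 - self.keep_prob) / (self.kernel_size ** 2)
            * (feat_size ** 2) / ((feat_size - self.kernel_size + 1) ** 2))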

Example 3: drop_word

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def drop_word(self, words):
        r"""
        按照设定随机将words设置为unknown_index。

        :param torch.LongTensor words: batch_size x max_len
        :return:
        """
        if self.word_dropout > 0 and self.training:
            with torch.no_grad():
                mask = torch.full_like(words, fill_value=self.word_dropout, dtype=torch.float, device=words.device)
                mask = torch.bernoulli(mask).eq(1)  # the larger word_dropout is, the more positions become 1
                pad_mask = words.ne(self._word_pad_index)
                mask = pad_mask.__and__(mask)  # padding positions are never replaced with unk
                if self._word_sep_index != -100:
                    not_sep_mask = words.ne(self._word_sep_index)
                    mask = mask.__and__(not_sep_mask)
                if self._word_cls_index != -100:
                    not_cls_mask = words.ne(self._word_cls_index)
                    mask = mask.__and__(not_cls_mask)
                words = words.masked_fill(mask, self._word_unk_index)
        return words 
Author: fastnlp, Project: fastNLP, Lines: 23, Source: roberta_embedding.py
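A standalone sketch of the core masking logic above (all names are illustrative): sample a Bernoulli mask at the dropout rate, clear it at padding positions, and fill the remaining positions with the unknown index.

import torch

word_dropout, pad_index, unk_index = 0.1, 0, 1
words = torch.randint(2, 100, (2, 8))   # batch_size x max_len
mask = torch.bernoulli(torch.full_like(words, word_dropout, dtype=torch.float)).eq(1)
mask = mask & words.ne(pad_index)       # never replace padding
words = words.masked_fill(mask, unk_index)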

Example 4: make_data

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def make_data(batch, augment=False,
              singleton_idx=None, unk_idx=None,
              ):
    sentences = batch.sentences
    tags, lengths = batch.tags

    letters, letters_lengths = batch.letters
    # Data augmentation for <unk> embedding training
    if augment:
        indices = torch.zeros_like(tags)
        bernoulli = torch.FloatTensor(*tags.shape).fill_(0.3)
        bernoulli = torch.bernoulli(bernoulli).byte()
        bernoulli = bernoulli.to(tags.device)
        indices = indices.byte()
        for rep in singleton_idx:
            indices = indices | (tags == rep)
        indices = indices & bernoulli
        sentences[indices] = unk_idx

    return sentences, tags, lengths, letters, letters_lengths 
Author: arthurmensch, Project: didyprog, Lines: 22, Source: word_tagging.py

Example 5: corrupt

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def corrupt(self, src, rel, dst, keep_truth=True):
        n = len(src)
        prob = self.bern_prob[rel]
        selection = torch.bernoulli(prob).numpy().astype('bool')
        src_out = np.tile(src.numpy(), (self.n_sample, 1)).transpose()
        dst_out = np.tile(dst.numpy(), (self.n_sample, 1)).transpose()
        rel_out = rel.unsqueeze(1).expand(n, self.n_sample)
        if keep_truth:
            ent_random = choice(self.n_ent, (n, self.n_sample - 1))
            src_out[selection, 1:] = ent_random[selection]
            dst_out[~selection, 1:] = ent_random[~selection]
        else:
            ent_random = choice(self.n_ent, (n, self.n_sample))
            src_out[selection, :] = ent_random[selection]
            dst_out[~selection, :] = ent_random[~selection]
        return torch.from_numpy(src_out), rel_out, torch.from_numpy(dst_out) 
Author: cai-lw, Project: KBGAN, Lines: 18, Source: corrupter.py
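The per-relation self.bern_prob is not shown above. In the "bern" negative-sampling scheme (Wang et al., 2014) it is the probability of corrupting the head entity, computed from relation statistics; the sketch below is an assumption about how such a table might be built.

import torch

tph = torch.tensor([2.0, 1.0])   # avg. tails per head, per relation (illustrative)
hpt = torch.tensor([1.0, 3.0])   # avg. heads per tail, per relation (illustrative)
bern_prob = tph / (tph + hpt)    # probability of replacing the head entity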

Example 6: train

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def train(self):
        for epoch in range(10):
            for it, (x, y) in enumerate(self.data_loader):
                self.optim.zero_grad()
                
                x = torch.bernoulli(x)
                x = Variable(x.view(-1, 784))
                out = nn_.sigmoid(self.mdl(x)[:,:,0])
                loss = utils.bceloss(out, x).sum(1).mean()
                
                loss.backward()
                self.optim.step()
                
                if ((it + 1) % 10) == 0:
                    print('Epoch: [%2d] [%4d/%4d] loss: %.8f' %
                          (epoch + 1, it + 1,
                           self.data_loader.dataset.__len__() // 32,
                           loss.item()))

                self.mdl.randomize()
Author: CW-Huang, Project: torchkit, Lines: 24, Source: model_made_bmnist.py
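The torch.bernoulli(x) call performs dynamic binarization of MNIST: each pixel intensity in [0, 1] is treated as a probability, so every epoch sees a fresh binary sample of the data (the same trick appears in Example 7). A minimal illustration:

import torch

x = torch.rand(4, 784)        # stand-in for a batch of flattened MNIST images
x_bin = torch.bernoulli(x)    # values in {0., 1.}, resampled on every call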

Example 7: train

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def train(self):
        for epoch in range(10):
            for it, (x, y) in enumerate(self.data_loader):
                self.optim.zero_grad()
                x = torch.bernoulli(x)
                if cuda:
                    x = x.cuda()
                x = Variable(x.view(-1, 1, 28, 28))
                out = nn_.sigmoid(self.mdl((x,0))[0]).permute(0,3,1,2)
                loss = utils.bceloss(out, x).sum(1).sum(1).sum(1).mean()
                loss.backward()
                self.optim.step()
                if ((it + 1) % 100) == 0:
                    print('Epoch: [%2d] [%4d/%4d] loss: %.8f' %
                          (epoch + 1, it + 1,
                           self.data_loader.dataset.__len__() // 32,
                           loss.item()))
Author: CW-Huang, Project: torchkit, Lines: 19, Source: model_pixelcnn_bmnist.py

Example 8: test_rmax

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def test_rmax(self):
        # Connection test
        network = Network(dt=1.0)
        network.add_layer(Input(n=100, traces=True, traces_additive=True), name="input")
        network.add_layer(SRM0Nodes(n=100), name="output")
        network.add_connection(
            Connection(
                source=network.layers["input"],
                target=network.layers["output"],
                nu=1e-2,
                update_rule=Rmax,
            ),
            source="input",
            target="output",
        )
        network.run(
            inputs={"input": torch.bernoulli(torch.rand(250, 100)).byte()},
            time=250,
            reward=1.0,
        ) 
Author: BindsNET, Project: bindsnet, Lines: 22, Source: test_learning.py

Example 9: gen_inputs_labels

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def gen_inputs_labels(self, inputs, masked_indices):
        # We sample a few tokens in each sequence for masked-LM training (with probability mlm_probability, which defaults to 0.15 in BERT/RoBERTa)
        inputs = inputs.clone()
        labels = inputs.clone()
        labels[~masked_indices] = -100  # We only compute loss on masked tokens

        # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
        indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
        inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)

        # 10% of the time, we replace masked input tokens with random word
        indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
        random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)
        inputs[indices_random] = random_words[indices_random]
        # The rest of the time (10% of the time) we keep the masked input tokens unchanged
        return inputs, labels 
Author: howardhsu, Project: BERT-for-RRC-ABSA, Lines: 18, Source: masker.py
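Note the arithmetic behind the 80/10/10 split: the second bernoulli(0.5) only applies to the roughly 20% of masked positions not already replaced with [MASK], so 0.2 * 0.5 = 0.1 of the masked tokens become random words and the remaining 10% stay unchanged. A quick empirical check:

import torch

n = 100_000
masked = torch.ones(n, dtype=torch.bool)
replaced = torch.bernoulli(torch.full((n,), 0.8)).bool() & masked
random_ = torch.bernoulli(torch.full((n,), 0.5)).bool() & masked & ~replaced
print(replaced.float().mean().item(), random_.float().mean().item())  # ~0.8, ~0.1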

Example 10: sample

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def sample(self, n_samples=1, resample=False):
        """
        Draw samples from the distribution.

        Args:
            n_samples (int): number of samples to draw
            resample (bool): whether to resample or just use current sample
        """
        if self._sample is None or resample:
            assert self.mean is not None, 'Mean is None.'
            mean = self.mean
            if len(mean.size()) == 2:
                mean = mean.unsqueeze(1).repeat(1, n_samples, 1)
            elif len(mean.size()) == 4:
                mean = mean.unsqueeze(1).repeat(1, n_samples, 1, 1, 1)
            self._sample = torch.bernoulli(mean)
        return self._sample 
Author: joelouismarino, Project: amortized-variational-filtering, Lines: 19, Source: bernoulli.py
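A standalone illustration of the sampling step (shapes are illustrative): given a tensor of per-element Bernoulli means, torch.bernoulli draws an independent binary value for each element.

import torch

mean = torch.sigmoid(torch.randn(2, 1, 28, 28))   # per-pixel Bernoulli means
sample = torch.bernoulli(mean)                    # same shape, values in {0., 1.}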

Example 11: mask_tokens

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def mask_tokens(inputs, tokenizer, args):
    """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """
    labels = inputs.clone()
    # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)
    probability_matrix = torch.full(labels.shape, args.mlm_probability)
    special_tokens_mask = [tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
    probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
    masked_indices = torch.bernoulli(probability_matrix).bool()
    labels[~masked_indices] = -1  # We only compute loss on masked tokens

    # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
    inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)

    # 10% of the time, we replace masked input tokens with random word
    indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
    random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
    inputs[indices_random] = random_words[indices_random]

    # The rest of the time (10% of the time) we keep the masked input tokens unchanged
    return inputs, labels 
Author: mgrankin, Project: ru_transformers, Lines: 23, Source: run_lm_finetuning.py
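The selection step can be exercised in isolation; the token ids and special-token mask below are illustrative, showing how positions holding special tokens receive probability 0 and can never be masked.

import torch

mlm_probability = 0.15
labels = torch.tensor([[101, 2023, 2003, 102]])            # e.g. [CLS] w1 w2 [SEP]
special = torch.tensor([[1, 0, 0, 1]], dtype=torch.bool)   # special-token positions
probability_matrix = torch.full(labels.shape, mlm_probability)
probability_matrix.masked_fill_(special, value=0.0)
masked_indices = torch.bernoulli(probability_matrix).bool()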

Example 12: mask_tokens

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def mask_tokens(inputs, tokenizer, args):
    """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """
    labels = inputs.clone()
    # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)
    probability_matrix = torch.full(labels.shape, args.mlm_probability)
    special_tokens_mask = [tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()]
    probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
    masked_indices = torch.bernoulli(probability_matrix).bool()
    labels[~masked_indices] = -1  # We only compute loss on masked tokens

    # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
    inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)

    # 10% of the time, we replace masked input tokens with random word
    indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
    random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
    inputs[indices_random] = random_words[indices_random]

    # The rest of the time (10% of the time) we keep the masked input tokens unchanged
    return inputs, labels

# from transformers/modeling_utils.py, adapted to tpu 
Author: mgrankin, Project: ru_transformers, Lines: 25, Source: debug_lm.py

Example 13: draw

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def draw(self, *size):
        """Draw N samples from multinomial

        Args:
            - size: the output size of samples
        """
        max_value = self.alias.size(0)

        kk = self.alias.new(*size).random_(0, max_value).long().view(-1)
        prob = self.prob[kk]
        alias = self.alias[kk]
        # b is whether a random number is greater than q
        b = torch.bernoulli(prob).long()
        oq = kk.mul(b)
        oj = alias.mul(1 - b)

        return (oq + oj).view(size) 
Author: Stonesjtu, Project: Pytorch-NCE, Lines: 19, Source: alias_multinomial.py
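The example only shows the draw step; the prob and alias tables come from Walker's alias-method setup. A sketch of that construction is below (the function and variable names are assumptions, not from the Pytorch-NCE source):

import torch

def build_alias(probs):
    """Build alias-method tables from a list of probabilities summing to 1."""
    K = len(probs)
    prob = torch.zeros(K)
    alias = torch.zeros(K, dtype=torch.long)
    scaled = [p * K for p in probs]
    small = [i for i, p in enumerate(scaled) if p < 1.0]
    large = [i for i, p in enumerate(scaled) if p >= 1.0]
    while small and large:
        s, l = small.pop(), large.pop()
        prob[s], alias[s] = scaled[s], l          # residual mass stays with s
        scaled[l] -= 1.0 - scaled[s]              # l donates the remainder
        (small if scaled[l] < 1.0 else large).append(l)
    for leftover in small + large:                # numerical leftovers keep prob 1
        prob[leftover] = 1.0
    return prob, alias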

Example 14: run_episode

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def run_episode(env, weight):
    state = env.reset()
    grads = []
    total_reward = 0
    is_done = False
    while not is_done:
        state = torch.from_numpy(state).float()
        z = torch.matmul(state, weight)
        probs = torch.nn.Softmax(dim=-1)(z)
        action = int(torch.bernoulli(probs[1]).item())
        d_softmax = torch.diag(probs) - probs.view(-1, 1) * probs
        d_log = d_softmax[action] / probs[action]
        grad = state.view(-1, 1) * d_log
        grads.append(grad)
        state, reward, is_done, _ = env.step(action)
        total_reward += reward
        if is_done:
            break
    return total_reward, grads 
Author: PacktPublishing, Project: PyTorch-1.x-Reinforcement-Learning-Cookbook, Lines: 21, Source: policy_gradient.py
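A hypothetical usage sketch, assuming a CartPole-style environment (4-dimensional state, 2 actions, and the pre-0.26 gym step API that the loop above relies on):

import gym
import torch

env = gym.make('CartPole-v0')
weight = torch.rand(4, 2)   # state_dim x n_actions
total_reward, grads = run_episode(env, weight)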

Example 15: same_dropout

# Required module import: import torch [as alias]
# Or: from torch import bernoulli [as alias]
def same_dropout(data_tensor, dropout_p, dim, is_model_training):
    """
    Drops the same random elements of a Tensor across the specified dimension, during training.

    :param data_tensor: ND Tensor.
    :param dropout_p: The dropout probability.
    :param dim: Int that corresponds to the dimension.
    :param is_model_training: Whether the model is currently training.
    :return: ND Tensor.
    """

    if dim < 0:
        dim = len(data_tensor.shape) + dim

    if dropout_p is None or dropout_p == 0 or not is_model_training:
        return data_tensor

    assert 0 <= dropout_p < 1, 'dropout probability must be in range [0,1)'

    shape = list(data_tensor.shape)
    shape[dim] = 1
    dp = torch.empty(*shape, dtype=torch.float, device=data_tensor.device)
    dp = torch.bernoulli(dp.fill_(1 - dropout_p)) / (1 - dropout_p)

    return data_tensor * dp 
Author: jkoutsikakis, Project: pytorch-wrapper, Lines: 27, Source: functional.py
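Usage sketch: with dim=1 the Bernoulli mask has size 1 along the time axis and is broadcast across it, so an element dropped at one timestep is dropped at every timestep, matching the docstring.

import torch

x = torch.ones(2, 5, 3)   # batch x time x features
out = same_dropout(x, dropout_p=0.4, dim=1, is_model_training=True)
# Surviving elements are scaled by 1 / (1 - dropout_p), as in inverted dropout.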


Note: the torch.bernoulli method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not repost without permission.