當前位置: 首頁>>代碼示例>>Python>>正文


Python torch.std方法代碼示例

本文整理匯總了Python中torch.std方法的典型用法代碼示例。如果您正苦於以下問題:Python torch.std方法的具體用法?Python torch.std怎麽用?Python torch.std使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在torch的用法示例。


在下文中一共展示了torch.std方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: test_SDE

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def test_SDE(self):
        """Smoke-test state estimation for a discretized linear SDE.

        Builds a mean-reverting SDE via Euler-Maruyama discretization,
        wraps it in a linear-Gaussian observation model, samples a path of
        500 observations, then runs both a particle filter (SISR) and a
        UKF and checks that the filtered means track the latent path
        (std of the residual below 5e-2).
        """
        # Drift term: pulls the state back toward zero at rate `a`.
        def f(x, a, s):
            return -a * x

        # Diffusion term: constant volatility `s`.
        def g(x, a, s):
            return s

        # dt=1e-2 with 10 internal steps per observation; parameters (0.02, 0.15)
        # are (reversion rate, volatility). Initial/parameter priors are N(0, 1).
        em = AffineEulerMaruyama((f, g), (0.02, 0.15), Normal(0., 1.), Normal(0., 1.), dt=1e-2, num_steps=10)
        model = LinearGaussianObservations(em, scale=1e-3)

        x, y = model.sample_path(500)

        for filt in [SISR(model, 500, proposal=Bootstrap()), UKF(model)]:
            filt = filt.initialize().longfilter(y)

            means = filt.result.filter_means
            # NOTE(review): UKF results appear to stack state moments column-wise;
            # column 0 presumably holds the filtered mean — confirm against pyfilter.
            if isinstance(filt, UKF):
                means = means[:, 0]

            self.assertLess(torch.std(x - means), 5e-2) 
開發者ID:tingiskhan,項目名稱:pyfilter,代碼行數:22,代碼來源:filters.py

示例2: forward

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def forward(self, z):
        """Layer-normalize ``z`` over its last dimension.

        Standardizes each last-axis slice to zero mean and unit std (with
        ``self.eps`` added to the std for numerical safety), then applies the
        learned affine transform ``a_2 * z + b_2`` when ``self.affine`` is set.
        A size-1 last dimension is returned untouched, since it has no spread.

        Note: the tensor2tensor reference adds eps to the *variance* inside an
        rsqrt instead of to the std; the difference has not been observed to
        matter here.
        """
        if z.size(-1) == 1:
            return z

        mean = torch.mean(z, keepdim=True, dim=-1)
        std = torch.std(z, keepdim=True, dim=-1)
        normed = (z - mean.expand_as(z)) / (std.expand_as(z) + self.eps)

        if not self.affine:
            return normed
        return normed * self.a_2.expand_as(normed) + self.b_2.expand_as(normed)

# %% 
開發者ID:nikitakit,項目名稱:self-attentive-parser,代碼行數:23,代碼來源:parse_nk.py

示例3: comp

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def comp(self, inpu):
        """Compute soft (differentiable) ranks of the 1-D tensor ``inpu``.

        Forms all pairwise differences, squashes them through a sigmoid whose
        slope is scaled by 6.8 / std of the differences, and sums the resulting
        soft comparison counts to obtain a rank in (0, 1] for each element.
        """
        n = inpu.size(0)
        tiled = inpu.repeat(n, 1)
        upper = torch.triu(tiled, diagonal=1)
        upper_t = torch.triu(tiled.t(), diagonal=1)

        diff_fwd = upper - upper_t
        diff_bwd = upper_t - upper

        # Scale so the sigmoid saturates at ~6.8 standard deviations.
        scaled_fwd = torch.sigmoid(diff_fwd * (6.8 / torch.std(diff_fwd).item()))
        scaled_bwd = torch.sigmoid(diff_bwd * (6.8 / torch.std(diff_bwd).item()))

        # Keep only strictly-upper entries: each unordered pair counted once per direction.
        rank_fwd = torch.triu(scaled_fwd, diagonal=1)
        rank_bwd = torch.triu(scaled_bwd, diagonal=1)

        return (torch.sum(rank_fwd, 1) + torch.sum(rank_bwd, 0) + 1) / n
開發者ID:technicolor-research,項目名稱:sodeep,代碼行數:19,代碼來源:model.py

示例4: __call__

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def __call__(self, x):
        """Standardize a [C, H, W] tensor channel-by-channel, in place.

        Two modes:
        - ``self.auto`` False: each channel is normalized by its own mean/std.
        - ``self.auto`` True: running mean/var (``self.mean``/``self.var``,
          one entry per channel) are updated from this sample and used for
          normalization.

        Channels with zero spread are set to all zeros instead of dividing
        by zero. Returns the (mutated) input tensor.
        """
        if not self.auto:
            for idx in range(x.shape[0]):
                xmean = torch.mean(x[idx, :, :])
                xstd = torch.std(x[idx, :, :])
                # BUG FIX: guard *before* dividing — the original divided by a
                # zero std first (producing inf/nan) and only then zeroed out.
                if xstd == 0:
                    x[idx, :, :] = 0.0
                else:
                    x[idx, :, :] = (x[idx, :, :] - xmean) / xstd
        else:
            view = x.view(x.shape[0], -1)
            mean = view.mean(dim=1)
            var = view.var(dim=1)
            # Incremental running-moment update (weights: 1/(count+1) new,
            # count/(count+1) old, plus a mean-shift correction term).
            # NOTE(review): self.count is not incremented here — presumably the
            # caller maintains it; confirm.
            self.var = var / (self.count + 1) + self.count / (self.count + 1) * self.var
            self.var += self.count / ((self.count + 1) ** 2) * (self.mean - mean) ** 2
            self.mean = (self.count * self.mean + mean) / (self.count + 1)
            std = torch.sqrt(self.var)
            for idx in range(x.shape[0]):
                # BUG FIX: the original referenced an undefined local `xstd`
                # here (NameError on every auto-mode call) and subtracted the
                # whole per-channel mean vector from a 2-D slice; use this
                # channel's own running mean/std instead.
                if std[idx] == 0:
                    x[idx, :, :] = 0.0
                else:
                    x[idx, :, :] = (x[idx, :, :] - self.mean[idx]) / std[idx]
        return x
開發者ID:mjendrusch,項目名稱:torchsupport,代碼行數:23,代碼來源:transforms.py

示例5: forward

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def forward(self, x_context, y_context, x_all=None, y_all=None, n = 10):
        """Neural-process forward pass.

        Training: encodes the full set, decodes one reparameterized latent
        sample, and returns the prediction. Evaluation: decodes ``n`` latent
        samples conditioned on the context, returning their mean as the
        prediction and (when n > 1) their std as the uncertainty estimate.

        Returns ``(y_hat, z_all, z_context, y_sigma)``.
        """
        y_sigma = None
        z_context = self.xy_to_z_params(x_context, y_context)

        if self.training:
            z_all = self.xy_to_z_params(x_all, y_all)
            y_hat = self.decoder.forward(self.reparameterise(z_all), x_all)
            return y_hat, z_all, z_context, y_sigma

        z_all = z_context
        # Buffer shape depends on the task type ('ST' single-task vs 'MT' multi-task).
        if self.type == 'ST':
            draws = torch.zeros([n, y_context.shape[0], y_context.shape[2]], device='cpu')
        elif self.type == 'MT':
            draws = torch.zeros([n, y_context.shape[0], 1, y_context.shape[2],
                                 y_context.shape[3], y_context.shape[4]], device='cpu')
        for i in range(n):
            draws[i, :] = self.decoder.forward(self.reparameterise(z_all), x_context)
        y_hat = torch.mean(draws, dim=0).to(self.device)
        if n > 1:
            y_sigma = torch.std(draws, dim=0).to(self.device)
        return y_hat, z_all, z_context, y_sigma
    
############################################################################### 
開發者ID:amarquand,項目名稱:nispat,代碼行數:25,代碼來源:NP.py

示例6: forward

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def forward(self, x_context, y_context, x_all=None, y_all=None, n = 10):
        """Neural-process forward pass with a second (84th-percentile) head.

        Training: decodes one reparameterized latent sample into the paired
        outputs. Evaluation: averages ``n`` decoded samples per head and,
        when n > 1, also returns the per-head std across samples.

        Returns ``(y_hat, y_hat_84, z_all, z_context, y_sigma, y_sigma_84)``.
        """
        y_sigma = None
        y_sigma_84 = None
        z_context = self.xy_to_z_params(x_context, y_context)

        if self.training:
            z_all = self.xy_to_z_params(x_all, y_all)
            y_hat, y_hat_84 = self.decoder.forward(self.reparameterise(z_all))
            return y_hat, y_hat_84, z_all, z_context, y_sigma, y_sigma_84

        z_all = z_context
        draws = torch.zeros([n, y_context.shape[0], y_context.shape[2]], device=self.device)
        draws_84 = torch.zeros([n, y_context.shape[0], y_context.shape[2]], device=self.device)
        for i in range(n):
            draws[i, :], draws_84[i, :] = self.decoder.forward(self.reparameterise(z_all))
        y_hat = torch.mean(draws, dim=0).to(self.device)
        y_hat_84 = torch.mean(draws_84, dim=0).to(self.device)
        if n > 1:
            y_sigma = torch.std(draws, dim=0).to(self.device)
            y_sigma_84 = torch.std(draws_84, dim=0).to(self.device)
        return y_hat, y_hat_84, z_all, z_context, y_sigma, y_sigma_84
    
############################################################################### 
開發者ID:amarquand,項目名稱:nispat,代碼行數:25,代碼來源:NPR.py

示例7: build

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def build(self, corpus, min_freq=1, embed=None):
        """Build this field's vocabulary from ``corpus``, optionally loading
        pretrained embeddings.

        Counts transformed tokens across the corpus sequences to construct
        the vocab. When ``embed`` is given, its tokens are merged into the
        vocab and its vectors copied into ``self.embed``, which is then
        rescaled to unit overall std.
        """
        sequences = getattr(corpus, self.name)
        counter = Counter(token
                          for sequence in sequences
                          for token in self.transform(sequence))
        self.vocab = Vocab(counter, min_freq, self.specials)

        if not embed:
            self.embed = None
            return

        tokens = self.transform(embed.tokens)
        # A pretrained `unk` token is renamed to this field's own unk symbol.
        if embed.unk:
            tokens[embed.unk_index] = self.unk

        self.vocab.extend(tokens)
        self.embed = torch.zeros(len(self.vocab), embed.dim)
        self.embed[self.vocab.token2id(tokens)] = embed.vectors
        # Normalize the pretrained matrix to unit standard deviation.
        self.embed /= torch.std(self.embed)
開發者ID:yzhangcs,項目名稱:parser,代碼行數:21,代碼來源:field.py

示例8: __init__

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def __init__(self, n_head, d_model, dropout):
        """Multi-head attention layer.

        Args:
            n_head: number of attention heads.
            d_model: model (input/output) dimensionality; must be divisible
                by ``n_head``.
            dropout: dropout probability for attention and output.
        """
        super().__init__()
        self.n_head = n_head
        d_k = d_model // n_head
        self.d_v = self.d_k = d_k

        # Per-head projection weights for queries, keys and values.
        # Direct assignment registers each nn.Parameter on the module.
        self.w_qs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))
        self.w_ks = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))
        self.w_vs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))

        self.attention = ScaledDotProductAttention(d_k, dropout)
        self.lm = LayerNorm(d_model)
        self.w_o = nn.Linear(d_model, d_model, bias=False)
        self.dropout = nn.Dropout(dropout)

        for weight in (self.w_qs, self.w_ks, self.w_vs):
            weight.data.normal_(std=const.INIT_RANGE)
開發者ID:ne7ermore,項目名稱:torch-light,代碼行數:19,代碼來源:model.py

示例9: __init__

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def __init__(self, n_head, d_model, dropout=0.5):
        """Multi-head attention layer.

        Args:
            n_head: number of attention heads.
            d_model: model (input/output) dimensionality; must be divisible
                by ``n_head``.
            dropout: dropout probability (default 0.5).
        """
        super().__init__()
        self.n_head = n_head
        d_k = d_model // n_head
        self.d_v = self.d_k = d_k

        # Per-head projection weights for queries, keys and values.
        # Direct assignment registers each nn.Parameter on the module.
        self.w_qs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))
        self.w_ks = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))
        self.w_vs = nn.Parameter(torch.FloatTensor(n_head, d_model, d_k))

        self.attention = ScaledDotProductAttention(d_k, dropout)
        self.lm = LayerNorm(d_model)
        self.w_o = nn.Linear(d_model, d_model, bias=False)
        self.dropout = nn.Dropout(dropout)

        for weight in (self.w_qs, self.w_ks, self.w_vs):
            weight.data.normal_(std=const.INIT_RANGE)
開發者ID:ne7ermore,項目名稱:torch-light,代碼行數:19,代碼來源:model.py

示例10: init_weights

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def init_weights(self):
        """Initialize weights of the head."""
        # Shared conv towers for classification and regression.
        for module in self.cls_convs:
            normal_init(module.conv, std=0.01)
        for module in self.reg_convs:
            normal_init(module.conv, std=0.01)
        # Classification output bias is set from a prior probability so that
        # initial predictions are rare-positive.
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.reppoints_cls_conv, std=0.01)
        normal_init(self.reppoints_cls_out, std=0.01, bias=bias_cls)
        # Point-set initialization and refinement branches.
        for layer in (self.reppoints_pts_init_conv,
                      self.reppoints_pts_init_out,
                      self.reppoints_pts_refine_conv,
                      self.reppoints_pts_refine_out):
            normal_init(layer, std=0.01)
開發者ID:open-mmlab,項目名稱:mmdetection,代碼行數:15,代碼來源:reppoints_head.py

示例11: forward

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def forward(self, z):
        """Layer-normalize ``z`` along its last dimension with affine rescale.

        NOTE(review): the passthrough checks ``z.size(1)``, not ``z.size(-1)``;
        for 2-D inputs these coincide — confirm intent for higher ranks.
        """
        if z.size(1) == 1:
            return z
        mean = torch.mean(z, keepdim=True, dim=-1)
        std = torch.std(z, keepdim=True, dim=-1)
        normed = (z - mean.expand_as(z)) / (std.expand_as(z) + self.eps)
        return normed * self.a_2.expand_as(normed) + self.b_2.expand_as(normed)
開發者ID:AuCson,項目名稱:SEDST,代碼行數:10,代碼來源:rnn_net.py

示例12: extract_stats

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def extract_stats(opts):
    """Estimate per-key mean/std statistics over sampled training batches
    and pickle them to ``opts.out_file``.

    Iterates the training DataLoader (up to ``opts.max_batches`` batches of
    100), accumulates every batched tensor except the excluded keys, then
    reduces each key over its time axis (dim=2) and batch axis (dim=0).
    """
    dset = build_dataset_providers(opts)
    collater_keys = dset[-1]
    dset = dset[0]
    collater = DictCollater()
    collater.batching_keys.extend(collater_keys)
    dloader = DataLoader(dset, batch_size=100,
                         shuffle=True, collate_fn=collater,
                         num_workers=opts.num_workers)
    # Chunks are sampled randomly, so one "epoch" means seeing roughly
    # total_train_wav_dur // chunk_size chunks; dividing by the batch size
    # (100) gives batches-per-epoch for the progress display.
    # NOTE(review): the divisor 500 does not match batch_size=100 — confirm.
    bpe = (dset.total_wav_dur // opts.chunk_size) // 500
    data = {}
    # One pass over training data to collect tensors per minion key.
    for bidx, batch in enumerate(dloader, start=1):
        print('Bidx: {}/{}'.format(bidx, bpe))
        for k, v in batch.items():
            if k in opts.exclude_keys:
                continue
            data.setdefault(k, []).append(v)
        if bidx >= opts.max_batches:
            break

    stats = {}
    for k, v in data.items():
        merged = torch.cat(v)
        stats[k] = {'mean': torch.mean(torch.mean(merged, dim=2), dim=0),
                    'std': torch.std(torch.std(merged, dim=2), dim=0)}
    with open(opts.out_file, 'wb') as stats_f:
        pickle.dump(stats, stats_f)
開發者ID:santi-pdp,項目名稱:pase,代碼行數:36,代碼來源:make_trainset_statistics.py

示例13: __init__

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def __init__(self, mean=0.0, std=0.1):
        """Perturb an image by normally distributed additive noise.

        Args:
            mean: center of the noise distribution.
            std: spread of the noise distribution.
        """
        self.std = std
        self.mean = mean
開發者ID:mjendrusch,項目名稱:torchsupport,代碼行數:6,代碼來源:transforms.py

示例14: read_embeddings

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def read_embeddings(self, embed, unk=None):
        """Merge pretrained word vectors into this vocabulary.

        Extends the vocab with the pretrained words (renaming a pretrained
        ``unk`` token to this vocab's own UNK symbol first), copies each
        available vector into ``self.embeddings``, and rescales the matrix
        to unit overall standard deviation.
        """
        words = embed.words
        # If the pretrained vocab carries its own UNK token, replace it with
        # the self-defined one before merging.
        if unk in embed:
            words[words.index(unk)] = self.UNK

        self.extend(words)
        self.embeddings = torch.zeros(self.n_words, embed.dim)

        for idx, word in enumerate(self.words):
            if word in embed:
                self.embeddings[idx] = embed[word]
        # Normalize so the embedding matrix has unit std.
        self.embeddings /= torch.std(self.embeddings)
開發者ID:smilelight,項目名稱:lightNLP,代碼行數:16,代碼來源:vocab.py

示例15: mutualInformationLoss

# 需要導入模塊: import torch [as 別名]
# 或者: from torch import std [as 別名]
def mutualInformationLoss(states, rewards_st, weight, loss_manager):
    """
    TODO: Equation needs to be fixed for faster computation
    Loss criterion to assess mutual information between predicted states and rewards
    see: https://en.wikipedia.org/wiki/Mutual_information
    :param states: (th.Tensor)
    :param rewards_st:(th.Tensor)
    :param weight: coefficient to weight the loss (float)
    :param loss_manager: loss criterion needed to log the loss value
    :return:
    """
    eps = 1e-10
    gauss_norm = float(1 / np.sqrt(2 * np.pi))

    def _marginal_density(t):
        # Gaussian density of each row's norm after per-dimension standardization.
        centered = (t - th.mean(t, dim=0)) / (th.std(t, dim=0) + eps)
        return gauss_norm * th.exp(-th.pow(th.norm(centered, 2, dim=1), 2) / 2) + eps

    p_x = _marginal_density(states)
    p_y = _marginal_density(rewards_st)

    # Joint standardization stats are loop-invariant; compute them once.
    joint = th.cat([states, rewards_st], dim=1)
    joint_mean = th.mean(joint, dim=0)
    joint_std = th.std(joint, dim=0)

    mutual_info = 0
    for i in range(states.shape[0]):
        for j in range(rewards_st.shape[0]):
            pair = (th.cat([states[i], rewards_st[j]]) - joint_mean) / (joint_std + eps)
            p_xy = gauss_norm * th.exp(-th.pow(th.norm(pair, 2), 2) / 2) + eps
            mutual_info += p_xy * th.log(p_xy / (p_x[i] * p_y[j]))

    mutual_info_loss = th.exp(-mutual_info)
    loss_manager.addToLosses('mutual_info', weight, mutual_info_loss)
    return weight * mutual_info_loss
開發者ID:araffin,項目名稱:srl-zoo,代碼行數:31,代碼來源:losses.py


注:本文中的torch.std方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。