

Python torch.std Method Code Examples

This article collects and summarizes typical usage examples of the torch.std method in Python. If you have been wondering what exactly the Python torch.std method does, how to use torch.std, or what real-world examples of it look like, the curated code examples below may help. You can also explore further usage examples from torch, the package this method belongs to.


A total of 15 code examples of the torch.std method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
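Before diving into the examples, here is a minimal standalone sketch of torch.std itself (the tensor values below are made up for illustration). Note that PyTorch returns the sample (Bessel-corrected) standard deviation by default:

import torch

x = torch.tensor([[1.0, 2.0, 3.0],
                  [4.0, 5.0, 6.0]])

# standard deviation over all elements (unbiased by default)
print(torch.std(x))
# per-row standard deviation, keeping the reduced dimension
print(torch.std(x, dim=-1, keepdim=True))
# population (biased) estimate; newer PyTorch versions also accept correction=0
print(torch.std(x, unbiased=False))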

Example 1: test_SDE

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def test_SDE(self):
        def f(x, a, s):
            return -a * x

        def g(x, a, s):
            return s

        em = AffineEulerMaruyama((f, g), (0.02, 0.15), Normal(0., 1.), Normal(0., 1.), dt=1e-2, num_steps=10)
        model = LinearGaussianObservations(em, scale=1e-3)

        x, y = model.sample_path(500)

        for filt in [SISR(model, 500, proposal=Bootstrap()), UKF(model)]:
            filt = filt.initialize().longfilter(y)

            means = filt.result.filter_means
            if isinstance(filt, UKF):
                means = means[:, 0]

            self.assertLess(torch.std(x - means), 5e-2) 
Developer ID: tingiskhan, Project: pyfilter, Lines of code: 22, Source: filters.py

Example 2: forward

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def forward(self, z):
        if z.size(-1) == 1:
            return z

        mu = torch.mean(z, keepdim=True, dim=-1)
        sigma = torch.std(z, keepdim=True, dim=-1)
        ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
        if self.affine:
            ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(ln_out)

        # NOTE(nikita): the t2t code does the following instead, with eps=1e-6
        # However, I currently have no reason to believe that this difference in
        # implementation matters.
        # mu = torch.mean(z, keepdim=True, dim=-1)
        # variance = torch.mean((z - mu.expand_as(z))**2, keepdim=True, dim=-1)
        # ln_out = (z - mu.expand_as(z)) * torch.rsqrt(variance + self.eps).expand_as(z)
        # ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(ln_out)

        return ln_out

# %% 
Developer ID: nikitakit, Project: self-attentive-parser, Lines of code: 23, Source: parse_nk.py
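As the NOTE in the snippet above points out, the tensor2tensor variant normalizes by torch.rsqrt(variance + eps) instead of dividing by (std + eps). A further, easily overlooked difference is that torch.std is unbiased (Bessel-corrected) by default, whereas the mean of squared deviations is the biased estimate. A minimal sketch contrasting the two (eps and the random input are made-up values, not taken from parse_nk.py):

import torch

z = torch.randn(4, 8)
eps = 1e-6
mu = torch.mean(z, keepdim=True, dim=-1)

# variant used in the snippet above: unbiased std, eps added to the denominator
out_std = (z - mu) / (torch.std(z, keepdim=True, dim=-1) + eps)

# t2t-style variant: biased variance, eps added before the inverse square root
variance = torch.mean((z - mu) ** 2, keepdim=True, dim=-1)
out_rsqrt = (z - mu) * torch.rsqrt(variance + eps)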

Example 3: comp

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def comp(self, inpu):
        in_mat1 = torch.triu(inpu.repeat(inpu.size(0), 1), diagonal=1)
        in_mat2 = torch.triu(inpu.repeat(inpu.size(0), 1).t(), diagonal=1)

        comp_first = (in_mat1 - in_mat2)
        comp_second = (in_mat2 - in_mat1)

        std1 = torch.std(comp_first).item()
        std2 = torch.std(comp_second).item()

        comp_first = torch.sigmoid(comp_first * (6.8 / std1))
        comp_second = torch.sigmoid(comp_second * (6.8 / std2))

        comp_first = torch.triu(comp_first, diagonal=1)
        comp_second = torch.triu(comp_second, diagonal=1)

        return (torch.sum(comp_first, 1) + torch.sum(comp_second, 0) + 1) / inpu.size(0) 
Developer ID: technicolor-research, Project: sodeep, Lines of code: 19, Source: model.py

Example 4: __call__

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def __call__(self, x):
        if not self.auto:
            for idx in range(x.shape[0]):
                xmean = torch.mean(x[idx, :, :])
                xstd = torch.std(x[idx, :, :])
                x[idx, :, :] = (x[idx, :, :] - xmean) / xstd
                if xstd == 0:
                    x[idx, :, :] = 0.0
        else:
            view = x.view(x.shape[0], -1)
            length = view.shape[1]
            mean = view.mean(dim=1)
            var = view.var(dim=1)
            self.var = var / (self.count + 1) + self.count / (self.count + 1) * self.var
            self.var += self.count / ((self.count + 1) ** 2) * (self.mean - mean) ** 2
            self.mean = (self.count * self.mean + view.mean(dim=1)) / (self.count + 1)
            for idx in range(x.shape[0]):
                # use the per-channel running statistics; zero out channels with zero variance
                x[idx, :, :] = (x[idx, :, :] - self.mean[idx]) / torch.sqrt(self.var[idx])
                if self.var[idx] == 0:
                    x[idx, :, :] = 0.0
        return x 
Developer ID: mjendrusch, Project: torchsupport, Lines of code: 23, Source: transforms.py

Example 5: forward

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def forward(self, x_context, y_context, x_all=None, y_all=None, n = 10):
        y_sigma = None
        z_context = self.xy_to_z_params(x_context, y_context)
        if self.training:
            z_all = self.xy_to_z_params(x_all, y_all)
            z_sample = self.reparameterise(z_all)
            y_hat = self.decoder.forward(z_sample, x_all)
        else:  
            z_all = z_context
            if self.type == 'ST':
                temp = torch.zeros([n,y_context.shape[0], y_context.shape[2]], device = 'cpu')
            elif self.type == 'MT':
                temp = torch.zeros([n,y_context.shape[0],1,y_context.shape[2],y_context.shape[3],
                                y_context.shape[4]], device = 'cpu')                                
            for i in range(n):
                z_sample = self.reparameterise(z_all)
                temp[i,:] = self.decoder.forward(z_sample, x_context)
            y_hat = torch.mean(temp, dim=0).to(self.device)
            if n > 1:
                y_sigma = torch.std(temp, dim=0).to(self.device)
        return y_hat, z_all, z_context, y_sigma
    
############################################################################### 
Developer ID: amarquand, Project: nispat, Lines of code: 25, Source: NP.py
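The sampling loop in this example follows a general Monte-Carlo pattern: run the decoder n times, stack the outputs, then reduce with torch.mean for the prediction and torch.std for the predictive uncertainty. A self-contained sketch of just that pattern (the decode callable below is a hypothetical stand-in, not the decoder from NP.py):

import torch

def mc_predict(decode, z_samples):
    # stack one prediction per latent sample along a new leading dimension
    preds = torch.stack([decode(z) for z in z_samples], dim=0)
    y_hat = preds.mean(dim=0)    # Monte-Carlo mean prediction
    y_sigma = preds.std(dim=0)   # per-element predictive uncertainty (needs >= 2 samples)
    return y_hat, y_sigma

# toy usage with a dummy decoder
decode = lambda z: 2.0 * z
z_samples = [torch.randn(3) for _ in range(10)]
y_hat, y_sigma = mc_predict(decode, z_samples)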

Example 6: forward

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def forward(self, x_context, y_context, x_all=None, y_all=None, n = 10):
        y_sigma = None
        y_sigma_84 = None
        z_context = self.xy_to_z_params(x_context, y_context)
        if self.training:
            z_all = self.xy_to_z_params(x_all, y_all)
            z_sample = self.reparameterise(z_all)
            y_hat, y_hat_84 = self.decoder.forward(z_sample)
        else:  
            z_all = z_context
            temp = torch.zeros([n,y_context.shape[0], y_context.shape[2]], device = self.device)
            temp_84 = torch.zeros([n,y_context.shape[0], y_context.shape[2]], device = self.device)
            for i in range(n):
                z_sample = self.reparameterise(z_all)
                temp[i,:], temp_84[i,:] = self.decoder.forward(z_sample)
            y_hat = torch.mean(temp, dim=0).to(self.device)
            y_hat_84 = torch.mean(temp_84, dim=0).to(self.device)
            if n > 1:
                y_sigma = torch.std(temp, dim=0).to(self.device)
                y_sigma_84 = torch.std(temp_84, dim=0).to(self.device)
        return y_hat, y_hat_84, z_all, z_context, y_sigma, y_sigma_84
    
############################################################################### 
Developer ID: amarquand, Project: nispat, Lines of code: 25, Source: NPR.py

Example 7: build

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def build(self, corpus, min_freq=1, embed=None):
        sequences = getattr(corpus, self.name)
        counter = Counter(token for sequence in sequences
                          for token in self.transform(sequence))
        self.vocab = Vocab(counter, min_freq, self.specials)

        if not embed:
            self.embed = None
        else:
            tokens = self.transform(embed.tokens)
            # if the `unk` token already exists in the pretrained vocab,
            # then replace it with a self-defined one
            if embed.unk:
                tokens[embed.unk_index] = self.unk

            self.vocab.extend(tokens)
            self.embed = torch.zeros(len(self.vocab), embed.dim)
            self.embed[self.vocab.token2id(tokens)] = embed.vectors
            self.embed /= torch.std(self.embed) 
Developer ID: yzhangcs, Project: parser, Lines of code: 21, Source: field.py

Example 8: __init__

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def __init__(self, n_head, d_model, dropout):
        super().__init__()
        self.n_head = n_head
        self.d_v = self.d_k = d_k = d_model // n_head

        for name in ["w_qs", "w_ks", "w_vs"]:
            self.__setattr__(name,
                             nn.Parameter(torch.FloatTensor(n_head, d_model, d_k)))

        self.attention = ScaledDotProductAttention(d_k, dropout)
        self.lm = LayerNorm(d_model)
        self.w_o = nn.Linear(d_model, d_model, bias=False)
        self.dropout = nn.Dropout(dropout)

        self.w_qs.data.normal_(std=const.INIT_RANGE)
        self.w_ks.data.normal_(std=const.INIT_RANGE)
        self.w_vs.data.normal_(std=const.INIT_RANGE) 
Developer ID: ne7ermore, Project: torch-light, Lines of code: 19, Source: model.py

Example 9: __init__

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def __init__(self, n_head, d_model, dropout=0.5):
        super().__init__()
        self.n_head = n_head
        self.d_v = self.d_k = d_k = d_model // n_head

        for name in ["w_qs", "w_ks", "w_vs"]:
            self.__setattr__(name,
                             nn.Parameter(torch.FloatTensor(n_head, d_model, d_k)))

        self.attention = ScaledDotProductAttention(d_k, dropout)
        self.lm = LayerNorm(d_model)
        self.w_o = nn.Linear(d_model, d_model, bias=False)
        self.dropout = nn.Dropout(dropout)

        self.w_qs.data.normal_(std=const.INIT_RANGE)
        self.w_ks.data.normal_(std=const.INIT_RANGE)
        self.w_vs.data.normal_(std=const.INIT_RANGE) 
Developer ID: ne7ermore, Project: torch-light, Lines of code: 19, Source: model.py

Example 10: init_weights

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def init_weights(self):
        """Initialize weights of the head."""
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.reppoints_cls_conv, std=0.01)
        normal_init(self.reppoints_cls_out, std=0.01, bias=bias_cls)
        normal_init(self.reppoints_pts_init_conv, std=0.01)
        normal_init(self.reppoints_pts_init_out, std=0.01)
        normal_init(self.reppoints_pts_refine_conv, std=0.01)
        normal_init(self.reppoints_pts_refine_out, std=0.01) 
Developer ID: open-mmlab, Project: mmdetection, Lines of code: 15, Source: reppoints_head.py

Example 11: forward

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def forward(self, z):
        if z.size(1) == 1:
            return z
        mu = torch.mean(z, keepdim=True, dim=-1)
        sigma = torch.std(z, keepdim=True, dim=-1)
        ln_out = (z - mu.expand_as(z)) / (sigma.expand_as(z) + self.eps)
        ln_out = ln_out * self.a_2.expand_as(ln_out) + self.b_2.expand_as(ln_out)
        return ln_out 
Developer ID: AuCson, Project: SEDST, Lines of code: 10, Source: rnn_net.py

Example 12: extract_stats

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def extract_stats(opts):
    dset = build_dataset_providers(opts)
    collater_keys = dset[-1]
    dset = dset[0]
    collater = DictCollater()
    collater.batching_keys.extend(collater_keys)
    dloader = DataLoader(dset, batch_size = 100,
                         shuffle=True, collate_fn=collater,
                         num_workers=opts.num_workers)
    # Compute estimation of bpe. As we sample chunks randomly, we
    # should say that an epoch happened after seeing at least as many
    # chunks as total_train_wav_dur // chunk_size
    bpe = (dset.total_wav_dur // opts.chunk_size) // 500
    data = {}
    # run one epoch of training data to extract z-stats of minions
    for bidx, batch in enumerate(dloader, start=1):
        print('Bidx: {}/{}'.format(bidx, bpe))
        for k, v in batch.items():
            if k in opts.exclude_keys:
                continue
            if k not in data:
                data[k] = []
            data[k].append(v)

        if bidx >= opts.max_batches:
            break

    stats = {}
    data = dict((k, torch.cat(v)) for k, v in data.items())
    for k, v in data.items():
        stats[k] = {'mean':torch.mean(torch.mean(v, dim=2), dim=0),
                    'std':torch.std(torch.std(v, dim=2), dim=0)}
    with open(opts.out_file, 'wb') as stats_f:
        pickle.dump(stats, stats_f) 
Developer ID: santi-pdp, Project: pase, Lines of code: 36, Source: make_trainset_statistics.py

Example 13: __init__

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def __init__(self, mean=0.0, std=0.1):
        """Perturb an image by normally distributed additive noise."""
        self.mean = mean
        self.std = std 
Developer ID: mjendrusch, Project: torchsupport, Lines of code: 6, Source: transforms.py
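Only the constructor appears in this snippet; the transform's __call__ is not shown. A plausible sketch of how such additive Gaussian noise is typically applied (an assumption for illustration, not the actual torchsupport implementation):

import torch

def add_gaussian_noise(x, mean=0.0, std=0.1):
    # perturb a tensor with normally distributed additive noise
    return x + torch.randn_like(x) * std + mean

noisy = add_gaussian_noise(torch.zeros(3, 32, 32))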

Example 14: read_embeddings

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def read_embeddings(self, embed, unk=None):
        words = embed.words
        # if the UNK token already exists in the pretrained vocab,
        # then replace it with a self-defined one
        if unk in embed:
            words[words.index(unk)] = self.UNK

        self.extend(words)
        self.embeddings = torch.zeros(self.n_words, embed.dim)

        for i, word in enumerate(self.words):
            if word in embed:
                self.embeddings[i] = embed[word]
        self.embeddings /= torch.std(self.embeddings) 
Developer ID: smilelight, Project: lightNLP, Lines of code: 16, Source: vocab.py

Example 15: mutualInformationLoss

# Required import: import torch [as alias]
# Or: from torch import std [as alias]
def mutualInformationLoss(states, rewards_st, weight, loss_manager):
    """
    TODO: Equation needs to be fixed for faster computation
    Loss criterion to assess mutual information between predicted states and rewards
    see: https://en.wikipedia.org/wiki/Mutual_information
    :param states: (th.Tensor)
    :param rewards_st: (th.Tensor)
    :param weight: coefficient to weight the loss (float)
    :param loss_manager: loss criterion needed to log the loss value
    :return:
    """
    X = states
    Y = rewards_st
    I = 0
    eps = 1e-10
    p_x = float(1 / np.sqrt(2 * np.pi)) * \
          th.exp(-th.pow(th.norm((X - th.mean(X, dim=0)) / (th.std(X, dim=0) + eps), 2, dim=1), 2) / 2) + eps
    p_y = float(1 / np.sqrt(2 * np.pi)) * \
          th.exp(-th.pow(th.norm((Y - th.mean(Y, dim=0)) / (th.std(Y, dim=0) + eps), 2, dim=1), 2) / 2) + eps
    for x in range(X.shape[0]):
        for y in range(Y.shape[0]):
            p_xy = float(1 / np.sqrt(2 * np.pi)) * \
                   th.exp(-th.pow(th.norm((th.cat([X[x], Y[y]]) - th.mean(th.cat([X, Y], dim=1), dim=0)) /
                                          (th.std(th.cat([X, Y], dim=1), dim=0) + eps), 2), 2) / 2) + eps
            I += p_xy * th.log(p_xy / (p_x[x] * p_y[y]))

    mutual_info_loss = th.exp(-I)
    loss_manager.addToLosses('mutual_info', weight, mutual_info_loss)
    return weight * mutual_info_loss 
Developer ID: araffin, Project: srl-zoo, Lines of code: 31, Source: losses.py
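For reference, the double loop above is a direct discretisation of the mutual-information definition linked in the docstring, with each probability replaced by a Gaussian kernel estimate:

I(X; Y) = sum over x, y of p(x, y) * log( p(x, y) / ( p(x) * p(y) ) )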


Note: The torch.std method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers; the source code is copyrighted by its original authors, and its distribution and use are governed by each project's license. Do not reproduce without permission.