

Python multivariate_normal.MultivariateNormal Method Code Examples

This article collects typical usage examples of the Python method torch.distributions.multivariate_normal.MultivariateNormal. If you are wondering what multivariate_normal.MultivariateNormal does, how to use it, or want concrete examples, the curated code samples below may help. You can also explore further usage examples from the torch.distributions.multivariate_normal module.


The following presents 12 code examples of multivariate_normal.MultivariateNormal, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
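
Before diving into the examples, here is a minimal, hedged sketch of the basic API (the dimensionality and sample counts below are arbitrary): a MultivariateNormal is constructed from a mean vector and a positive-definite covariance matrix, after which samples can be drawn and log-densities evaluated.

import torch
from torch.distributions.multivariate_normal import MultivariateNormal

# A 3-dimensional standard normal: zero mean, identity covariance.
mean = torch.zeros(3)
covariance = torch.eye(3)
dist = MultivariateNormal(mean, covariance_matrix=covariance)

sample = dist.sample()                 # one draw, shape (3,)
batch = dist.sample(torch.Size([5]))   # five draws, shape (5, 3)
log_p = dist.log_prob(sample)          # log-density of the single draw (a scalar tensor)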

Example 1: __init__

# Required import: from torch.distributions import multivariate_normal [as an alias]
# Or: from torch.distributions.multivariate_normal import MultivariateNormal [as an alias]
def __init__(self, nnodes, nfeat, nhid, nclass, gamma=1.0, beta1=5e-4, beta2=5e-4, lr=0.01, dropout=0.6, device='cpu'):
        super(RGCN, self).__init__()

        self.device = device
        # adj_norm = normalize(adj)
        # first turn original features to distribution
        self.lr = lr
        self.gamma = gamma
        self.beta1 = beta1
        self.beta2 = beta2
        self.nclass = nclass
        self.nhid = nhid // 2
        # self.gc1 = GaussianConvolution(nfeat, nhid, dropout=dropout)
        # self.gc2 = GaussianConvolution(nhid, nclass, dropout)
        self.gc1 = GGCL_F(nfeat, nhid, dropout=dropout)
        self.gc2 = GGCL_D(nhid, nclass, dropout=dropout)

        self.dropout = dropout
        # self.gaussian = MultivariateNormal(torch.zeros(self.nclass), torch.eye(self.nclass))
        self.gaussian = MultivariateNormal(torch.zeros(nnodes, self.nclass),
                torch.diag_embed(torch.ones(nnodes, self.nclass)))
        self.adj_norm1, self.adj_norm2 = None, None
        self.features, self.labels = None, None 
Developer: DSE-MSU, Project: DeepRobust, Lines of code: 25, Source file: r_gcn.py
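
The prior in Example 1 relies on a batched covariance. As a hedged aside (nnodes and nclass below are made-up sizes, not taken from the example), torch.diag_embed(torch.ones(nnodes, nclass)) produces one (nclass, nclass) identity matrix per node, so the distribution behaves like nnodes independent standard normals:

import torch
from torch.distributions.multivariate_normal import MultivariateNormal

nnodes, nclass = 4, 3  # hypothetical sizes
prior = MultivariateNormal(torch.zeros(nnodes, nclass),
                           torch.diag_embed(torch.ones(nnodes, nclass)))
eps = prior.sample()   # shape (nnodes, nclass): one standard-normal draw per node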

Example 2: multi_variate_gaussian_policy

# Required import: from torch.distributions import multivariate_normal [as an alias]
# Or: from torch.distributions.multivariate_normal import MultivariateNormal [as an alias]
def multi_variate_gaussian_policy(self, obs):
        """
        Calcula una distribución gaussiana multivariada del tamaño de las acciones usando las observaciones 
        :param obs: Observaciones del agente
        :return: policy, una distribución sobre las acciones dadas las observaciones actuales
        """
        mu, sigma = self.actor(obs)
        value = self.critic(obs)
        # Clamp each dim of mu based on the (low, high) limits of that action dim
        # x.clamp_(a, b) keeps x between the values a and b
        [mu[:,i].clamp_(float(self.env.action_space.low[i]), float(self.env.action_space.high[i])) for i in range(self.action_shape)]
        # Let sigma be (smoothly) positive
        sigma = torch.nn.Softplus()(sigma).squeeze() + 1e-7 
        
        self.mu = mu.to(torch.device("cpu"))
        self.sigma = sigma.to(torch.device("cpu"))
        self.value = value.to(torch.device("cpu"))
        
        if len(self.mu.shape) == 0:  # mu is a scalar
            self.mu.unsqueeze_(0)  # prevents MultivariateNormal from raising an error
            
        self.action_distribution = MultivariateNormal(self.mu, torch.eye(self.action_shape) * self.sigma, validate_args = True)
        return self.action_distribution 
Developer: joanby, Project: ia-course, Lines of code: 25, Source file: a2c.py
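
A hedged usage sketch (agent and obs are hypothetical placeholders, not defined in the example above): once multi_variate_gaussian_policy has built self.action_distribution, an action and its log-probability can be obtained from the returned distribution.

policy = agent.multi_variate_gaussian_policy(obs)  # `agent` and `obs` are placeholders
action = policy.sample()             # one action vector
log_prob = policy.log_prob(action)   # log-probability, typically used in a policy-gradient loss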

Example 3: multi_variate_gaussian_policy

# Required import: from torch.distributions import multivariate_normal [as an alias]
# Or: from torch.distributions.multivariate_normal import MultivariateNormal [as an alias]
def multi_variate_gaussian_policy(self, obs):
        """
        Calculates a multi-variate gaussian distribution over actions given observations
        :param obs: Agent's observation
        :return: policy, a distribution over actions for the given observation
        """
        mu, sigma = self.actor(obs)
        value = self.critic(obs)
        [ mu[:, i].clamp_(float(self.env.action_space.low[i]), float(self.env.action_space.high[i]))
         for i in range(self.action_shape)]  # Clamp each dim of mu based on the (low,high) limits of that action dim
        sigma = torch.nn.Softplus()(sigma).squeeze() + 1e-7  # Let sigma be (smoothly) +ve
        self.mu = mu.to(torch.device("cpu"))
        self.sigma = sigma.to(torch.device("cpu"))
        self.value = value.to(torch.device("cpu"))
        if len(self.mu.shape) == 0: # See if mu is a scalar
            #self.mu = self.mu.unsqueeze(0)  # This prevents MultivariateNormal from crashing with SIGFPE
            self.mu.unsqueeze_(0)
        self.action_distribution = MultivariateNormal(self.mu, torch.eye(self.action_shape) * self.sigma, validate_args=True)
        return self.action_distribution 
Developer: PacktPublishing, Project: Hands-On-Intelligent-Agents-with-OpenAI-Gym, Lines of code: 21, Source file: a2c_agent.py

Example 4: multi_variate_gaussian_policy

# Required import: from torch.distributions import multivariate_normal [as an alias]
# Or: from torch.distributions.multivariate_normal import MultivariateNormal [as an alias]
def multi_variate_gaussian_policy(self, obs):
        """
        Calculates a multi-variate gaussian distribution over actions given observations
        :param obs: Agent's observation
        :return: policy, a distribution over actions for the given observation
        """
        mu, sigma = self.actor(obs)
        value = self.critic(obs)
        [ mu[:, i].clamp_(float(self.env.action_space.low[i]), float(self.env.action_space.high[i]))
         for i in range(self.action_shape)]  # Clamp each dim of mu based on the (low,high) limits of that action dim
        sigma = torch.nn.Softplus()(sigma).squeeze() + 1e-7  # Let sigma be (smoothly) +ve
        self.mu = mu.to(torch.device("cpu"))
        self.sigma = sigma.to(torch.device("cpu"))
        self.value = value.to(torch.device("cpu"))
        if len(self.mu.shape) == 0: # See if mu is a scalar
            # self.mu = self.mu.unsqueeze(0)  # This prevents MultivariateNormal from crashing with SIGFPE
            self.mu.unsqueeze_(0)
        self.action_distribution = MultivariateNormal(self.mu, torch.eye(self.action_shape) * self.sigma, validate_args=True)
        return self.action_distribution 
Developer: PacktPublishing, Project: Hands-On-Intelligent-Agents-with-OpenAI-Gym, Lines of code: 21, Source file: async_a2c_agent.py

Example 5: multi_variate_gaussian_policy

# Required import: from torch.distributions import multivariate_normal [as an alias]
# Or: from torch.distributions.multivariate_normal import MultivariateNormal [as an alias]
def multi_variate_gaussian_policy(self, obs):
        """
        Calculates a multi-variate gaussian distribution over actions given observations
        :param obs: Agent's observation
        :return: policy, a distribution over actions for the given observation
        """
        mu, sigma = self.actor(obs)
        value = self.critic(obs).squeeze()
        [ mu[:, i].clamp_(float(self.envs.action_space.low[i]), float(self.envs.action_space.high[i]))
         for i in range(self.action_shape)]  # Clamp each dim of mu based on the (low,high) limits of that action dim
        sigma = torch.nn.Softplus()(sigma) + 1e-7  # Let sigma be (smoothly) +ve
        self.mu = mu.to(torch.device("cpu"))
        self.sigma = sigma.to(torch.device("cpu"))
        self.value = value.to(torch.device("cpu"))
        if len(self.mu[0].shape) == 0: # See if mu is a scalar
            self.mu = self.mu.unsqueeze(0)  # This prevents MultivariateNormal from crashing with SIGFPE
        self.covariance = torch.stack([torch.eye(self.action_shape) * s for s in self.sigma])
        if self.action_shape == 1:
            self.covariance = self.sigma.unsqueeze(-1)  # Make the covariance a square mat to avoid RuntimeError with MultivariateNormal
        self.action_distribution = MultivariateNormal(self.mu, self.covariance)
        return self.action_distribution 
Developer: PacktPublishing, Project: Hands-On-Intelligent-Agents-with-OpenAI-Gym, Lines of code: 23, Source file: batched_a2c_agent.py
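
Example 5 stacks one scaled identity matrix per batch element to form the covariance. As a hedged illustration with invented sizes, torch.stack([torch.eye(d) * s for s in sigma]) gives a (batch, d, d) covariance tensor that MultivariateNormal pairs with a (batch, d) mean:

import torch
from torch.distributions.multivariate_normal import MultivariateNormal

batch, action_shape = 2, 3                      # hypothetical sizes
mu = torch.zeros(batch, action_shape)
sigma = torch.tensor([0.5, 1.5])                # one scale per batch element
covariance = torch.stack([torch.eye(action_shape) * s for s in sigma])
dist = MultivariateNormal(mu, covariance)       # a batch of 2 distributions over 3-d actions
actions = dist.sample()                         # shape (2, 3)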

Example 6: log_likelihood

# Required import: from torch.distributions import multivariate_normal [as an alias]
# Or: from torch.distributions.multivariate_normal import MultivariateNormal [as an alias]
def log_likelihood(theta, x):
    with torch.no_grad():
        input = theta
        mean = torch.tensor([input[0], input[1]])
        scale = 1.0
        s_1 = input[2] ** 2
        s_2 = input[3] ** 2
        rho = input[4].tanh()
        covariance = torch.tensor([
            [scale * s_1 ** 2, scale * rho * s_1 * s_2],
            [scale * rho * s_1 * s_2, scale * s_2 ** 2]])
        normal = Normal(mean, covariance)
        m = x.view(-1, 2)
        log_likelihood = normal.log_prob(m).sum()

    return log_likelihood 
Developer: montefiore-ai, Project: hypothesis, Lines of code: 18, Source file: util.py
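
The covariance in Example 6 is parameterised by two squared scales and a correlation passed through tanh. As a hedged sketch with invented parameter values (and writing MultivariateNormal explicitly rather than the aliased Normal used above), the same 2x2 covariance can be built and evaluated like this:

import torch
from torch.distributions.multivariate_normal import MultivariateNormal

theta = torch.tensor([0.0, 0.0, 1.0, 1.0, 0.0])   # hypothetical parameters
mean = theta[:2]
s_1, s_2, rho = theta[2] ** 2, theta[3] ** 2, theta[4].tanh()
covariance = torch.tensor([[s_1 ** 2, rho * s_1 * s_2],
                           [rho * s_1 * s_2, s_2 ** 2]])
dist = MultivariateNormal(mean, covariance)
x = torch.zeros(1, 2)                             # one hypothetical observation
log_likelihood = dist.log_prob(x).sum()           # scalar log-likelihood of the observation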

Example 7: soft_rank_sampling

# Required import: from torch.distributions import multivariate_normal [as an alias]
# Or: from torch.distributions.multivariate_normal import MultivariateNormal [as an alias]
def soft_rank_sampling(loc, covariance_matrix=None, inds_style=True, descending=True):
    '''
    :param loc: mean of the distribution
    :param covariance_matrix: positive-definite covariance matrix
    :param inds_style: if True, return the indices that sort the sampled values; otherwise return the sampled values
    :param descending: sort order used when returning indices
    :return: sorted indices or the sampled values, depending on inds_style
    '''
    m = MultivariateNormal(loc, covariance_matrix)
    vals = m.sample()
    if inds_style:
        sorted_inds = torch.argsort(vals, descending=descending)
        return sorted_inds
    else:
        return vals 
Developer: pt-ranking, Project: pt-ranking.github.io, Lines of code: 16, Source file: pt_extensions.py
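
A hedged usage sketch of soft_rank_sampling (the relevance scores below are invented and an identity covariance is used for simplicity): the function draws one sample from a Gaussian centred on the scores and returns the induced ranking.

import torch

scores = torch.tensor([0.2, 1.5, 0.7])   # hypothetical predicted relevance scores (the mean)
cov = torch.eye(3)                       # identity covariance as a simple, valid choice
ranked_inds = soft_rank_sampling(scores, covariance_matrix=cov)
# ranked_inds is a permutation of [0, 1, 2]; the exact order varies from draw to draw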

Example 8: _allocate_pertubator

# Required import: from torch.distributions import multivariate_normal [as an alias]
# Or: from torch.distributions.multivariate_normal import MultivariateNormal [as an alias]
def _allocate_pertubator(self):
        dimensionality = self.covariance.dim()
        if dimensionality <= 1:
            pertubator = Normal(0, self.covariance)
        else:
            zeros = torch.zeros(dimensionality)
            pertubator = MultivariateNormal(zeros, covariance_matrix=self.covariance)
        self.pertubator = pertubator 
Developer: montefiore-ai, Project: hypothesis, Lines of code: 10, Source file: abc_smc.py

Example 9: log_prob

# Required import: from torch.distributions import multivariate_normal [as an alias]
# Or: from torch.distributions.multivariate_normal import MultivariateNormal [as an alias]
def log_prob(self, mean, conditionals):
        normal = MultivariateNormalDistribution(mean, self.sigma)

        return normal.log_prob(conditionals) 
Developer: montefiore-ai, Project: hypothesis, Lines of code: 6, Source file: transition_distribution.py

Example 10: sample

# Required import: from torch.distributions import multivariate_normal [as an alias]
# Or: from torch.distributions.multivariate_normal import MultivariateNormal [as an alias]
def sample(self, means, samples=1):
        x = []

        with torch.no_grad():
            means = means.view(-1, self.dimensionality)
            mean_samples = torch.Size([samples])
            for mean in means:
                normal = MultivariateNormalDistribution(mean, self.sigma)
                x.append(normal.sample(mean_samples).view(-1, samples, self.dimensionality))
            x = torch.cat(x, dim=0).squeeze()

        return x 
Developer: montefiore-ai, Project: hypothesis, Lines of code: 14, Source file: transition_distribution.py

Example 11: _generate

# Required import: from torch.distributions import multivariate_normal [as an alias]
# Or: from torch.distributions.multivariate_normal import MultivariateNormal [as an alias]
def _generate(self, input):
        mean = torch.tensor([input[0], input[1]])
        scale = 1.0
        s_1 = input[2] ** 2
        s_2 = input[3] ** 2
        rho = input[4].tanh()
        covariance = torch.tensor([
            [scale * s_1 ** 2, scale * rho * s_1 * s_2],
            [scale * rho * s_1 * s_2, scale * s_2 ** 2]])
        normal = Normal(mean, covariance)
        x_out = normal.sample(torch.Size([4])).view(1, -1)

        return x_out 
Developer: montefiore-ai, Project: hypothesis, Lines of code: 15, Source file: simulator.py

Example 12: __init__

# Required import: from torch.distributions import multivariate_normal [as an alias]
# Or: from torch.distributions.multivariate_normal import MultivariateNormal [as an alias]
def __init__(self, args):
        super(VAE, self).__init__()

        # extract model settings from args
        self.args = args
        self.z_size = args.z_size
        self.input_size = args.input_size
        self.input_type = args.input_type
        self.gen_hiddens = args.gen_hiddens


        if self.input_size == [1, 28, 28] or self.input_size == [3, 28, 28]:
            self.last_kernel_size = 7
        elif self.input_size == [1, 28, 20]:
            self.last_kernel_size = (7, 5)
        elif self.input_size == [3, 32, 32]:
            self.last_kernel_size = 8
        else:
            if self.args.dataset=='permuted_mnist':
                # this dataset has no 3D structure
                assert self.input_size == [784]
                assert self.args.gen_architecture == 'MLP'
            else:
                raise ValueError('invalid input size!!')

        self.q_z_nn, self.q_z_mean, self.q_z_var = self.create_encoder()
        self.p_x_nn, self.p_x_mean = self.create_decoder()

        self.q_z_nn_output_dim = 256

        # auxiliary
        if args.cuda:
            self.FloatTensor = torch.cuda.FloatTensor
        else:
            self.FloatTensor = torch.FloatTensor

        # log-det-jacobian = 0 without flows
        self.log_det_j = Variable(self.FloatTensor(1).zero_())

        self.prior = MultivariateNormal(torch.zeros(args.z_size), torch.eye(args.z_size))

        # get gradient dimension:
        self.grad_dims = []
        for param in self.parameters():
            self.grad_dims.append(param.data.numel()) 
Developer: optimass, Project: Maximally_Interfered_Retrieval, Lines of code: 47, Source file: VAE.py
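
As a hedged follow-up (z_size and the batch size below are invented), the prior built in Example 12 is a standard multivariate normal over the latent space, so latent codes can be drawn from it directly:

import torch
from torch.distributions.multivariate_normal import MultivariateNormal

z_size = 16                               # hypothetical latent dimensionality
prior = MultivariateNormal(torch.zeros(z_size), torch.eye(z_size))
z = prior.sample(torch.Size([8]))         # a batch of 8 latent codes, shape (8, 16)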


Note: The torch.distributions.multivariate_normal.MultivariateNormal examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.