

Python Variable.expand_as Method Code Examples

This article collects typical usage examples of the Python method torch.autograd.Variable.expand_as. If you are wondering what Variable.expand_as does, how to call it, or where to find working examples, the curated snippets below should help. You can also browse further usage examples of the containing class, torch.autograd.Variable.


The sections below present 15 code examples of the Variable.expand_as method, sorted by popularity by default.
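
Before diving into the examples, here is a minimal sketch of what expand_as does (assuming the pre-0.4, Variable-based PyTorch API used throughout this page): it returns a view of a tensor broadcast to the shape of another tensor, virtually repeating singleton dimensions without copying memory.

import torch
from torch.autograd import Variable

lam = Variable(torch.Tensor([0.5]))   # shape (1,)
x = Variable(torch.randn(4, 3))       # shape (4, 3)
# lam is viewed as a (4, 3) tensor of 0.5s; no data is copied.
mixed = x * lam.expand_as(x)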

Example 1: mixup_data_hidden

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
def mixup_data_hidden(input, target, mixup_alpha):
    # Sample the mixing coefficient lam from Beta(alpha, alpha).
    if mixup_alpha > 0.:
        lam = np.random.beta(mixup_alpha, mixup_alpha)
    else:
        lam = 1.
    lam = Variable(torch.from_numpy(np.array([lam]).astype('float32')).cuda())
    # Mix each sample with a randomly chosen partner from the same batch;
    # lam is broadcast to the full activation shape via expand_as.
    indices = np.random.permutation(input.size(0))
    output = input * lam.expand_as(input) + input[indices] * (1 - lam.expand_as(input))
    target_a, target_b = target, target[indices]

    return output, target_a, target_b, lam
Author: kazk1018, Project: manifold_mixup, Lines of code: 14, Source file: wide_resnet_manifold_mixup.py
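
A hedged usage sketch (the batch contents and alpha value below are illustrative, and the function requires a GPU because it calls .cuda() internally):

hidden = Variable(torch.randn(8, 16).cuda())
labels = Variable(torch.LongTensor([0, 1, 0, 1, 0, 1, 0, 1]).cuda())
mixed, y_a, y_b, lam = mixup_data_hidden(hidden, labels, mixup_alpha=0.2)
# A mixup loss would then interpolate the two targets with the same lam:
# loss = lam * criterion(out, y_a) + (1 - lam) * criterion(out, y_b)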

Example 2: forward

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
    def forward(self, x, dropout=0.5):
        if not self.training or not dropout:
            return x
        # Sample one Bernoulli mask per sequence (size 1 along the time axis),
        # rescale so activations keep their expected value, then reuse the same
        # mask at every time step via expand_as.
        m = x.data.new(1, x.size(1), x.size(2)).bernoulli_(1 - dropout)
        mask = Variable(m, requires_grad=False) / (1 - dropout)
        mask = mask.expand_as(x)
        return mask * x
Author: batermj, Project: awd-lstm-lm, Lines of code: 9, Source file: locked_dropout.py
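
A minimal usage sketch, assuming the method above belongs to the repository's LockedDropout nn.Module: one Bernoulli mask is sampled per sequence and reused at every time step, unlike ordinary dropout, which resamples at each step.

lockdrop = LockedDropout()              # hypothetical instantiation
lockdrop.train()                        # masking only happens in training mode
h = Variable(torch.randn(35, 20, 400))  # (seq_len, batch, hidden)
h = lockdrop(h, dropout=0.5)            # same mask across all 35 time steps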

Example 3: TorchSVM

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
    class TorchSVM(TorchKernelBase):
        TorchSVMTiming = Timing()

        def __init__(self, **kwargs):
            super(TorchSVM, self).__init__(**kwargs)
            self._fit_args, self._fit_args_names = [1e-3], ["tol"]
            self._batch_size = kwargs.get("batch_size", 128)
            self._optimizer = kwargs.get("optimizer", "Adam")
            self._train_repeat = None

        @TorchSVMTiming.timeit(level=1, prefix="[Core] ")
        def _loss(self, y, y_pred, sample_weight):
            return torch.sum(
                torch.clamp(1 - y * y_pred, min=0) * sample_weight
            ) + 0.5 * (y_pred - self._b.expand_as(y_pred)).unsqueeze(0).mm(self._w)

        def _prepare(self, sample_weight, **kwargs):
            lr = kwargs.get("lr", self._params["lr"])
            self._w = Variable(torch.zeros([len(self._x), 1]), requires_grad=True)
            self._b = Variable(torch.Tensor([.0]), requires_grad=True)
            self._model_parameters = [self._w, self._b]
            self._optimizer = PyTorchOptFac().get_optimizer_by_name(
                self._optimizer, self._model_parameters, lr, self._params["epoch"]
            )
            sample_weight, = self._arr_to_variable(False, sample_weight)
            self._loss_function = lambda y, y_pred: self._loss(y, y_pred, sample_weight)

        @TorchSVMTiming.timeit(level=1, prefix="[Core] ")
        def _fit(self, sample_weight, tol):
            if self._train_repeat is None:
                self._train_repeat = self._get_train_repeat(self._x, self._batch_size)
            l = self.batch_training(
                self._gram, self._y, self._batch_size, self._train_repeat,
                self._loss_function
            )
            if l < tol:
                return True

        @TorchSVMTiming.timeit(level=1, prefix="[Core] ")
        def _predict(self, x, get_raw_results=False, **kwargs):
            if not isinstance(x, Variable):
                x = Variable(torch.from_numpy(np.asarray(x).astype(np.float32)))
            rs = x.mm(self._w)
            rs = rs.add_(self._b.expand_as(rs)).squeeze(1)
            if get_raw_results:
                return rs
            return torch.sign(rs)

        @TorchSVMTiming.timeit(level=1, prefix="[API] ")
        def predict(self, x, get_raw_results=False, gram_provided=False):
            if not gram_provided:
                x = self._kernel(self._x.data.numpy(), np.atleast_2d(x))
            y_pred = (self._w.data.numpy().ravel().dot(x) + self._b.data.numpy()).ravel()
            if not get_raw_results:
                return np.sign(y_pred)
            return y_pred
Author: bitores, Project: MachineLearning, Lines of code: 58, Source file: SVM.py
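
For reference, a reading of the objective that _loss computes, writing alpha for self._w, K for the Gram matrix fed to batch_training, and s for the sample weights, so that y_pred = K alpha + b and y_pred - b recovers K alpha:

\[
\mathcal{L}(\alpha, b) = \sum_i s_i \,\max\bigl(0,\; 1 - y_i\,((K\alpha)_i + b)\bigr) + \tfrac{1}{2}\,\alpha^{\top} K \alpha
\]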

Example 4: theta_to_sampling_grid

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
def theta_to_sampling_grid(out_h,out_w,theta_aff=None,theta_tps=None,theta_aff_tps=None,use_cuda=True,tps_reg_factor=0):
    affTnf = GeometricTnf(out_h=out_h,out_w=out_w,geometric_model='affine',use_cuda=use_cuda)
    tpsTnf = GeometricTnf(out_h=out_h,out_w=out_w,geometric_model='tps',use_cuda=use_cuda,tps_reg_factor=tps_reg_factor)

    if theta_aff is not None:
        sampling_grid_aff = affTnf(image_batch=None,
                                   theta_batch=theta_aff.view(1, 2, 3),
                                   return_sampling_grid=True,
                                   return_warped_image=False)
    else:
        sampling_grid_aff = None

    if theta_tps is not None:
        sampling_grid_tps = tpsTnf(image_batch=None,
                                   theta_batch=theta_tps.view(1, -1),
                                   return_sampling_grid=True,
                                   return_warped_image=False)
    else:
        sampling_grid_tps = None
        
    if theta_aff is not None and theta_aff_tps is not None:
        sampling_grid_aff_tps = tpsTnf(image_batch=None,
                                   theta_batch=theta_aff_tps.view(1,-1),
                                   return_sampling_grid=True,
                                   return_warped_image=False)
        
        # Mark points of sampling_grid_aff that fall outside [-1, 1]^2: in-bound
        # entries (mask == 1) keep their value, out-of-bound entries become -1e10.
        sampling_grid_aff = sampling_grid_aff.clone()
        in_bound_mask_aff = Variable((sampling_grid_aff.data[:,:,:,0] > -1) & (sampling_grid_aff.data[:,:,:,0] < 1) & (sampling_grid_aff.data[:,:,:,1] > -1) & (sampling_grid_aff.data[:,:,:,1] < 1)).unsqueeze(3)
        in_bound_mask_aff = in_bound_mask_aff.expand_as(sampling_grid_aff)
        sampling_grid_aff = torch.add((in_bound_mask_aff.float() - 1) * (1e10), torch.mul(in_bound_mask_aff.float(), sampling_grid_aff))
        # Compose the affine and TPS grids, then mask out-of-bound points of the
        # composed grid in the same way.
        sampling_grid_aff_tps_comp = F.grid_sample(sampling_grid_aff.transpose(2,3).transpose(1,2), sampling_grid_aff_tps).transpose(1,2).transpose(2,3)
        in_bound_mask_aff_tps = Variable((sampling_grid_aff_tps.data[:,:,:,0] > -1) & (sampling_grid_aff_tps.data[:,:,:,0] < 1) & (sampling_grid_aff_tps.data[:,:,:,1] > -1) & (sampling_grid_aff_tps.data[:,:,:,1] < 1)).unsqueeze(3)
        in_bound_mask_aff_tps = in_bound_mask_aff_tps.expand_as(sampling_grid_aff_tps_comp)
        sampling_grid_aff_tps_comp = torch.add((in_bound_mask_aff_tps.float() - 1) * (1e10), torch.mul(in_bound_mask_aff_tps.float(), sampling_grid_aff_tps_comp))
    else:
        sampling_grid_aff_tps_comp = None

    return (sampling_grid_aff, sampling_grid_tps, sampling_grid_aff_tps_comp)
Author: codealphago, Project: weakalign, Lines of code: 42, Source file: eval_util.py

Example 5: forward

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
    def forward(self, sent_tuple):
        # sent_len: [max_len, ..., min_len] (bsize)
        # sent: Variable(seqlen x bsize x worddim)
        sent, sent_len = sent_tuple

        # Sort by length (keep idx)
        sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
        idx_unsort = np.argsort(idx_sort)

        idx_sort = torch.from_numpy(idx_sort).cuda() if self.is_cuda() \
            else torch.from_numpy(idx_sort)
        sent = sent.index_select(1, Variable(idx_sort))

        # Handling padding in Recurrent Networks
        sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len_sorted)
        sent_output = self.enc_lstm(sent_packed)[0]  # seqlen x batch x 2*nhid
        sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]

        # Un-sort by length
        idx_unsort = torch.from_numpy(idx_unsort).cuda() if self.is_cuda() \
            else torch.from_numpy(idx_unsort)
        sent_output = sent_output.index_select(1, Variable(idx_unsort))

        # Pooling
        if self.pool_type == "mean":
            sent_len = Variable(torch.FloatTensor(sent_len.copy())).unsqueeze(1).cuda()
            emb = torch.sum(sent_output, 0).squeeze(0)
            emb = emb / sent_len.expand_as(emb)
        elif self.pool_type == "max":
            if not self.max_pad:
                sent_output[sent_output == 0] = -1e9
            emb = torch.max(sent_output, 0)[0]
            if emb.ndimension() == 3:
                emb = emb.squeeze(0)
                assert emb.ndimension() == 2

        return emb
Author: pemazare, Project: InferSent, Lines of code: 39, Source file: models.py
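
The mean-pooling branch is worth isolating: the hidden states are summed over time and divided by each sentence's true length, broadcast across the embedding dimension. A standalone sketch (shapes are illustrative):

out = Variable(torch.randn(7, 3, 5))                            # (seqlen, bsize, dim)
lengths = Variable(torch.FloatTensor([7, 5, 3])).unsqueeze(1)   # (bsize, 1)
summed = torch.sum(out, 0).squeeze(0)                           # (bsize, dim)
emb = summed / lengths.expand_as(summed)                        # true mean per sentence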

Example 6: Delta

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
class Delta(Distribution):
    """
    Degenerate discrete distribution (a single point).

    Discrete distribution that assigns probability one to the single element in
    its support. Delta distribution parameterized by a random choice should not
    be used with MCMC based inference, as doing so produces incorrect results.

    :param torch.autograd.Variable v: The single support element.
    """
    enumerable = True

    def __init__(self, v, batch_size=None, *args, **kwargs):
        self.v = v
        if not isinstance(self.v, Variable):
            self.v = Variable(self.v)
        if v.dim() == 1 and batch_size is not None:
            # expand takes sizes, not a tensor: broadcast v to (batch_size, len(v)).
            self.v = v.expand(batch_size, v.size(0))
        super(Delta, self).__init__(*args, **kwargs)

    def batch_shape(self, x=None):
        """
        Ref: :py:meth:`pyro.distributions.distribution.Distribution.batch_shape`
        """
        event_dim = 1
        v = self.v
        if x is not None:
            if x.size()[-event_dim] != v.size()[-event_dim]:
                raise ValueError("The event size for the data and distribution parameters must match.\n"
                                 "Expected x.size()[-1] == self.v.size()[-1], but got {} vs {}".format(
                                     x.size(-1), v.size(-1)))
            try:
                v = self.v.expand_as(x)
            except RuntimeError as e:
                raise ValueError("Parameter `v` with shape {} is not broadcastable to "
                                 "the data shape {}. \nError: {}".format(v.size(), x.size(), str(e)))
        return v.size()[:-event_dim]

    def event_shape(self):
        """
        Ref: :py:meth:`pyro.distributions.distribution.Distribution.event_shape`
        """
        event_dim = 1
        return self.v.size()[-event_dim:]

    def sample(self):
        """
        Ref: :py:meth:`pyro.distributions.distribution.Distribution.sample`
        """
        return self.v

    def batch_log_pdf(self, x):
        """
        Ref: :py:meth:`pyro.distributions.distribution.Distribution.batch_log_pdf`
        """
        v = self.v
        v = v.expand(self.shape(x))
        batch_shape = self.batch_shape(x) + (1,)
        return torch.sum(torch.eq(x, v).float().log(), -1).contiguous().view(batch_shape)

    def enumerate_support(self, v=None):
        """
        Returns the delta distribution's support, as a tensor along the first dimension.

        :param v: torch variable where each element of the tensor represents the point at
            which the delta distribution is concentrated.
        :return: torch variable enumerating the support of the delta distribution.
        :rtype: torch.autograd.Variable.
        """
        return Variable(self.v.data)
Author: Magica-Chen, Project: pyro, Lines of code: 72, Source file: delta.py
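
A small usage sketch of the broadcasting in batch_shape (values are illustrative): expand_as lifts the single support point to the shape of a data batch, and only the batch dimensions are returned.

d = Delta(Variable(torch.Tensor([1.0, 2.0])))   # event shape (2,)
x = Variable(torch.ones(5, 2))                  # a batch of 5 observations
print(d.batch_shape(x))                         # torch.Size([5]), via v.expand_as(x)
print(d.sample())                               # always returns v itself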

Example 7: RaoBlackwellizationTests

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
class RaoBlackwellizationTests(TestCase):
    def setUp(self):
        # normal-normal; known covariance
        self.lam0 = Variable(torch.Tensor([0.1, 0.1]))   # precision of prior
        self.mu0 = Variable(torch.Tensor([0.0, 0.5]))   # prior mean
        # known precision of observation noise
        self.lam = Variable(torch.Tensor([6.0, 4.0]))
        self.n_outer = 3
        self.n_inner = 3
        self.n_data = Variable(torch.Tensor([self.n_outer * self.n_inner]))
        self.data = []
        self.sum_data = ng_zeros(2)
        for _out in range(self.n_outer):
            data_in = []
            for _in in range(self.n_inner):
                data_in.append(Variable(torch.Tensor([-0.1, 0.3]) + torch.randn(2) / torch.sqrt(self.lam.data)))
                self.sum_data += data_in[-1]
            self.data.append(data_in)
        self.analytic_lam_n = self.lam0 + self.n_data.expand_as(self.lam) * self.lam
        self.analytic_log_sig_n = -0.5 * torch.log(self.analytic_lam_n)
        self.analytic_mu_n = self.sum_data * (self.lam / self.analytic_lam_n) +\
            self.mu0 * (self.lam0 / self.analytic_lam_n)
        self.verbose = True

    # this tests rao-blackwellization in elbo for nested list map_datas
    def test_nested_list_map_data_in_elbo(self, n_steps=4000):
        pyro.clear_param_store()

        def model():
            mu_latent = pyro.sample(
                    "mu_latent",
                    dist.Normal(self.mu0, torch.pow(self.lam0, -0.5), reparameterized=False))

            def obs_outer(i, x):
                pyro.map_data("map_obs_inner_%d" % i, x, lambda _i, _x:
                              obs_inner(i, _i, _x), batch_size=3)

            def obs_inner(i, _i, _x):
                pyro.observe("obs_%d_%d" % (i, _i), dist.normal, _x, mu_latent,
                             torch.pow(self.lam, -0.5))

            pyro.map_data("map_obs_outer", self.data, lambda i, x:
                          obs_outer(i, x), batch_size=3)

            return mu_latent

        def guide():
            mu_q = pyro.param("mu_q", Variable(self.analytic_mu_n.data + 0.234 * torch.ones(2),
                                               requires_grad=True))
            log_sig_q = pyro.param("log_sig_q", Variable(
                                   self.analytic_log_sig_n.data - 0.27 * torch.ones(2),
                                   requires_grad=True))
            sig_q = torch.exp(log_sig_q)
            mu_latent = pyro.sample(
                    "mu_latent",
                    dist.Normal(mu_q, sig_q, reparameterized=False),
                    baseline=dict(use_decaying_avg_baseline=True))

            def obs_outer(i, x):
                pyro.map_data("map_obs_inner_%d" % i, x, lambda _i, _x:
                              None, batch_size=3)

            pyro.map_data("map_obs_outer", self.data, lambda i, x:
                          obs_outer(i, x), batch_size=3)

            return mu_latent

        guide_trace = pyro.poutine.trace(guide, graph_type="dense").get_trace()
        model_trace = pyro.poutine.trace(pyro.poutine.replay(model, guide_trace),
                                         graph_type="dense").get_trace()
        assert len(model_trace.edges()) == 27
        assert len(model_trace.nodes()) == 16
        assert len(guide_trace.edges()) == 0
        assert len(guide_trace.nodes()) == 9

        adam = optim.Adam({"lr": 0.0008, "betas": (0.96, 0.999)})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=True)

        for k in range(n_steps):
            svi.step()

            mu_error = param_mse("mu_q", self.analytic_mu_n)
            log_sig_error = param_mse("log_sig_q", self.analytic_log_sig_n)
            if k % 500 == 0 and self.verbose:
                print("mu error, log(sigma) error:  %.4f, %.4f" % (mu_error, log_sig_error))

        self.assertEqual(0.0, mu_error, prec=0.04)
        self.assertEqual(0.0, log_sig_error, prec=0.04)

    # this tests rao-blackwellization and baselines for a vectorized map_data
    # inside of a list map_data with superfluous random variables to complexify the
    # graph structure and introduce additional baselines
    def test_vectorized_map_data_in_elbo_with_superfluous_rvs(self):
        self._test_vectorized_map_data_in_elbo(n_superfluous_top=2, n_superfluous_bottom=2, n_steps=6000)

    def _test_vectorized_map_data_in_elbo(self, n_superfluous_top, n_superfluous_bottom, n_steps):
        pyro.clear_param_store()
        self.data_tensor = Variable(torch.zeros(9, 2))
        for _out in range(self.n_outer):
            for _in in range(self.n_inner):
#......... part of the code omitted here .........
Author: Magica-Chen, Project: pyro, Lines of code: 103, Source file: test_tracegraph_elbo.py

Example 8: int

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
import numpy as np

ones = np.ones((4, 1), np.int64)
ones = int(ones.argmax())
print(ones)

y = Variable(torch.LongTensor([ones]))
x = Variable(torch.randn(4, 1))
# x = x.view(1, 4)

print(np.argmax(x.data))
softmax = torch.nn.Softmax()   # renamed from loss_function: Softmax is not a loss
print("softmax")
print(softmax(x))

# expand_as broadcasts the (1, 2) row y to the (5, 2) shape of z.
z = Variable(torch.randn(5, 2))
y = Variable(torch.randn(1, 2))
print(y)
print(y.expand_as(z))

t = Variable(torch.Tensor(np.random.uniform(0.1, -0.1, (1, 5))))
print(t)

val, index = torch.max(t, 1)
val, index = t.max(0)
print(np.argmax(t.data.numpy()))

x = Variable(torch.randn(3, 2))
print(x)
result = torch.sum(x, 1)
print(result)
Author: Og192, Project: python, Lines of code: 33, Source file: ops.py

Example 9: NormalNormalNormalTests

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
class NormalNormalNormalTests(TestCase):

    def setUp(self):
        # normal-normal-normal; known covariance
        self.lam0 = Variable(torch.Tensor([0.1, 0.1]))  # precision of prior
        self.mu0 = Variable(torch.Tensor([0.0, 0.5]))   # prior mean
        # known precision of observation noise
        self.lam = Variable(torch.Tensor([6.0, 4.0]))
        self.data = []
        self.data.append(Variable(torch.Tensor([-0.1, 0.3])))
        self.data.append(Variable(torch.Tensor([0.00, 0.4])))
        self.data.append(Variable(torch.Tensor([0.20, 0.5])))
        self.data.append(Variable(torch.Tensor([0.10, 0.7])))
        self.n_data = Variable(torch.Tensor([len(self.data)]))
        self.sum_data = self.data[0] + \
            self.data[1] + self.data[2] + self.data[3]
        self.analytic_lam_n = self.lam0 + \
            self.n_data.expand_as(self.lam) * self.lam
        self.analytic_log_sig_n = -0.5 * torch.log(self.analytic_lam_n)
        self.analytic_mu_n = self.sum_data * (self.lam / self.analytic_lam_n) +\
            self.mu0 * (self.lam0 / self.analytic_lam_n)
        self.verbose = True

    def test_elbo_reparameterized(self):
        self.do_elbo_test(True, True, 5000, 0.02, 0.002, False, False)

    def test_elbo_nonreparameterized_both_baselines(self):
        self.do_elbo_test(False, False, 15000, 0.05, 0.001, use_nn_baseline=True,
                          use_decaying_avg_baseline=True)

    def test_elbo_nonreparameterized_decaying_baseline(self):
        self.do_elbo_test(True, False, 12000, 0.04, 0.0015, use_nn_baseline=False,
                          use_decaying_avg_baseline=True)

    def test_elbo_nonreparameterized_nn_baseline(self):
        self.do_elbo_test(False, True, 12000, 0.04, 0.0015, use_nn_baseline=True,
                          use_decaying_avg_baseline=False)

    def do_elbo_test(self, repa1, repa2, n_steps, prec, lr, use_nn_baseline, use_decaying_avg_baseline):
        if self.verbose:
            print(" - - - - - DO NORMALNORMALNORMAL ELBO TEST - - - - - -")
            print("[reparameterized = %s, %s; nn_baseline = %s, decaying_baseline = %s]" %
                  (repa1, repa2, use_nn_baseline, use_decaying_avg_baseline))
        pyro.clear_param_store()

        if use_nn_baseline:

            class VanillaBaselineNN(nn.Module):
                def __init__(self, dim_input, dim_h):
                    super(VanillaBaselineNN, self).__init__()
                    self.lin1 = nn.Linear(dim_input, dim_h)
                    self.lin2 = nn.Linear(dim_h, 1)
                    self.sigmoid = nn.Sigmoid()

                def forward(self, x):
                    h = self.sigmoid(self.lin1(x))
                    return self.lin2(h)

            mu_prime_baseline = pyro.module("mu_prime_baseline", VanillaBaselineNN(2, 5), tags="baseline")
        else:
            mu_prime_baseline = None

        def model():
            mu_latent_prime = pyro.sample(
                    "mu_latent_prime",
                    dist.Normal(self.mu0, torch.pow(self.lam0, -0.5), reparameterized=repa1))
            mu_latent = pyro.sample(
                    "mu_latent",
                    dist.Normal(mu_latent_prime, torch.pow(self.lam0, -0.5), reparameterized=repa2))
            for i, x in enumerate(self.data):
                pyro.observe("obs_%d" % i, dist.normal, x, mu_latent,
                             torch.pow(self.lam, -0.5))
            return mu_latent

        # note that the exact posterior is not mean field!
        def guide():
            mu_q = pyro.param("mu_q", Variable(self.analytic_mu_n.data + 0.334 * torch.ones(2),
                                               requires_grad=True))
            log_sig_q = pyro.param("log_sig_q", Variable(
                                   self.analytic_log_sig_n.data - 0.29 * torch.ones(2),
                                   requires_grad=True))
            mu_q_prime = pyro.param("mu_q_prime", Variable(torch.Tensor([-0.34, 0.52]),
                                    requires_grad=True))
            kappa_q = pyro.param("kappa_q", Variable(torch.Tensor([0.74]),
                                 requires_grad=True))
            log_sig_q_prime = pyro.param("log_sig_q_prime",
                                         Variable(-0.5 * torch.log(1.2 * self.lam0.data),
                                                  requires_grad=True))
            sig_q, sig_q_prime = torch.exp(log_sig_q), torch.exp(log_sig_q_prime)
            mu_latent_dist = dist.Normal(mu_q, sig_q, reparameterized=repa2)
            mu_latent = pyro.sample("mu_latent", mu_latent_dist,
                                    baseline=dict(use_decaying_avg_baseline=use_decaying_avg_baseline))
            mu_latent_prime_dist = dist.Normal(kappa_q.expand_as(mu_latent) * mu_latent + mu_q_prime,
                                               sig_q_prime,
                                               reparameterized=repa1)
            pyro.sample("mu_latent_prime",
                        mu_latent_prime_dist,
                        baseline=dict(nn_baseline=mu_prime_baseline,
                                      nn_baseline_input=mu_latent,
                                      use_decaying_avg_baseline=use_decaying_avg_baseline))
#......... part of the code omitted here .........
Author: Magica-Chen, Project: pyro, Lines of code: 103, Source file: test_tracegraph_elbo.py

Example 10: NormalNormalTests

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
class NormalNormalTests(TestCase):

    def setUp(self):
        # normal-normal; known covariance
        self.lam0 = Variable(torch.Tensor([0.1, 0.1]))   # precision of prior
        self.mu0 = Variable(torch.Tensor([0.0, 0.5]))   # prior mean
        # known precision of observation noise
        self.lam = Variable(torch.Tensor([6.0, 4.0]))
        self.data = []
        self.data.append(Variable(torch.Tensor([-0.1, 0.3])))
        self.data.append(Variable(torch.Tensor([0.00, 0.4])))
        self.data.append(Variable(torch.Tensor([0.20, 0.5])))
        self.data.append(Variable(torch.Tensor([0.10, 0.7])))
        self.n_data = Variable(torch.Tensor([len(self.data)]))
        self.sum_data = self.data[0] + \
            self.data[1] + self.data[2] + self.data[3]
        self.analytic_lam_n = self.lam0 + \
            self.n_data.expand_as(self.lam) * self.lam
        self.analytic_log_sig_n = -0.5 * torch.log(self.analytic_lam_n)
        self.analytic_mu_n = self.sum_data * (self.lam / self.analytic_lam_n) +\
            self.mu0 * (self.lam0 / self.analytic_lam_n)
        self.verbose = True

    def test_elbo_reparameterized(self):
        self.do_elbo_test(True, 1000)

    @pytest.mark.init(rng_seed=0)
    def test_elbo_nonreparameterized(self):
        self.do_elbo_test(False, 5000)

    def do_elbo_test(self, reparameterized, n_steps):
        if self.verbose:
            print(" - - - - - DO NORMALNORMAL ELBO TEST  [reparameterized = %s] - - - - - " % reparameterized)
        pyro.clear_param_store()

        def model():
            mu_latent = pyro.sample(
                    "mu_latent",
                    dist.Normal(self.mu0, torch.pow(self.lam0, -0.5), reparameterized=reparameterized))
            for i, x in enumerate(self.data):
                pyro.observe("obs_%d" % i, dist.normal, x, mu_latent,
                             torch.pow(self.lam, -0.5))
            return mu_latent

        def guide():
            mu_q = pyro.param("mu_q", Variable(self.analytic_mu_n.data + 0.334 * torch.ones(2),
                                               requires_grad=True))
            log_sig_q = pyro.param("log_sig_q", Variable(
                                   self.analytic_log_sig_n.data - 0.29 * torch.ones(2),
                                   requires_grad=True))
            sig_q = torch.exp(log_sig_q)
            mu_latent = pyro.sample("mu_latent",
                                    dist.Normal(mu_q, sig_q, reparameterized=reparameterized),
                                    baseline=dict(use_decaying_avg_baseline=True))
            return mu_latent

        adam = optim.Adam({"lr": .0015, "betas": (0.97, 0.999)})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=True)

        for k in range(n_steps):
            svi.step()

            mu_error = param_mse("mu_q", self.analytic_mu_n)
            log_sig_error = param_mse("log_sig_q", self.analytic_log_sig_n)
            if k % 250 == 0 and self.verbose:
                print("mu error, log(sigma) error:  %.4f, %.4f" % (mu_error, log_sig_error))

        self.assertEqual(0.0, mu_error, prec=0.03)
        self.assertEqual(0.0, log_sig_error, prec=0.03)
Author: Magica-Chen, Project: pyro, Lines of code: 71, Source file: test_tracegraph_elbo.py
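
The analytic quantities computed in setUp here (and in the other normal-normal test classes on this page) are the standard conjugate posterior for a normal likelihood with known precision lam and a normal prior with mean mu0 and precision lam0:

\[
\lambda_n = \lambda_0 + n\,\lambda, \qquad
\mu_n = \frac{\lambda \sum_{i=1}^{n} x_i + \lambda_0\,\mu_0}{\lambda_n}, \qquad
\log \sigma_n = -\tfrac{1}{2}\log \lambda_n
\]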

Example 11: GaussianChainTests

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
class GaussianChainTests(TestCase):
    # chain of normals with known covariances and latent means

    def setUp(self):
        self.mu0 = Variable(torch.Tensor([0.2]))
        self.data = []
        self.data.append(Variable(torch.Tensor([-0.1])))
        self.data.append(Variable(torch.Tensor([0.03])))
        self.data.append(Variable(torch.Tensor([0.20])))
        self.data.append(Variable(torch.Tensor([0.10])))
        self.n_data = Variable(torch.Tensor([len(self.data)]))
        self.sum_data = self.data[0] + self.data[1] + self.data[2] + self.data[3]
        self.verbose = True

    def setup_chain(self, N):
        self.N = N  # number of latent variables in the chain
        lambdas = [1.5 * (k + 1) / N for k in range(N + 1)]
        self.lambdas = list(map(lambda x: Variable(torch.Tensor([x])), lambdas))
        self.lambda_tilde_posts = [self.lambdas[0]]
        for k in range(1, self.N):
            lambda_tilde_k = (self.lambdas[k] * self.lambda_tilde_posts[k - 1]) /\
                (self.lambdas[k] + self.lambda_tilde_posts[k - 1])
            self.lambda_tilde_posts.append(lambda_tilde_k)
        self.lambda_posts = [None]  # this is never used (just a way of shifting the indexing by 1)
        for k in range(1, self.N):
            lambda_k = self.lambdas[k] + self.lambda_tilde_posts[k - 1]
            self.lambda_posts.append(lambda_k)
        lambda_N_post = (self.n_data.expand_as(self.lambdas[N]) * self.lambdas[N]) +\
            self.lambda_tilde_posts[N - 1]
        self.lambda_posts.append(lambda_N_post)
        self.target_kappas = [None]
        self.target_kappas.extend([self.lambdas[k] / self.lambda_posts[k] for k in range(1, self.N)])
        self.target_mus = [None]
        self.target_mus.extend([self.mu0 * self.lambda_tilde_posts[k - 1] / self.lambda_posts[k]
                                for k in range(1, self.N)])
        target_mu_N = self.sum_data * self.lambdas[N] / lambda_N_post +\
            self.mu0 * self.lambda_tilde_posts[N - 1] / lambda_N_post
        self.target_mus.append(target_mu_N)
        self.which_nodes_reparam = self.setup_reparam_mask(N)

    # controls which nodes are reparameterized
    def setup_reparam_mask(self, N):
        while True:
            mask = torch.bernoulli(0.30 * torch.ones(N))
            if torch.sum(mask) < 0.40 * N and torch.sum(mask) > 0.5:
                return mask

    def test_elbo_reparameterized_N_is_3(self):
        self.setup_chain(3)
        self.do_elbo_test(True, 4000, 0.0015, 0.03, difficulty=1.0)

    def test_elbo_reparameterized_N_is_8(self):
        self.setup_chain(8)
        self.do_elbo_test(True, 5000, 0.0015, 0.03, difficulty=1.0)

    def test_elbo_reparameterized_N_is_17(self):
        self.setup_chain(17)
        self.do_elbo_test(True, 5000, 0.0015, 0.03, difficulty=1.0)

    def test_elbo_nonreparameterized_N_is_3(self):
        self.setup_chain(3)
        self.do_elbo_test(False, 5000, 0.001, 0.04, difficulty=0.6)

    def test_elbo_nonreparameterized_N_is_5(self):
        self.setup_chain(5)
        self.do_elbo_test(False, 5000, 0.001, 0.06, difficulty=0.6)

    def test_elbo_nonreparameterized_N_is_7(self):
        self.setup_chain(7)
        self.do_elbo_test(False, 5000, 0.001, 0.05, difficulty=0.6)

    def do_elbo_test(self, reparameterized, n_steps, lr, prec, difficulty=1.0):
        if self.verbose:
            n_repa_nodes = torch.sum(self.which_nodes_reparam) if not reparameterized else self.N
            print(" - - - - - DO GAUSSIAN %d-CHAIN ELBO TEST  [reparameterized = %s; %d/%d] - - - - - " %
                  (self.N, reparameterized, n_repa_nodes, self.N))
            if self.N < 0:
                def array_to_string(y):
                    return str(map(lambda x: "%.3f" % x.data.cpu().numpy()[0], y))

                print("lambdas: " + array_to_string(self.lambdas))
                print("target_mus: " + array_to_string(self.target_mus[1:]))
                print("target_kappas: " + array_to_string(self.target_kappas[1:]))
                print("lambda_posts: " + array_to_string(self.lambda_posts[1:]))
                print("lambda_tilde_posts: " + array_to_string(self.lambda_tilde_posts))
        pyro.clear_param_store()

        def model(*args, **kwargs):
            next_mean = self.mu0
            for k in range(1, self.N + 1):
                latent_dist = dist.Normal(next_mean, torch.pow(self.lambdas[k - 1], -0.5))
                mu_latent = pyro.sample("mu_latent_%d" % k, latent_dist)
                next_mean = mu_latent

            mu_N = next_mean
            for i, x in enumerate(self.data):
                pyro.observe("obs_%d" % i, dist.normal, x, mu_N,
                             torch.pow(self.lambdas[self.N], -0.5))
            return mu_N

#......... part of the code omitted here .........
Author: Magica-Chen, Project: pyro, Lines of code: 103, Source file: test_conjugate_gaussian_models.py

Example 12: Normalize

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
class Normalize(SubLayer):
    def __init__(self, parent, shape, lr=0.001, eps=1e-8, momentum=0.9, optimizers=None):
        SubLayer.__init__(self, parent, shape)
        self.sample_mean, self.sample_var = None, None
        self.running_mean, self.running_var = None, None
        self.x_cache, self.x_normalized_cache = None, None
        self._lr, self._eps = lr, eps
        if optimizers is None:
            self._g_optimizer, self._b_optimizer = Adam(self._lr), Adam(self._lr)
        else:
            self._g_optimizer, self._b_optimizer = optimizers
        self.gamma = Variable(torch.ones(self.shape[1]), requires_grad=True)
        self.beta = Variable(torch.ones(self.shape[1]), requires_grad=True)
        self._momentum = momentum
        self.init_optimizers()
        self.description = "(lr: {}, eps: {}, momentum: {}, optimizer: ({}, {}))".format(
            lr, eps, momentum, self._g_optimizer.name, self._b_optimizer.name
        )

    def get_params(self):
        return self._lr, self._eps, self._momentum, (self._g_optimizer.name, self._b_optimizer.name)

    @property
    def special_params(self):
        return {
            "gamma": self.gamma, "beta": self.beta,
            "running_mean": self.running_mean, "running_var": self.running_var,
            "_g_optimizer": self._g_optimizer, "_b_optimizer": self._b_optimizer
        }

    def init_optimizers(self):
        _opt_fac = OptFactory()
        if not isinstance(self._g_optimizer, Optimizer):
            self._g_optimizer = _opt_fac.get_optimizer_by_name(
                self._g_optimizer, None, self._lr, None
            )
        if not isinstance(self._b_optimizer, Optimizer):
            self._b_optimizer = _opt_fac.get_optimizer_by_name(
                self._b_optimizer, None, self._lr, None
            )
        self._g_optimizer.feed_variables([self.gamma])
        self._b_optimizer.feed_variables([self.beta])

    # noinspection PyTypeChecker
    def _activate(self, x, predict):
        if self.running_mean is None or self.running_var is None:
            self.running_mean = Variable(torch.zeros(x.size()[1]))
            self.running_var = Variable(torch.zeros(x.size()[1]))
        if not predict:
            self.sample_mean = torch.mean(x, dim=0)
            self.sample_var = torch.var(x, dim=0)
            x_normalized = (x - self.sample_mean.expand_as(x)) / torch.sqrt(self.sample_var + self._eps).expand_as(x)
            self.x_cache, self.x_normalized_cache = x, x_normalized
            out = self.gamma.expand_as(x_normalized) * x_normalized + self.beta.expand_as(x_normalized)
            self.running_mean = self._momentum * self.running_mean + (1 - self._momentum) * self.sample_mean
            self.running_var = self._momentum * self.running_var + (1 - self._momentum) * self.sample_var
            if self.gamma.grad is not None and self.beta.grad is not None:
                self._g_optimizer.update()
                self._b_optimizer.update()
                self.gamma.data -= self._g_optimizer.run(0, self.gamma.grad.data)
                self.beta.data -= self._b_optimizer.run(0, self.beta.grad.data)
                self.gamma.grad.data.zero_()
                self.beta.grad.data.zero_()
        else:
            x_normalized = (x - self.running_mean.expand_as(x)) / torch.sqrt(self.running_var + self._eps).expand_as(x)
            out = self.gamma.expand_as(x) * x_normalized + self.beta.expand_as(x)
        return out
Author: bitores, Project: MachineLearning, Lines of code: 69, Source file: Layers.py
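
The training branch is plain batch normalization, y = gamma * (x - mu_B) / sqrt(sigma_B^2 + eps) + beta, with running statistics tracked by an exponential moving average (note that this layer initializes beta to ones, where standard batch norm uses zeros). A minimal numeric sketch of the normalization step:

x = Variable(torch.Tensor([[1.0, 2.0], [3.0, 4.0]]))
mean, var = torch.mean(x, dim=0), torch.var(x, dim=0)
x_hat = (x - mean.expand_as(x)) / torch.sqrt(var + 1e-8).expand_as(x)
print(x_hat)   # each column now has zero mean and (roughly) unit variance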

Example 13: TorchLinearSVM

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
    class TorchLinearSVM(TorchAutoClassifierBase):
        TorchLinearSVMTiming = Timing()

        def __init__(self, **kwargs):
            super(TorchLinearSVM, self).__init__(**kwargs)
            self._w = self._b = None

            self._params["c"] = kwargs.get("c", 1)
            self._params["lr"] = kwargs.get("lr", 0.001)
            self._params["batch_size"] = kwargs.get("batch_size", 128)
            self._params["epoch"] = kwargs.get("epoch", 10 ** 4)
            self._params["tol"] = kwargs.get("tol", 1e-3)
            self._params["optimizer"] = kwargs.get("optimizer", "Adam")

        @TorchLinearSVMTiming.timeit(level=1, prefix="[Core] ")
        def _loss(self, y, y_pred, c):
            return torch.sum(
                torch.clamp(1 - y * y_pred, min=0)
            ) + c * torch.sqrt(torch.sum(self._w * self._w))

        @TorchLinearSVMTiming.timeit(level=1, prefix="[API] ")
        def fit(self, x, y, c=None, lr=None, batch_size=None, epoch=None, tol=None,
                optimizer=None, animation_params=None):
            if c is None:
                c = self._params["c"]
            if lr is None:
                lr = self._params["lr"]
            if batch_size is None:
                batch_size = self._params["batch_size"]
            if epoch is None:
                epoch = self._params["epoch"]
            if tol is None:
                tol = self._params["tol"]
            if optimizer is None:
                optimizer = self._params["optimizer"]
            *animation_properties, animation_params = self._get_animation_params(animation_params)
            x, y = np.atleast_2d(x), np.asarray(y, dtype=np.float32)
            y_2d = y[..., None]

            self._w = Variable(torch.rand([x.shape[1], 1]), requires_grad=True)
            self._b = Variable(torch.Tensor([0.]), requires_grad=True)
            self._model_parameters = [self._w, self._b]
            self._optimizer = PyTorchOptFac().get_optimizer_by_name(
                optimizer, self._model_parameters, lr, epoch
            )

            x, y, y_2d = self._arr_to_variable(False, x, y, y_2d)
            loss_function = lambda _y, _y_pred: self._loss(_y, _y_pred, c)

            bar = ProgressBar(max_value=epoch, name="TorchLinearSVM")
            ims = []
            train_repeat = self._get_train_repeat(x, batch_size)
            for i in range(epoch):
                self._optimizer.update()
                l = self.batch_training(
                    x, y_2d, batch_size, train_repeat, loss_function
                )
                if l < tol:
                    bar.terminate()
                    break
                self._handle_animation(i, x, y, ims, animation_params, *animation_properties)
                bar.update()
            self._handle_mp4(ims, animation_properties)

        @TorchLinearSVMTiming.timeit(level=1, prefix="[API] ")
        def _predict(self, x, get_raw_results=False, **kwargs):
            if not isinstance(x, Variable):
                x = Variable(torch.from_numpy(np.asarray(x).astype(np.float32)))
            rs = x.mm(self._w)
            rs = rs.add_(self._b.expand_as(rs)).squeeze(1)
            if get_raw_results:
                return rs
            return torch.sign(rs)
Author: bitores, Project: MachineLearning, Lines of code: 75, Source file: LinearSVM.py
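
Both SVM examples share the same scoring pattern in _predict: a one-element bias is broadcast across the batch with expand_as before the in-place add. A standalone sketch:

w = Variable(torch.randn(3, 1))
b = Variable(torch.Tensor([0.5]))
x = Variable(torch.randn(4, 3))
rs = x.mm(w)                                # raw scores, shape (4, 1)
rs = rs.add_(b.expand_as(rs)).squeeze(1)    # bias broadcast to (4, 1), then flattened
print(torch.sign(rs))                       # class predictions in {-1, +1}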

Example 14: NormalNormalTests

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
class NormalNormalTests(TestCase):

    def setUp(self):
        # normal-normal; known covariance
        self.lam0 = Variable(torch.Tensor([0.1, 0.1]))   # precision of prior
        self.mu0 = Variable(torch.Tensor([0.0, 0.5]))   # prior mean
        # known precision of observation noise
        self.lam = Variable(torch.Tensor([6.0, 4.0]))
        self.data = []
        self.data.append(Variable(torch.Tensor([-0.1, 0.3])))
        self.data.append(Variable(torch.Tensor([0.00, 0.4])))
        self.data.append(Variable(torch.Tensor([0.20, 0.5])))
        self.data.append(Variable(torch.Tensor([0.10, 0.7])))
        self.n_data = Variable(torch.Tensor([len(self.data)]))
        self.sum_data = self.data[0] + \
            self.data[1] + self.data[2] + self.data[3]
        self.analytic_lam_n = self.lam0 + \
            self.n_data.expand_as(self.lam) * self.lam
        self.analytic_log_sig_n = -0.5 * torch.log(self.analytic_lam_n)
        self.analytic_mu_n = self.sum_data * (self.lam / self.analytic_lam_n) +\
            self.mu0 * (self.lam0 / self.analytic_lam_n)
        self.batch_size = 4

    def test_elbo_reparameterized(self):
        self.do_elbo_test(True, 5000)

    def test_elbo_nonreparameterized(self):
        self.do_elbo_test(False, 15000)

    def do_elbo_test(self, reparameterized, n_steps):
        pyro.clear_param_store()

        def model():
            mu_latent = pyro.sample("mu_latent", dist.normal,
                                    self.mu0, torch.pow(self.lam0, -0.5))
            pyro.map_data("aaa", self.data, lambda i,
                          x: pyro.observe(
                              "obs_%d" % i, dist.normal,
                              x, mu_latent, torch.pow(self.lam, -0.5)),
                          batch_size=self.batch_size)
            return mu_latent

        def guide():
            mu_q = pyro.param("mu_q", Variable(self.analytic_mu_n.data + 0.134 * torch.ones(2),
                                               requires_grad=True))
            log_sig_q = pyro.param("log_sig_q", Variable(
                                   self.analytic_log_sig_n.data - 0.14 * torch.ones(2),
                                   requires_grad=True))
            sig_q = torch.exp(log_sig_q)
            pyro.sample("mu_latent", dist.Normal(mu_q, sig_q, reparameterized=reparameterized))
            pyro.map_data("aaa", self.data, lambda i, x: None,
                          batch_size=self.batch_size)

        adam = optim.Adam({"lr": .001})
        svi = SVI(model, guide, adam, loss="ELBO", trace_graph=False)

        for k in range(n_steps):
            svi.step()

            mu_error = param_mse("mu_q", self.analytic_mu_n)
            log_sig_error = param_mse("log_sig_q", self.analytic_log_sig_n)

        self.assertEqual(0.0, mu_error, prec=0.05)
        self.assertEqual(0.0, log_sig_error, prec=0.05)
Author: Magica-Chen, Project: pyro, Lines of code: 66, Source file: test_inference.py

Example 15: Policy

# Required import: from torch.autograd import Variable [as alias]
# Or alternatively: from torch.autograd.Variable import expand_as [as alias]
class Policy(nn.Module):

    def __init__(self, hidden_size, num_inputs, action_space):
        super(Policy, self).__init__()
        self.action_space = action_space
        num_outputs = action_space.shape[0]

        self.bn0 = nn.BatchNorm1d(num_inputs)
        self.bn0.weight.data.fill_(1)
        self.bn0.bias.data.fill_(0)

        self.linear1 = nn.Linear(num_inputs, hidden_size)
        self.bn1 = nn.BatchNorm1d(hidden_size)
        self.bn1.weight.data.fill_(1)
        self.bn1.bias.data.fill_(0)

        self.linear2 = nn.Linear(hidden_size, hidden_size)
        self.bn2 = nn.BatchNorm1d(hidden_size)
        self.bn2.weight.data.fill_(1)
        self.bn2.bias.data.fill_(0)

        self.V = nn.Linear(hidden_size, 1)
        self.V.weight.data.mul_(0.1)
        self.V.bias.data.mul_(0.1)

        self.mu = nn.Linear(hidden_size, num_outputs)
        self.mu.weight.data.mul_(0.1)
        self.mu.bias.data.mul_(0.1)

        self.L = nn.Linear(hidden_size, num_outputs ** 2)
        self.L.weight.data.mul_(0.1)
        self.L.bias.data.mul_(0.1)

        self.tril_mask = Variable(torch.tril(torch.ones(
            num_outputs, num_outputs), diagonal=-1).unsqueeze(0))
        self.diag_mask = Variable(torch.diag(torch.diag(
            torch.ones(num_outputs, num_outputs))).unsqueeze(0))

    def forward(self, inputs):
        x, u = inputs
        x = self.bn0(x)
        x = F.tanh(self.linear1(x))
        x = F.tanh(self.linear2(x))

        V = self.V(x)
        mu = F.tanh(self.mu(x))

        Q = None
        if u is not None:
            num_outputs = mu.size(1)
            L = self.L(x).view(-1, num_outputs, num_outputs)
            L = L * self.tril_mask.expand_as(L) + \
                torch.exp(L) * self.diag_mask.expand_as(L)
            P = torch.bmm(L, L.transpose(2, 1))

            u_mu = (u - mu).unsqueeze(2)
            A = -0.5 * \
                torch.bmm(torch.bmm(u_mu.transpose(2, 1), P), u_mu)[:, :, 0]

            Q = A + V

        return mu, Q, V
Author: lenvdv, Project: pytorch-ddpg-naf, Lines of code: 65, Source file: naf.py
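
The masking trick in forward builds a Cholesky factor with a strictly positive diagonal: the strict lower triangle keeps the raw network output, while the diagonal entries pass through exp. A standalone sketch of that construction (sizes are illustrative):

n = 3
raw = Variable(torch.randn(1, n, n))
tril_mask = Variable(torch.tril(torch.ones(n, n), diagonal=-1).unsqueeze(0))
diag_mask = Variable(torch.diag(torch.diag(torch.ones(n, n))).unsqueeze(0))
L = raw * tril_mask.expand_as(raw) + torch.exp(raw) * diag_mask.expand_as(raw)
P = torch.bmm(L, L.transpose(2, 1))   # symmetric positive definite, used as the precision of the advantage term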


Note: The torch.autograd.Variable.expand_as examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not repost without permission.