
Python autograd.value_and_grad Method Code Examples

This article collects and summarizes typical usage examples of the autograd.value_and_grad method in Python. If you are wondering what autograd.value_and_grad does, how to call it, or where to find concrete examples of it, the curated code samples below may help. You can also explore further usage examples from the autograd package itself.


The sections below present 15 code examples of the autograd.value_and_grad method, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python code examples.
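
Before turning to the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern they all share: value_and_grad(f) wraps a scalar function f and returns a new function that computes f's value and its gradient in a single call, so the value can be monitored while the gradient drives an optimizer update.

import autograd.numpy as np
from autograd import value_and_grad

def loss(w):
    # simple scalar objective: sum of squares
    return np.sum(w ** 2)

loss_and_grad = value_and_grad(loss)

w = np.array([1.0, -2.0, 3.0])
for i in range(100):
    val, grad = loss_and_grad(w)  # one call returns both the value and the gradient
    w = w - 0.1 * grad            # plain gradient-descent step
print("final loss: %.3e" % val)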

Example 1: train

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def train(self):
        print("Total number of parameters: %d" % (self.hyp.shape[0]))
        
        # Gradients from autograd 
        NLML = value_and_grad(self.likelihood)
        
        start_time = timeit.default_timer()
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            self.X_batch, self.y_batch = fetch_minibatch(self.X,self.y,self.N_batch) 
            
            # Compute likelihood and gradients 
            nlml, D_NLML = NLML(self.hyp)
            
            # Update hyper-parameters
            self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(self.hyp, D_NLML, self.mt_hyp, self.vt_hyp, self.lrate, i)
            
            if i % self.monitor_likelihood == 0:
                elapsed = timeit.default_timer() - start_time
                print('Iteration: %d, NLML: %.2f, Time: %.2f' % (i, nlml, elapsed))
                start_time = timeit.default_timer()

        nlml, D_NLML = NLML(self.hyp) 
Developer: maziarraissi, Project: ParametricGP, Lines of code: 25, Source: parametric_GP.py

Example 2: rearrange_dict_grad

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def rearrange_dict_grad(fun):
    """
    Decorator that allows us to save memory on the forward pass,
    by precomputing the gradient
    """
    @primitive
    def wrapped_fun_helper(xdict, dummy):
        ## ag.value_and_grad() to avoid second forward pass
        ## ag.checkpoint() ensures hessian gets properly checkpointed
        val, grad = ag.checkpoint(ag.value_and_grad(fun))(xdict)
        assert len(val.shape) == 0
        dummy.cache = grad
        return val

    def wrapped_fun_helper_grad(ans, xdict, dummy):
        def grad(g):
            #print("foo")
            return {k:g*v for k,v in dummy.cache.items()}
        return grad
    defvjp(wrapped_fun_helper, wrapped_fun_helper_grad, None)

    @functools.wraps(fun)
    def wrapped_fun(xdict):
        return wrapped_fun_helper(ag.dict(xdict), lambda:None)
    return wrapped_fun 
Developer: popgenmethods, Project: momi2, Lines of code: 27, Source: likelihood.py

Example 3: EM

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def EM(init_params, data, callback=None):
    def EM_update(params):
        natural_params = list(map(np.log, params))
        loglike, E_stats = vgrad(log_partition_function)(natural_params, data)  # E step
        if callback: callback(loglike, params)
        return list(map(normalize, E_stats))                                    # M step

    def fixed_point(f, x0):
        x1 = f(x0)
        while different(x0, x1):
            x0, x1 = x1, f(x1)
        return x1

    def different(params1, params2):
        allclose = partial(np.allclose, atol=1e-3, rtol=1e-3)
        return not all(map(allclose, params1, params2))

    return fixed_point(EM_update, init_params) 
Developer: HIPS, Project: autograd, Lines of code: 20, Source: hmm_em.py

Example 4: gamma_optimizer

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def gamma_optimizer(gamma_guess, failures, right_censored):
        failures_shifted = failures - gamma_guess[0]
        right_censored_shifted = right_censored - gamma_guess[0]
        all_data_shifted = np.hstack([failures_shifted, right_censored_shifted])
        sp = ss.lognorm.fit(all_data_shifted, floc=0, optimizer='powell')  # scipy's answer is used as an initial guess. Scipy is only correct when there is no censored data
        guess = [np.log(sp[2]), sp[0]]
        warnings.filterwarnings('ignore')  # necessary to suppress the warning about the jacobian when using the nelder-mead optimizer
        result = minimize(value_and_grad(Fit_Lognormal_2P.LL), guess, args=(failures_shifted, right_censored_shifted), jac=True, tol=1e-2, method='nelder-mead')

        if result.success is True:
            params = result.x
            mu = params[0]
            sigma = params[1]
        else:
            print('WARNING: Fitting using Autograd FAILED for the gamma optimisation section of Lognormal_3P. The fit from Scipy was used instead so results may not be accurate.')
            mu = sp[2]
            sigma = sp[0]

        LL2 = 2 * Fit_Lognormal_2P.LL([mu, sigma], failures_shifted, right_censored_shifted)
        return LL2 
Developer: MatthewReid854, Project: reliability, Lines of code: 22, Source: Fitters.py

Example 5: train

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def train(self):
        
        # Gradients from autograd 
        NLML = value_and_grad(self.likelihood)
        
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            self.Y_batch = fetch_minibatch(self.Y, self.N_batch) 
            
            # Compute likelihood_UB and gradients 
            NLML_value, D_NLML = NLML(self.hyp)
            
            # Update hyper-parameters
            self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(self.hyp, D_NLML, self.mt_hyp, self.vt_hyp, self.lrate, i)
            
            if i % self.monitor_likelihood == 0:
                print("Iteration: %d, likelihood: %.2f" % (i, NLML_value)) 
Developer: maziarraissi, Project: DeepLearningTutorial, Lines of code: 19, Source: VariationalAutoencoders.py

Example 6: train

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def train(self):
        
        # Gradients from autograd 
        MSE = value_and_grad(self.MSE)
        
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            self.X_batch, self.Y_batch = fetch_minibatch_rnn(self.X, self.Y, self.N_batch)
            
            # Compute likelihood_UB and gradients 
            MSE_value, D_MSE = MSE(self.hyp)
            
            # Update hyper-parameters
            self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(self.hyp, D_MSE, self.mt_hyp, self.vt_hyp, self.lrate, i)
            
            if i % self.monitor_likelihood == 0:
                print("Iteration: %d, MSE: %.5e" % (i, MSE_value)) 
Developer: maziarraissi, Project: DeepLearningTutorial, Lines of code: 19, Source: LongShortTermMemoryNetworks.py

Example 7: train

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def train(self):
        
        # Gradients from autograd 
        MSE = value_and_grad(self.MSE)
        
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            self.X_batch, self.Y_batch = fetch_minibatch(self.X, self.Y, self.N_batch)
            
            # Compute MSE and gradients 
            MSE_value, D_MSE = MSE(self.hyp)
            
            # Update hyper-parameters
            self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(self.hyp, D_MSE, self.mt_hyp, self.vt_hyp, self.lrate, i)
            
            if i % self.monitor_likelihood == 0:
                print("Iteration: %d, MSE: %.5e" % (i, MSE_value)) 
Developer: maziarraissi, Project: DeepLearningTutorial, Lines of code: 19, Source: NeuralNetworks.py

Example 8: train

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def train(self):
        
        # Gradients from autograd 
        NLML = value_and_grad(self.likelihood)
        
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            self.X_batch, self.Y_batch = fetch_minibatch(self.X, self.Y, self.N_batch) 
            
            # Compute likelihood_UB and gradients 
            NLML_value, D_NLML = NLML(self.hyp)
            
            # Update hyper-parameters
            self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(self.hyp, D_NLML, self.mt_hyp, self.vt_hyp, self.lrate, i)
            
            if i % self.monitor_likelihood == 0:
                print("Iteration: %d, likelihood: %.2f" % (i, NLML_value)) 
Developer: maziarraissi, Project: DeepLearningTutorial, Lines of code: 19, Source: ConditionalVariationalAutoencoders.py

Example 9: _step

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def _step(self, optimizer, X, scalings):
        obj, grad = value_and_grad(calc_potential_energy)(scalings, X)
        scalings = optimizer.next(scalings, np.array(grad))
        scalings = normalize(scalings, x_min=0, x_max=scalings.max())
        return scalings, obj 
Developer: msu-coinlab, Project: pymoo, Lines of code: 7, Source: energy_layer.py

Example 10: _step

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def _step(self, optimizer, X, freeze=None):
        free = np.logical_not(freeze)

        obj, grad, mutual_dist = calc_potential_energy_with_grad(X, self.d, return_mutual_dist=True)
        # obj, grad = value_and_grad(calc_potential_energy)(X, self.d)

        if self.verify_gradient:
            obj, grad = calc_potential_energy_with_grad(X, self.d)
            _obj, _grad = value_and_grad(calc_potential_energy)(X, self.d)
            if np.abs(grad - _grad).mean() > 1e-5:
                print("GRADIENT IMPLEMENTATION IS INCORRECT!")

        # set the gradients for frozen points to zero - make them not to move
        if freeze is not None:
            grad[freeze] = 0

        # project the gradient to have a sum of zero - guarantees to stay on the simplex
        proj_grad = project_onto_sum_equals_zero_plane(grad)

        # normalize the gradients by the largest gradient norm
        if self.norm_gradients:
            norm = np.linalg.norm(proj_grad, axis=1)
            proj_grad = (proj_grad / max(norm.max(), 1e-24))

        # apply a step of gradient descent by subtracting the projected gradient with a learning rate
        X = optimizer.next(X, proj_grad)

        # project the out of bounds points back onto the unit simplex
        X[free] = project_onto_unit_simplex_recursive(X[free])

        # because of floating point issues make sure it is on the unit simplex
        X /= X.sum(axis=1)[:, None]

        return X, obj 
Developer: msu-coinlab, Project: pymoo, Lines of code: 36, Source: energy.py

Example 11: next

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def next(self):

        x = np.random.random((self.n_samples, self.n_dim))
        x = map_onto_unit_simplex(x, "kraemer")
        x = x[vectorized_cdist(x, self.X).min(axis=1).argmax()]

        if self.gradient_descent:

            optimizer = Adam(precision=1e-4)

            # for each iteration of gradient descent
            for i in range(1000):

                # calculate the function value and the gradient
                # auto_obj, auto_grad = value_and_grad(calc_dist_to_others)(x, self.X)
                _obj, _grad = calc_dist_to_others_with_gradient(x, self.X)

                # project the gradient to have a sum of zero - guarantees to stay on the simplex
                proj_grad = project_onto_sum_equals_zero_plane(_grad)

                # apply a step of gradient descent by subtracting the projected gradient with a learning rate
                x = optimizer.next(x, proj_grad)

                # project the out of bounds points back onto the unit simplex
                project_onto_unit_simplex_recursive(x[None, :])

                # because of floating point issues make sure it is on the unit simplex
                x /= x.sum()

                # if there was only a little movement during the last iteration -> terminate
                if optimizer.has_converged:
                    break

        return x 
Developer: msu-coinlab, Project: pymoo, Lines of code: 36, Source: construction.py

Example 12: test_reverse_array

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def test_reverse_array(func, motion, optimized, preserve_result, *args):
  """Test gradients of functions with NumPy-compatible signatures."""

  def tangent_func():
    y = func(*deepcopy(args))
    if np.array(y).size > 1:
      init_grad = np.ones_like(y)
    else:
      init_grad = 1
    func.__globals__['np'] = np
    df = tangent.autodiff(
        func,
        mode='reverse',
        motion=motion,
        optimized=optimized,
        preserve_result=preserve_result,
        verbose=1)
    if motion == 'joint':
      return df(*deepcopy(args) + (init_grad,))
    return df(*deepcopy(args), init_grad=init_grad)

  def reference_func():
    func.__globals__['np'] = ag_np
    if preserve_result:
      val, gradval = ag_value_and_grad(func)(*deepcopy(args))
      return gradval, val
    else:
      return ag_grad(func)(*deepcopy(args))

  def backup_reference_func():
    func.__globals__['np'] = np
    df_num = numeric_grad(func)
    gradval = df_num(*deepcopy(args))
    if preserve_result:
      val = func(*deepcopy(args))
      return gradval, val
    else:
      return gradval

  assert_result_matches_reference(tangent_func, reference_func,
                                  backup_reference_func) 
Developer: google, Project: tangent, Lines of code: 43, Source: utils.py

Example 13: test_forward_array

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def test_forward_array(func, wrt, preserve_result, *args):
  """Test derivatives of functions with NumPy-compatible signatures."""

  def tangent_func():
    func.__globals__['np'] = np
    df = tangent.autodiff(
        func,
        mode='forward',
        preserve_result=preserve_result,
        wrt=wrt,
        optimized=True,
        verbose=1)
    args_ = args + (1.0,)  # seed gradient
    return df(*deepcopy(args_))

  def reference_func():
    func.__globals__['np'] = ag_np
    if preserve_result:
      # Note: ag_value_and_grad returns (val, grad) but we need (grad, val)
      val, gradval = ag_value_and_grad(func)(*deepcopy(args))
      return gradval, val
    else:
      return ag_grad(func)(*deepcopy(args))

  def backup_reference_func():
    func.__globals__['np'] = np
    df_num = numeric_grad(func)
    gradval = df_num(*deepcopy(args))
    if preserve_result:
      val = func(*deepcopy(args))
      return gradval, val
    else:
      return gradval

  assert_result_matches_reference(tangent_func, reference_func,
                                  backup_reference_func) 
Developer: google, Project: tangent, Lines of code: 38, Source: utils.py

Example 14: find_mle

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def find_mle(self, x0, method="adam", bounds=None, rgen=None, callback=None, **kwargs):
        if not rgen:
            rgen = self.rgen
        callback = LoggingCallback(user_callback=callback).callback

        full_surface = self.full_surface

        opt_kwargs = dict(kwargs)
        opt_kwargs.update({'pieces': self.n_minibatches, 'rgen': rgen})

        return _find_minimum(self.avg_neg_log_lik, x0, optimizer=stochastic_opts[method],
                             bounds=bounds, callback=callback, opt_kwargs=opt_kwargs,
                             gradmakers={'fun_and_jac': ag.value_and_grad}) 
Developer: popgenmethods, Project: momi2, Lines of code: 15, Source: likelihood.py

Example 15: test_comparison_values

# Module to import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def test_comparison_values():
    compare_funs = [lambda x, y : np.sum(x <  x) + 0.0,
                    lambda x, y : np.sum(x <= y) + 0.0,
                    lambda x, y : np.sum(x >  y) + 0.0,
                    lambda x, y : np.sum(x >= y) + 0.0,
                    lambda x, y : np.sum(x == y) + 0.0,
                    lambda x, y : np.sum(x != y) + 0.0]

    for arg1, arg2 in arg_pairs():
        for fun in compare_funs:
            fun_val = fun(arg1, arg2)
            fun_val_from_grad, _ = value_and_grad(fun)(arg1, arg2)
            assert fun_val == fun_val_from_grad, (fun_val, fun_val_from_grad) 
Developer: HIPS, Project: autograd, Lines of code: 15, Source: test_binary_ops.py


Note: The autograd.value_and_grad examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.