

Python autograd.value_and_grad Method Code Examples

This article collects typical usage examples of the Python autograd.value_and_grad method. If you are wondering how exactly to use autograd.value_and_grad, how to call it, or what working examples of it look like, the curated code snippets below should help. You can also explore further usage examples from the autograd package itself.


Below are 15 code examples of the autograd.value_and_grad method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
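
Before diving into the examples, here is a minimal orientation sketch (not taken from any of the projects below, just the core API): value_and_grad wraps a scalar-valued function and returns a new function that, in a single call, evaluates the function and its gradient with respect to the first argument.

import autograd.numpy as np
from autograd import value_and_grad

def loss(w, x, y):
    # simple squared-error loss; w is the parameter vector being differentiated
    pred = np.dot(x, w)
    return np.sum((pred - y) ** 2)

loss_and_grad = value_and_grad(loss)  # differentiates with respect to the first argument (w)

w = np.zeros(3)
x = np.ones((5, 3))
y = np.ones(5)
val, grad = loss_and_grad(w, x, y)    # one call returns both the loss value and d(loss)/dw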

Example 1: train

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def train(self):
        print("Total number of parameters: %d" % (self.hyp.shape[0]))
        
        # Gradients from autograd 
        NLML = value_and_grad(self.likelihood)
        
        start_time = timeit.default_timer()
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            self.X_batch, self.y_batch = fetch_minibatch(self.X,self.y,self.N_batch) 
            
            # Compute likelihood and gradients 
            nlml, D_NLML = NLML(self.hyp)
            
            # Update hyper-parameters
            self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(self.hyp, D_NLML, self.mt_hyp, self.vt_hyp, self.lrate, i)
            
            if i % self.monitor_likelihood == 0:
                elapsed = timeit.default_timer() - start_time
                print('Iteration: %d, NLML: %.2f, Time: %.2f' % (i, nlml, elapsed))
                start_time = timeit.default_timer()

        nlml, D_NLML = NLML(self.hyp) 
Developer: maziarraissi, Project: ParametricGP, Lines: 25, Source: parametric_GP.py

Example 2: rearrange_dict_grad

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def rearrange_dict_grad(fun):
    """
    Decorator that allows us to save memory on the forward pass,
    by precomputing the gradient
    """
    @primitive
    def wrapped_fun_helper(xdict, dummy):
        ## ag.value_and_grad() to avoid second forward pass
        ## ag.checkpoint() ensures hessian gets properly checkpointed
        val, grad = ag.checkpoint(ag.value_and_grad(fun))(xdict)
        assert len(val.shape) == 0
        dummy.cache = grad
        return val

    def wrapped_fun_helper_grad(ans, xdict, dummy):
        def grad(g):
            #print("foo")
            return {k:g*v for k,v in dummy.cache.items()}
        return grad
    defvjp(wrapped_fun_helper, wrapped_fun_helper_grad, None)

    @functools.wraps(fun)
    def wrapped_fun(xdict):
        return wrapped_fun_helper(ag.dict(xdict), lambda:None)
    return wrapped_fun 
Developer: popgenmethods, Project: momi2, Lines: 27, Source: likelihood.py

Example 3: EM

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def EM(init_params, data, callback=None):
    def EM_update(params):
        natural_params = list(map(np.log, params))
        loglike, E_stats = vgrad(log_partition_function)(natural_params, data)  # E step
        if callback: callback(loglike, params)
        return list(map(normalize, E_stats))                                    # M step

    def fixed_point(f, x0):
        x1 = f(x0)
        while different(x0, x1):
            x0, x1 = x1, f(x1)
        return x1

    def different(params1, params2):
        allclose = partial(np.allclose, atol=1e-3, rtol=1e-3)
        return not all(map(allclose, params1, params2))

    return fixed_point(EM_update, init_params) 
Developer: HIPS, Project: autograd, Lines: 20, Source: hmm_em.py

Example 4: gamma_optimizer

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def gamma_optimizer(gamma_guess, failures, right_censored):
        failures_shifted = failures - gamma_guess[0]
        right_censored_shifted = right_censored - gamma_guess[0]
        all_data_shifted = np.hstack([failures_shifted, right_censored_shifted])
        sp = ss.lognorm.fit(all_data_shifted, floc=0, optimizer='powell')  # scipy's answer is used as an initial guess. Scipy is only correct when there is no censored data
        guess = [np.log(sp[2]), sp[0]]
        warnings.filterwarnings('ignore')  # necessary to suppress the warning about the Jacobian when using the nelder-mead optimizer
        result = minimize(value_and_grad(Fit_Lognormal_2P.LL), guess, args=(failures_shifted, right_censored_shifted), jac=True, tol=1e-2, method='nelder-mead')

        if result.success is True:
            params = result.x
            mu = params[0]
            sigma = params[1]
        else:
            print('WARNING: Fitting using Autograd FAILED for the gamma optimisation section of Lognormal_3P. The fit from Scipy was used instead so results may not be accurate.')
            mu = sp[2]
            sigma = sp[0]

        LL2 = 2 * Fit_Lognormal_2P.LL([mu, sigma], failures_shifted, right_censored_shifted)
        return LL2 
Developer: MatthewReid854, Project: reliability, Lines: 22, Source: Fitters.py
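
Example 4 illustrates a pattern worth calling out: because value_and_grad(fun) returns a (value, gradient) pair, it can be passed straight to scipy.optimize.minimize with jac=True, so SciPy obtains the objective value and its gradient from a single callable. A standalone sketch of that pattern with a toy objective (not the Fit_Lognormal_2P.LL used in the example):

import autograd.numpy as np
from autograd import value_and_grad
from scipy.optimize import minimize

def objective(params):
    # toy convex objective with its minimum at (1, -2)
    return (params[0] - 1.0) ** 2 + (params[1] + 2.0) ** 2

# jac=True tells SciPy that the callable returns (value, gradient) in one call
result = minimize(value_and_grad(objective), x0=np.array([0.0, 0.0]),
                  jac=True, method='L-BFGS-B')
print(result.x)  # approximately [1., -2.]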

Example 5: train

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def train(self):
        
        # Gradients from autograd 
        NLML = value_and_grad(self.likelihood)
        
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            self.Y_batch = fetch_minibatch(self.Y, self.N_batch) 
            
            # Compute likelihood_UB and gradients 
            NLML_value, D_NLML = NLML(self.hyp)
            
            # Update hyper-parameters
            self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(self.hyp, D_NLML, self.mt_hyp, self.vt_hyp, self.lrate, i)
            
            if i % self.monitor_likelihood == 0:
                print("Iteration: %d, likelihood: %.2f" % (i, NLML_value)) 
Developer: maziarraissi, Project: DeepLearningTutorial, Lines: 19, Source: VariationalAutoencoders.py

Example 6: train

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def train(self):
        
        # Gradients from autograd 
        MSE = value_and_grad(self.MSE)
        
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            self.X_batch, self.Y_batch = fetch_minibatch_rnn(self.X, self.Y, self.N_batch)
            
            # Compute likelihood_UB and gradients 
            MSE_value, D_MSE = MSE(self.hyp)
            
            # Update hyper-parameters
            self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(self.hyp, D_MSE, self.mt_hyp, self.vt_hyp, self.lrate, i)
            
            if i % self.monitor_likelihood == 0:
                print("Iteration: %d, MSE: %.5e" % (i, MSE_value)) 
Developer: maziarraissi, Project: DeepLearningTutorial, Lines: 19, Source: LongShortTermMemoryNetworks.py

Example 7: train

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def train(self):
        
        # Gradients from autograd 
        MSE = value_and_grad(self.MSE)
        
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            self.X_batch, self.Y_batch = fetch_minibatch(self.X, self.Y, self.N_batch)
            
            # Compute MSE and gradients 
            MSE_value, D_MSE = MSE(self.hyp)
            
            # Update hyper-parameters
            self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(self.hyp, D_MSE, self.mt_hyp, self.vt_hyp, self.lrate, i)
            
            if i % self.monitor_likelihood == 0:
                print("Iteration: %d, MSE: %.5e" % (i, MSE_value)) 
Developer: maziarraissi, Project: DeepLearningTutorial, Lines: 19, Source: NeuralNetworks.py

Example 8: train

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def train(self):
        
        # Gradients from autograd 
        NLML = value_and_grad(self.likelihood)
        
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            self.X_batch, self.Y_batch = fetch_minibatch(self.X, self.Y, self.N_batch) 
            
            # Compute likelihood_UB and gradients 
            NLML_value, D_NLML = NLML(self.hyp)
            
            # Update hyper-parameters
            self.hyp, self.mt_hyp, self.vt_hyp = stochastic_update_Adam(self.hyp, D_NLML, self.mt_hyp, self.vt_hyp, self.lrate, i)
            
            if i % self.monitor_likelihood == 0:
                print("Iteration: %d, likelihood: %.2f" % (i, NLML_value)) 
Developer: maziarraissi, Project: DeepLearningTutorial, Lines: 19, Source: ConditionalVariationalAutoencoders.py

Example 9: _step

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def _step(self, optimizer, X, scalings):
        obj, grad = value_and_grad(calc_potential_energy)(scalings, X)
        scalings = optimizer.next(scalings, np.array(grad))
        scalings = normalize(scalings, x_min=0, x_max=scalings.max())
        return scalings, obj 
Developer: msu-coinlab, Project: pymoo, Lines: 7, Source: energy_layer.py
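
In Examples 9 and 10 the objective takes extra arguments (the point set X, or the target distances self.d). By default value_and_grad differentiates only with respect to the first argument and treats the remaining arguments as constants; a different argument can be selected via the optional argnum parameter. A small self-contained sketch, assuming a toy two-argument function:

import autograd.numpy as np
from autograd import value_and_grad

def weighted_norm(x, weights):
    # scalar objective of two arguments
    return np.sum(weights * x ** 2)

x = np.array([1.0, 2.0, 3.0])
weights = np.array([0.5, 1.0, 2.0])

# gradient with respect to x (the first argument); weights is held fixed
val, grad_x = value_and_grad(weighted_norm)(x, weights)

# gradient with respect to the second argument instead (argnum = 1)
val2, grad_w = value_and_grad(weighted_norm, 1)(x, weights)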

Example 10: _step

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def _step(self, optimizer, X, freeze=None):
        free = np.logical_not(freeze)

        obj, grad, mutual_dist = calc_potential_energy_with_grad(X, self.d, return_mutual_dist=True)
        # obj, grad = value_and_grad(calc_potential_energy)(X, self.d)

        if self.verify_gradient:
            obj, grad = calc_potential_energy_with_grad(X, self.d)
            _obj, _grad = value_and_grad(calc_potential_energy)(X, self.d)
            if np.abs(grad - _grad).mean() > 1e-5:
                print("GRADIENT IMPLEMENTATION IS INCORRECT!")

        # set the gradients for frozen points to zero - make them not to move
        if freeze is not None:
            grad[freeze] = 0

        # project the gradient to have a sum of zero - guarantees to stay on the simplex
        proj_grad = project_onto_sum_equals_zero_plane(grad)

        # normalize the gradients by the largest gradient norm
        if self.norm_gradients:
            norm = np.linalg.norm(proj_grad, axis=1)
            proj_grad = (proj_grad / max(norm.max(), 1e-24))

        # apply a step of gradient descent by subtracting the projected gradient with a learning rate
        X = optimizer.next(X, proj_grad)

        # project the out of bounds points back onto the unit simplex
        X[free] = project_onto_unit_simplex_recursive(X[free])

        # because of floating point issues make sure it is on the unit simplex
        X /= X.sum(axis=1)[:, None]

        return X, obj 
Developer: msu-coinlab, Project: pymoo, Lines: 36, Source: energy.py

Example 11: next

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def next(self):

        x = np.random.random((self.n_samples, self.n_dim))
        x = map_onto_unit_simplex(x, "kraemer")
        x = x[vectorized_cdist(x, self.X).min(axis=1).argmax()]

        if self.gradient_descent:

            optimizer = Adam(precision=1e-4)

            # for each iteration of gradient descent
            for i in range(1000):

                # calculate the function value and the gradient
                # auto_obj, auto_grad = value_and_grad(calc_dist_to_others)(x, self.X)
                _obj, _grad = calc_dist_to_others_with_gradient(x, self.X)

                # project the gradient to have a sum of zero - guarantees to stay on the simplex
                proj_grad = project_onto_sum_equals_zero_plane(_grad)

                # apply a step of gradient descent by subtracting the projected gradient with a learning rate
                x = optimizer.next(x, proj_grad)

                # project the out of bounds points back onto the unit simplex
                project_onto_unit_simplex_recursive(x[None, :])

                # because of floating point issues make sure it is on the unit simplex
                x /= x.sum()

                # if there was only a little movement during the last iteration -> terminate
                if optimizer.has_converged:
                    break

        return x 
Developer: msu-coinlab, Project: pymoo, Lines: 36, Source: construction.py

Example 12: test_reverse_array

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def test_reverse_array(func, motion, optimized, preserve_result, *args):
  """Test gradients of functions with NumPy-compatible signatures."""

  def tangent_func():
    y = func(*deepcopy(args))
    if np.array(y).size > 1:
      init_grad = np.ones_like(y)
    else:
      init_grad = 1
    func.__globals__['np'] = np
    df = tangent.autodiff(
        func,
        mode='reverse',
        motion=motion,
        optimized=optimized,
        preserve_result=preserve_result,
        verbose=1)
    if motion == 'joint':
      return df(*deepcopy(args) + (init_grad,))
    return df(*deepcopy(args), init_grad=init_grad)

  def reference_func():
    func.__globals__['np'] = ag_np
    if preserve_result:
      val, gradval = ag_value_and_grad(func)(*deepcopy(args))
      return gradval, val
    else:
      return ag_grad(func)(*deepcopy(args))

  def backup_reference_func():
    func.__globals__['np'] = np
    df_num = numeric_grad(func)
    gradval = df_num(*deepcopy(args))
    if preserve_result:
      val = func(*deepcopy(args))
      return gradval, val
    else:
      return gradval

  assert_result_matches_reference(tangent_func, reference_func,
                                  backup_reference_func) 
Developer: google, Project: tangent, Lines: 43, Source: utils.py

Example 13: test_forward_array

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def test_forward_array(func, wrt, preserve_result, *args):
  """Test derivatives of functions with NumPy-compatible signatures."""

  def tangent_func():
    func.__globals__['np'] = np
    df = tangent.autodiff(
        func,
        mode='forward',
        preserve_result=preserve_result,
        wrt=wrt,
        optimized=True,
        verbose=1)
    args_ = args + (1.0,)  # seed gradient
    return df(*deepcopy(args_))

  def reference_func():
    func.__globals__['np'] = ag_np
    if preserve_result:
      # Note: ag_value_and_grad returns (val, grad) but we need (grad, val)
      val, gradval = ag_value_and_grad(func)(*deepcopy(args))
      return gradval, val
    else:
      return ag_grad(func)(*deepcopy(args))

  def backup_reference_func():
    func.__globals__['np'] = np
    df_num = numeric_grad(func)
    gradval = df_num(*deepcopy(args))
    if preserve_result:
      val = func(*deepcopy(args))
      return gradval, val
    else:
      return gradval

  assert_result_matches_reference(tangent_func, reference_func,
                                  backup_reference_func) 
Developer: google, Project: tangent, Lines: 38, Source: utils.py

Example 14: find_mle

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def find_mle(self, x0, method="adam", bounds=None, rgen=None, callback=None, **kwargs):
        if not rgen:
            rgen = self.rgen
        callback = LoggingCallback(user_callback=callback).callback

        full_surface = self.full_surface

        opt_kwargs = dict(kwargs)
        opt_kwargs.update({'pieces': self.n_minibatches, 'rgen': rgen})

        return _find_minimum(self.avg_neg_log_lik, x0, optimizer=stochastic_opts[method],
                             bounds=bounds, callback=callback, opt_kwargs=opt_kwargs,
                             gradmakers={'fun_and_jac': ag.value_and_grad}) 
Developer: popgenmethods, Project: momi2, Lines: 15, Source: likelihood.py

Example 15: test_comparison_values

# Required import: import autograd [as alias]
# Or: from autograd import value_and_grad [as alias]
def test_comparison_values():
    compare_funs = [lambda x, y : np.sum(x <  x) + 0.0,
                    lambda x, y : np.sum(x <= y) + 0.0,
                    lambda x, y : np.sum(x >  y) + 0.0,
                    lambda x, y : np.sum(x >= y) + 0.0,
                    lambda x, y : np.sum(x == y) + 0.0,
                    lambda x, y : np.sum(x != y) + 0.0]

    for arg1, arg2 in arg_pairs():
        for fun in compare_funs:
            fun_val = fun(arg1, arg2)
            fun_val_from_grad, _ = value_and_grad(fun)(arg1, arg2)
            assert fun_val == fun_val_from_grad, (fun_val, fun_val_from_grad) 
Developer: HIPS, Project: autograd, Lines: 15, Source: test_binary_ops.py


Note: The autograd.value_and_grad examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use are subject to the license of the corresponding project. Please do not reproduce without permission.