

Python numpy.stack Method Code Examples

This article collects typical usage examples of the Python method autograd.numpy.stack. If you are wondering how numpy.stack is used in practice, or are looking for concrete examples, the curated code samples below may help. You can also browse further usage examples from the autograd.numpy module.


The following shows 15 code examples of the numpy.stack method, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code examples.
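
Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) illustrating why autograd.numpy.stack is used instead of plain numpy.stack: the stacked result stays differentiable, so gradients flow back into the stacked arrays.

import autograd.numpy as anp
from autograd import grad

def loss(x):
    # stack two transformed copies of x, then reduce to a scalar
    stacked = anp.stack([x ** 2, anp.sin(x)], axis=0)  # shape (2, len(x))
    return anp.sum(stacked)

g = grad(loss)(anp.array([0.1, 0.2, 0.3]))  # equals 2*x + cos(x), same shape as x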

Example 1: calc_jacobian

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def calc_jacobian(start, end):

    # if the end value is not a Box, autograd cannot trace back through it
    if not isbox(end):
        return vspace(start.shape).zeros()

    # the final jacobian matrices
    jac = []

    # run the backward pass once for each objective function
    for j in range(end.shape[1]):
        b = anp.zeros(end.shape)
        b[:, j] = 1
        n = new_box(b, 0, VJPNode.new_root())
        _jac = backward_pass(n, end._node)
        jac.append(_jac)

    jac = anp.stack(jac, axis=1)

    return jac 
Author: msu-coinlab, Project: pymoo, Lines: 22, Source: gradient.py
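
For comparison only (a sketch, not part of the pymoo source): for an ordinary vector-valued function, a per-objective Jacobian of the same shape can be obtained through autograd's public jacobian wrapper, with anp.stack building the objective vector.

import autograd.numpy as anp
from autograd import jacobian

def objectives(x):
    # toy problem with two objectives
    return anp.stack([anp.sum(x ** 2), anp.prod(x)])

J = jacobian(objectives)(anp.array([1.0, 2.0, 3.0]))  # shape (2, 3): one row per objective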

Example 2: _build_errors_df

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def _build_errors_df(name_errors, label):
  """Helper to build errors DataFrame."""
  series = []
  percentiles = np.linspace(0, 100, 21)
  index = percentiles / 100
  for name, errors in name_errors:
    series.append(pd.Series(
        np.nanpercentile(errors, q=percentiles), index=index, name=name))
  df = pd.concat(series, axis=1)
  df.columns.name = 'derivative'
  df.index.name = 'quantile'
  df = df.stack().rename('error').reset_index()
  with np.errstate(divide='ignore'):
    df['log(error)'] = np.log(df['error'])
  if label is not None:
    df['label'] = label
  return df 
Author: google, Project: tf-quant-finance, Lines: 19, Source: methods.py
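
A hypothetical call (inputs invented for illustration; assumes numpy and pandas are imported as np and pd, as the helper expects):

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
errors = np.abs(rng.randn(1000)) * 1e-3                  # pretend derivative errors
df = _build_errors_df([('central_fd', errors)], label='smoke_test')
# resulting columns: quantile, derivative, error, log(error), label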

Example 3: test_grad_and_aux

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def test_grad_and_aux():
    A = npr.randn(5, 4)
    x = npr.randn(4)

    f = lambda x: (np.sum(np.dot(A, x)), x**2)
    g = lambda x: np.sum(np.dot(A, x))

    assert len(grad_and_aux(f)(x)) == 2

    check_equivalent(grad_and_aux(f)(x)[0], grad(g)(x))
    check_equivalent(grad_and_aux(f)(x)[1], x**2)

## This behavior is no longer supported
# def test_make_ggnvp_broadcasting():
#   A = npr.randn(4, 5)
#   x = npr.randn(10, 4)
#   v = npr.randn(10, 4)

#   fun = lambda x: np.tanh(np.dot(x, A))
#   res1 = np.stack([_make_explicit_ggnvp(fun)(xi)(vi) for xi, vi in zip(x, v)])
#   res2 = make_ggnvp(fun)(x)(v)
#   check_equivalent(res1, res2) 
Author: HIPS, Project: autograd, Lines: 24, Source: test_wrappers.py

Example 4: calc_jacobian

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def calc_jacobian(start, end):

    # if the end value is not a Box, autograd cannot trace back through it
    if not isbox(end):
        return vspace(start.shape).zeros()

    # the final jacobian matrices
    jac = []

    # run the backward pass once for each objective function
    for j in range(end.shape[1]):
        b = anp.zeros(end.shape)
        b[:, j] = 1
        n = new_box(b, 0, VJPNode.new_root(b))
        _jac = backward_pass(n, end._node)
        jac.append(_jac)

    jac = anp.stack(jac, axis=1)

    return jac 
Author: msu-coinlab, Project: pymop, Lines: 22, Source: gradient.py

Example 5: convert_results

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def convert_results(results, interface):
        """Convert a list of results coming from multiple QNodes
        to the object required by each interface for auto-differentiation.

        Internally, this method makes use of ``tf.stack``, ``torch.stack``,
        and ``np.stack``.

        Args:
            results (list): list containing the results from
                multiple QNodes
            interface (str): the interface of the underlying QNodes

        Returns:
            list or array or torch.Tensor or tf.Tensor: the converted
            and stacked results.
        """
        if interface == "tf":
            import tensorflow as tf

            return tf.stack(results)

        if interface == "torch":
            import torch

            return torch.stack(results, dim=0)

        if interface in ("autograd", "numpy"):
            from autograd import numpy as np

            return np.stack(results)

        return results 
Author: XanaduAI, Project: pennylane, Lines: 34, Source: qnode_collection.py
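
A minimal usage sketch with invented results, treating convert_results as a free function exactly as written above and selecting the autograd interface:

from autograd import numpy as np

results = [np.array([0.1, 0.9]), np.array([0.4, 0.6])]  # pretend outputs of two QNodes
stacked = convert_results(results, "autograd")          # array of shape (2, 2)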

Example 6: grid

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def grid(num, ndim, large=False):
  """Build a uniform grid with num points along each of ndim axes."""
  if not large:
    _check_not_too_large(np.power(num, ndim) * ndim)
  x = np.linspace(0, 1, num, dtype='float64')
  w = 1 / (num - 1)
  points = np.stack(
      np.meshgrid(*[x for _ in range(ndim)], indexing='ij'), axis=-1)
  return points, w 
Author: google, Project: tf-quant-finance, Lines: 11, Source: methods.py
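
For intuition, a small call might look like this (a sketch; the module's size guard easily passes for such a tiny grid):

points, w = grid(num=3, ndim=2)
# points.shape == (3, 3, 2); w == 0.5, the spacing of the unit interval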

Example 7: autograd

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def autograd(f, ds, points):
  """Evaluate derivatives of f on the given points."""
  df_ds = lambda *args: f(np.stack(args, axis=-1))
  for i in ds:
    df_ds = egrad(df_ds, i)
  ndim = points.shape[-1]
  return df_ds(*[points[..., i] for i in range(ndim)]) 
Author: google, Project: tf-quant-finance, Lines: 9, Source: methods.py
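
A hedged usage sketch combining this helper with grid() from Example 6; the toy function and the derivative order ds are invented for illustration:

import autograd.numpy as np

f = lambda p: p[..., 0] ** 2 * p[..., 1]     # toy scalar function of (x0, x1)
points, _ = grid(num=3, ndim=2)
d2f = autograd(f, ds=[0, 1], points=points)  # d^2 f / (dx0 dx1), equals 2*x0 at every grid point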

Example 8: make_pinwheel

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,
                  rs=npr.RandomState(0)):
    """Based on code by Ryan P. Adams."""
    rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)

    features = rs.randn(num_classes*num_per_class, 2) \
        * np.array([radial_std, tangential_std])
    features[:, 0] += 1
    labels = np.repeat(np.arange(num_classes), num_per_class)

    angles = rads[labels] + rate * np.exp(features[:,0])
    rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])
    rotations = np.reshape(rotations.T, (-1, 2, 2))

    return np.einsum('ti,tij->tj', features, rotations) 
Author: HIPS, Project: autograd, Lines: 17, Source: data.py
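
A hypothetical call producing three spiral arms of 100 points each (the output shape follows directly from the code above):

data = make_pinwheel(radial_std=0.3, tangential_std=0.05,
                     num_classes=3, num_per_class=100, rate=0.25)
# data.shape == (300, 2)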

Example 9: test_stack_1d

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def test_stack_1d():  combo_check(np.stack,  [0])([(R(2),), (R(2), R(2))], axis=[0, 1]) 
Author: HIPS, Project: autograd, Lines: 3, Source: test_systematic.py
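
Roughly the same check written out by hand (a sketch, assuming autograd.test_util.check_grads as used elsewhere in the autograd test suite):

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.test_util import check_grads

a, b = npr.randn(2), npr.randn(2)
for axis in (0, 1):
    check_grads(lambda x, y: np.stack((x, y), axis=axis), modes=['rev'])(a, b)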

Example 10: get_d_paretomtl_init

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def get_d_paretomtl_init(grads,value,weights,i):
    # calculate the gradient direction for Pareto MTL initialization
    nobj, dim = grads.shape
    
    # check active constraints
    normalized_current_weight = weights[i] / np.linalg.norm(weights[i])
    normalized_rest_weights = np.delete(weights, i, axis=0) / np.linalg.norm(np.delete(weights, i, axis=0), axis=1, keepdims=True)
    w = normalized_rest_weights - normalized_current_weight

    gx = np.dot(w, value / np.linalg.norm(value))
    idx = gx > 0
    
    if np.sum(idx) <= 0:
        return np.zeros(nobj)
    if np.sum(idx) == 1:
        sol = np.ones(1)
    else:
        vec =  np.dot(w[idx],grads)
        sol, nd = MinNormSolver.find_min_norm_element(vec)

    # calculate the weights
    weight0 = np.sum(np.array([sol[j] * w[idx][j, 0] for j in np.arange(0, np.sum(idx))]))
    weight1 = np.sum(np.array([sol[j] * w[idx][j, 1] for j in np.arange(0, np.sum(idx))]))
    weight = np.stack([weight0, weight1])

    return weight 
Author: Xi-L, Project: ParetoMTL, Lines: 29, Source: run_synthetic_example.py

Example 11: concave_fun_eval

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def concave_fun_eval(x):
    """
    return the function values and gradient values
    """
    return np.stack([f1(x), f2(x)]), np.stack([f1_dx(x), f2_dx(x)])
    
    
### create the ground truth Pareto front ### 
Author: Xi-L, Project: ParetoMTL, Lines: 10, Source: run_synthetic_example.py
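
A self-contained sketch with toy objectives (these are not the f1/f2 defined in the ParetoMTL script, just stand-ins chosen so the snippet runs on its own):

import autograd.numpy as np

f1 = lambda x: np.sum(x ** 2)
f2 = lambda x: np.sum((x - 1.0) ** 2)
f1_dx = lambda x: 2.0 * x
f2_dx = lambda x: 2.0 * (x - 1.0)

values, grads = concave_fun_eval(np.zeros(20))
# values.shape == (2,)   one entry per objective
# grads.shape == (2, 20) stacked gradients, one row per objective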

Example 12: linear_scalarization_search

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def linear_scalarization_search(t_iter=100, n_dim=20, step_size=1):
    """
    linear scalarization with randomly generated weights
    """
    r = np.random.rand(1)
    weights = np.stack([r, 1-r])
    
    x = np.random.uniform(-0.5, 0.5, n_dim)
    
    for t in range(t_iter):
        f, f_dx = concave_fun_eval(x)
        x = x - step_size * np.dot(weights.T, f_dx).flatten()
    
    return x, f 
Author: Xi-L, Project: ParetoMTL, Lines: 16, Source: run_synthetic_example.py
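
A hypothetical end-to-end run; it relies on concave_fun_eval and its objectives being defined (for instance the toy ones sketched under Example 11), and uses a smaller step size than the default so the toy problem converges:

x_opt, f_opt = linear_scalarization_search(t_iter=100, n_dim=20, step_size=0.1)
# x_opt.shape == (20,); f_opt.shape == (2,), the objective values from the final iteration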

Example 13: callback

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def callback(params, iter, g):

        pred = ode_pred(params, true_y0, t)

        print("Iteration {:d} train loss {:.6f}".format(
              iter, L1_loss(pred, true_y)))

        ax_traj.cla()
        ax_traj.set_title('Trajectories')
        ax_traj.set_xlabel('t')
        ax_traj.set_ylabel('x,y')
        ax_traj.plot(t, true_y[:, 0], '-', t, true_y[:, 1], 'g-')
        ax_traj.plot(t, pred[:, 0], '--', t, pred[:, 1], 'b--')
        ax_traj.set_xlim(t.min(), t.max())
        ax_traj.set_ylim(-2, 2)
        ax_traj.xaxis.set_ticklabels([])
        ax_traj.yaxis.set_ticklabels([])
        ax_traj.legend()

        ax_phase.cla()
        ax_phase.set_title('Phase Portrait')
        ax_phase.set_xlabel('x')
        ax_phase.set_ylabel('y')
        ax_phase.plot(true_y[:, 0], true_y[:, 1], 'g-')
        ax_phase.plot(pred[:, 0], pred[:, 1], 'b--')
        ax_phase.set_xlim(-2, 2)
        ax_phase.set_ylim(-2, 2)
        ax_phase.xaxis.set_ticklabels([])
        ax_phase.yaxis.set_ticklabels([])

        ax_vecfield.cla()
        ax_vecfield.set_title('Learned Vector Field')
        ax_vecfield.set_xlabel('x')
        ax_vecfield.set_ylabel('y')
        ax_vecfield.xaxis.set_ticklabels([])
        ax_vecfield.yaxis.set_ticklabels([])

        # vector field plot
        y, x = npo.mgrid[-2:2:21j, -2:2:21j]
        dydt = nn_predict(np.stack([x, y], -1).reshape(21 * 21, 2), 0,
            params).reshape(-1, 2)
        mag = np.sqrt(dydt[:, 0]**2 + dydt[:, 1]**2).reshape(-1, 1)
        dydt = (dydt / mag)
        dydt = dydt.reshape(21, 21, 2)

        ax_vecfield.streamplot(x, y, dydt[:, :, 0], dydt[:, :, 1], color="black")
        ax_vecfield.set_xlim(-2, 2)
        ax_vecfield.set_ylim(-2, 2)

        fig.tight_layout()
        plt.draw()
        plt.pause(0.001)


    # Train neural net dynamics to match data. 
Author: HIPS, Project: autograd, Lines: 57, Source: ode_net.py

Example 14: build_mog_bbsvi

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def build_mog_bbsvi(logprob, num_samples, k=10, rs=npr.RandomState(0)):
    init_component_var_params = init_gaussian_var_params
    component_log_density = variational_log_density_gaussian
    component_sample = sample_diag_gaussian

    def unpack_mixture_params(mixture_params):
        log_weights = log_normalize(mixture_params[:k])
        var_params = np.reshape(mixture_params[k:], (k, -1))
        return log_weights, var_params

    def init_var_params(D, rs=npr.RandomState(0), **kwargs):
        log_weights = np.ones(k)
        component_weights = [init_component_var_params(D, rs=rs, **kwargs) for i in range(k)]
        return np.concatenate([log_weights] + component_weights)

    def sample(var_mixture_params, num_samples, rs):
        """Sample locations aren't a continuous function of parameters
        due to multinomial sampling."""
        log_weights, var_params = unpack_mixture_params(var_mixture_params)
        samples = np.concatenate([component_sample(params_k, num_samples, rs)[:, np.newaxis, :]
                             for params_k in var_params], axis=1)
        ixs = np.random.choice(k, size=num_samples, p=np.exp(log_weights))
        return np.array([samples[i, ix, :] for i, ix in enumerate(ixs)])

    def mixture_log_density(var_mixture_params, x):
        """Returns a weighted average over component densities."""
        log_weights, var_params = unpack_mixture_params(var_mixture_params)
        component_log_densities = np.vstack([component_log_density(params_k, x)
                                             for params_k in var_params]).T
        return logsumexp(component_log_densities + log_weights, axis=1, keepdims=False)

    def mixture_elbo(var_mixture_params, t):
        # We need to only sample the continuous component parameters,
        # and integrate over the discrete component choice

        def mixture_lower_bound(params):
            """Provides a stochastic estimate of the variational lower bound."""
            samples = component_sample(params, num_samples, rs)
            log_qs = mixture_log_density(var_mixture_params, samples)
            log_ps = logprob(samples, t)
            log_ps = np.reshape(log_ps, (num_samples, -1))
            log_qs = np.reshape(log_qs, (num_samples, -1))
            return np.mean(log_ps - log_qs)

        log_weights, var_params = unpack_mixture_params(var_mixture_params)
        component_elbos = np.stack(
            [mixture_lower_bound(params_k) for params_k in var_params])
        return np.sum(component_elbos*np.exp(log_weights))

    return init_var_params, mixture_elbo, mixture_log_density, sample 
Author: HIPS, Project: autograd, Lines: 52, Source: mixture_variational_inference.py

Example 15: get_d_paretomtl

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import stack [as alias]
def get_d_paretomtl(grads,value,weights,i):
    # calculate the gradient direction for Pareto MTL
    nobj, dim = grads.shape
    
    # check active constraints
    normalized_current_weight = weights[i] / np.linalg.norm(weights[i])
    normalized_rest_weights = np.delete(weights, i, axis=0) / np.linalg.norm(np.delete(weights, i, axis=0), axis=1, keepdims=True)
    w = normalized_rest_weights - normalized_current_weight

    # solve QP
    gx = np.dot(w, value / np.linalg.norm(value))
    idx = gx > 0

    vec = np.concatenate((grads, np.dot(w[idx], grads)), axis=0)
    
#    # use cvxopt to solve QP
#    P = np.dot(vec, vec.T)
#    q = np.zeros(nobj + np.sum(idx))
#    G = -np.eye(nobj + np.sum(idx))
#    h = np.zeros(nobj + np.sum(idx))
#    A = np.ones(nobj + np.sum(idx)).reshape(1, nobj + np.sum(idx))
#    b = np.ones(1)
#    cvxopt.solvers.options['show_progress'] = False
#    sol = cvxopt_solve_qp(P, q, G, h, A, b)
  
    # use MinNormSolver to solve QP
    sol, nd = MinNormSolver.find_min_norm_element(vec)
   
    
    # reformulate ParetoMTL as a linear scalarization method and return the weights
    weight0 = sol[0] + np.sum(np.array([sol[j] * w[idx][j - 2, 0] for j in np.arange(2, 2 + np.sum(idx))]))
    weight1 = sol[1] + np.sum(np.array([sol[j] * w[idx][j - 2, 1] for j in np.arange(2, 2 + np.sum(idx))]))
    weight = np.stack([weight0, weight1])

    return weight 
Author: Xi-L, Project: ParetoMTL, Lines: 47, Source: run_synthetic_example.py


Note: The autograd.numpy.stack examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code. Do not reproduce this article without permission.