Python numpy.concatenate Method Code Examples

This article collects typical usage examples of the numpy.concatenate method from Python's autograd.numpy module. If you are wondering how numpy.concatenate is used in practice, or what real calls to it look like, the hand-picked code examples below may help. You can also explore further usage examples from the autograd.numpy module to which this method belongs.


A total of 15 code examples of the numpy.concatenate method are shown below, sorted by popularity by default.
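Before the project examples, here is a minimal sketch (not taken from any of the projects below) of why one reaches for autograd.numpy.concatenate instead of plain numpy.concatenate: the autograd wrapper keeps the operation differentiable, so gradients flow through the concatenated result.

import autograd.numpy as anp
from autograd import grad

def loss(x):
    # build a longer vector out of two differentiable pieces
    y = anp.concatenate([x, anp.sin(x)])
    return anp.sum(y ** 2)

g = grad(loss)
print(g(anp.array([0.5, 1.0])))  # gradient flows through both halves of y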

Example 1: _calc_pareto_front

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def _calc_pareto_front(self, n_points=100, flatten=True):
        regions = [[0, 0.0830015349],
                   [0.182228780, 0.2577623634],
                   [0.4093136748, 0.4538821041],
                   [0.6183967944, 0.6525117038],
                   [0.8233317983, 0.8518328654]]

        pf = []

        for r in regions:
            x1 = anp.linspace(r[0], r[1], int(n_points / len(regions)))
            x2 = 1 - anp.sqrt(x1) - x1 * anp.sin(10 * anp.pi * x1)
            pf.append(anp.array([x1, x2]).T)

        if not flatten:
            pf = anp.concatenate([p[None, ...] for p in pf])
        else:
            pf = anp.row_stack(pf)

        return pf 
Developer: msu-coinlab, Project: pymoo, Lines of code: 22, Source file: zdt.py

Example 2: sfs

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def sfs(self, n):
        if n == 0:
            return np.array([0.])
        Et_jj = self.etjj(n)
        #assert np.all(Et_jj[:-1] - Et_jj[1:] >= 0.0) and np.all(Et_jj >= 0.0) and np.all(Et_jj <= self.tau)

        ret = np.sum(Et_jj[:, None] * Wmatrix(n), axis=0)

        before_tmrca = self.tau - np.sum(ret * np.arange(1, n) / n)
        # ignore branch length above untruncated TMRCA
        if self.tau == float('inf'):
            before_tmrca = 0.0

        ret = np.concatenate((np.array([0.0]), ret, np.array([before_tmrca])))
        return ret

    # def transition_prob(self, v, axis=0):
    #     return moran_model.moran_action(self.scaled_time, v, axis=axis) 
Developer: popgenmethods, Project: momi2, Lines of code: 20, Source file: size_history.py

Example 3: get_ith_minibatch_ixs_fences

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def get_ith_minibatch_ixs_fences(b_i, batch_size, fences):
    """Split timeseries data of uneven sequence lengths into batches.
    This is how we handle different sized sequences.
    
    @param b_i: integer
                iteration index
    @param batch_size: integer
                       size of batch
    @param fences: array of integers
                   cumulative cutoff indices; fences[i]:fences[i+1] spans sequence i
    @return idx: slice object
                 which sequences fall in this minibatch
    @return batch_slice: array of integers
                         flattened timestep indices of those sequences
    """
    num_data = len(fences) - 1
    num_minibatches = num_data // batch_size + ((num_data % batch_size) > 0)  # integer division so the result can index a slice
    b_i = b_i % num_minibatches
    idx = slice(b_i * batch_size, (b_i+1) * batch_size)
    batch_i = np.arange(num_data)[idx]
    batch_slice = np.concatenate([range(i, j) for i, j in 
                                  zip(fences[batch_i], fences[batch_i+1])])
    return idx, batch_slice 
Developer: dtak, Project: tree-regularization-public, Lines of code: 23, Source file: train.py
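A hypothetical usage sketch for the function above (the fences array and batch size are invented for illustration). With fences = [0, 3, 7, 9] there are three sequences occupying index ranges [0, 3), [3, 7) and [7, 9); each minibatch gets a slice of sequences plus the concatenated timestep indices belonging to them.

import numpy as np

fences = np.array([0, 3, 7, 9])  # three sequences of lengths 3, 4 and 2
idx, batch_slice = get_ith_minibatch_ixs_fences(0, batch_size=2, fences=fences)
print(idx)          # slice(0, 2, None) -> the first two sequences
print(batch_slice)  # [0 1 2 3 4 5 6]  -> their flattened timestep indices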

Example 4: optimize_and_lls

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def optimize_and_lls(optfun):
        num_iters = 200
        elbos     = []
        def callback(params, t, g):
            elbo_val = -objective(params, t)
            elbos.append(elbo_val)
            if t % 50 == 0:
                print("Iteration {} lower bound {}".format(t, elbo_val))

        init_mean    = -1 * np.ones(D)
        init_log_std = -5 * np.ones(D)
        init_var_params = np.concatenate([init_mean, init_log_std])
        variational_params = optfun(num_iters, init_var_params, callback)
        return np.array(elbos)

    # let's optimize this with a few different step sizes 
Developer: HIPS, Project: autograd, Lines of code: 18, Source file: natural_gradient_black_box_svi.py
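The concatenate call above packs the D-dimensional mean and the D-dimensional log standard deviation into one flat vector for the optimizer. A minimal sketch of the matching unpack (the fisher_diag example further down assumes such an unpack_params; D is taken to be defined in the surrounding script):

def unpack_params(params):
    # first D entries are the variational mean, the rest are the log std devs
    mean, log_std = params[:D], params[D:]
    return mean, log_std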

Example 5: callback

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def callback(X, y, predict_func, acquisition_function, next_point, new_value):
        plt.cla()

        # Show posterior marginals.
        plot_xs = np.reshape(np.linspace(domain_min, domain_max, 300), (300,1))
        pred_mean, pred_std = predict_func(plot_xs)
        ax.plot(plot_xs, pred_mean, 'b')
        ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
                np.concatenate([pred_mean - 1.96 * pred_std,
                               (pred_mean + 1.96 * pred_std)[::-1]]),
                alpha=.15, fc='Blue', ec='None')

        ax.plot(X, y, 'kx')
        ax.plot(next_point, new_value, 'ro')

        alphas = acquisition_function(plot_xs)
        ax.plot(plot_xs, alphas, 'r')
        ax.set_ylim([-1.5, 1.5])
        ax.set_xticks([])
        ax.set_yticks([])
        plt.draw()
        plt.pause(1) 
Developer: HIPS, Project: autograd, Lines of code: 24, Source file: bayesian_optimization.py
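The two concatenate calls inside ax.fill implement a common matplotlib idiom: walk forward along the x axis on the lower confidence bound and backward on the upper bound, so the two halves join into one closed polygon that fill can shade. A stripped-down sketch of just that idiom, with made-up data standing in for the GP predictions:

import numpy as np
import matplotlib.pyplot as plt

xs = np.linspace(0.0, 1.0, 50)
mean = np.sin(2 * np.pi * xs)
std = 0.1 * np.ones_like(xs)

# lower bound traversed left-to-right, upper bound right-to-left
poly_x = np.concatenate([xs, xs[::-1]])
poly_y = np.concatenate([mean - 1.96 * std, (mean + 1.96 * std)[::-1]])
plt.fill(poly_x, poly_y, alpha=.15, fc='Blue', ec='None')
plt.plot(xs, mean, 'b')
plt.show()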

Example 6: plot_gp

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def plot_gp(ax, X, y, pred_mean, pred_cov, plot_xs):
        ax.cla()
        marg_std = np.sqrt(np.diag(pred_cov))
        ax.plot(plot_xs, pred_mean, 'b')
        ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
                np.concatenate([pred_mean - 1.96 * marg_std,
                               (pred_mean + 1.96 * marg_std)[::-1]]),
                alpha=.15, fc='Blue', ec='None')

        # Show samples from posterior.
        rs = npr.RandomState(0)
        sampled_funcs = rs.multivariate_normal(pred_mean, pred_cov, size=10)
        ax.plot(plot_xs, sampled_funcs.T)
        ax.plot(X, y, 'kx')
        ax.set_ylim([-1.5, 1.5])
        ax.set_xticks([])
        ax.set_yticks([]) 
Developer: HIPS, Project: autograd, Lines of code: 19, Source file: deep_gaussian_process.py

Example 7: test_cast_to_int

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def test_cast_to_int():
    inds = np.ones(5)[:,None]

    def fun(W):
        # glue W and inds together
        glued_together = np.concatenate((W, inds), axis=1)

        # separate W and inds back out
        new_W = W[:,:-1]
        new_inds = np.int64(W[:,-1])

        assert new_inds.dtype == np.int64
        return new_W[new_inds].sum()

    W = np.random.randn(5, 10)
    check_grads(fun)(W) 
Developer: HIPS, Project: autograd, Lines of code: 18, Source file: test_numpy.py

Example 8: initialize_NN

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def initialize_NN(self, Q):
        hyp = np.array([])
        layers = Q.shape[0]
        for layer in range(0,layers-2):
            A = -np.sqrt(6.0/(Q[layer]+Q[layer+1])) + 2.0*np.sqrt(6.0/(Q[layer]+Q[layer+1]))*np.random.rand(Q[layer],Q[layer+1])
            b = np.zeros((1,Q[layer+1]))
            hyp = np.concatenate([hyp, A.ravel(), b.ravel()])

        A = -np.sqrt(6.0/(Q[-2]+Q[-1])) + 2.0*np.sqrt(6.0/(Q[-2]+Q[-1]))*np.random.rand(Q[-2],Q[-1])
        b = np.zeros((1,Q[-1]))
        hyp = np.concatenate([hyp, A.ravel(), b.ravel()])
        
        A = -np.sqrt(6.0/(Q[-2]+Q[-1])) + 2.0*np.sqrt(6.0/(Q[-2]+Q[-1]))*np.random.rand(Q[-2],Q[-1])
        b = np.zeros((1,Q[-1]))
        hyp = np.concatenate([hyp, A.ravel(), b.ravel()])
        
        return hyp 
Developer: maziarraissi, Project: DeepLearningTutorial, Lines of code: 19, Source file: VariationalAutoencoders.py
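initialize_NN flattens every weight matrix A and bias b with ravel() and appends them to one 1-D vector hyp; the last layer is initialized twice, presumably for the two output heads of the variational autoencoder. A hypothetical helper, not part of the original file, showing how one (A, b) pair can be cut back out of such a flat vector:

import numpy as np

def unpack_layer(hyp, offset, n_in, n_out):
    # slice one weight matrix and bias vector out of the flat parameter vector
    A = hyp[offset: offset + n_in * n_out].reshape(n_in, n_out)
    offset += n_in * n_out
    b = hyp[offset: offset + n_out].reshape(1, n_out)
    offset += n_out
    return A, b, offset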

Example 9: __init__

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def __init__(self, X, y, M=10, max_iter = 2000, N_batch = 1, 
                 monitor_likelihood = 10, lrate = 1e-3):
        (N,D) = X.shape
        N_subset = min(N, 10000)
        idx = np.random.choice(N, N_subset, replace=False)
        kmeans = KMeans(n_clusters=M, random_state=0).fit(X[idx,:])
        Z = kmeans.cluster_centers_
    
        hyp = np.log(np.ones(D+1))
        logsigma_n = np.array([-4.0])
        hyp = np.concatenate([hyp, logsigma_n])
    
        m = np.zeros((M,1))
        S = kernel(Z,Z,hyp[:-1])

        self.X = X
        self.y = y
        
        self.M = M
        self.Z = Z
        self.m = m
        self.S = S
        
        self.hyp= hyp
        
        self.max_iter = max_iter
        self.N_batch = N_batch
        self.monitor_likelihood = monitor_likelihood
        self.jitter = 1e-8
        self.jitter_cov = 1e-8
        
        # Adam optimizer parameters
        self.mt_hyp = np.zeros(hyp.shape)
        self.vt_hyp = np.zeros(hyp.shape)
        self.lrate = lrate 
Developer: maziarraissi, Project: ParametricGP, Lines of code: 37, Source file: parametric_GP.py

Example 10: transformed_expi

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def transformed_expi(x):
    abs_x = np.abs(x)
    ser = abs_x < 1. / 45.
    nser = np.logical_not(ser)

#     ret = np.zeros(x.shape)
#     ret[ser], ret[nser] = transformed_expi_series(x[ser]), transformed_expi_naive(x[nser])
#     return ret

    # We use np.concatenate to combine.
    # would be better to use ret[ser] and ret[nser] as commented out above
    # but array assignment not yet supported by autograd
    assert np.all(abs_x[:-1] >= abs_x[1:])
    return np.concatenate((transformed_expi_naive(x[nser]), transformed_expi_series(x[ser]))) 
Developer: popgenmethods, Project: momi2, Lines of code: 16, Source file: math_functions.py
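The comments in the snippet explain the pattern: autograd does not support in-place array assignment, so instead of filling ret[ser] and ret[nser] separately, the two branches are evaluated on their halves of the (sorted) input and glued back together with np.concatenate. A toy version of the same trick on a made-up function, under the same assumption that the input is ordered with the large-magnitude entries first:

import autograd.numpy as np

def sinc_like(x, eps=1e-3):
    # sin(x)/x, with a Taylor branch near zero where the naive form hits 0/0
    small = np.abs(x) < eps
    big = ~small
    taylor = 1.0 - x[small] ** 2 / 6.0
    naive = np.sin(x[big]) / x[big]
    # concatenation replaces the ret[big] = ..., ret[small] = ... assignments
    return np.concatenate([naive, taylor])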

Example 11: expm1d

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def expm1d(x, eps=1e-6):
    x = np.array(x)
    abs_x = np.abs(x)
    if x.shape:
        # FIXME: don't require abs_x to be increasing
        assert np.all(abs_x[1:] >= abs_x[:-1])
        small = abs_x < eps
        big = ~small
        return np.concatenate([expm1d_taylor(x[small]),
                               expm1d_naive(x[big])])
    elif abs_x < eps:
        return expm1d_taylor(x)
    else:
        return expm1d_naive(x) 
Developer: popgenmethods, Project: momi2, Lines of code: 16, Source file: math_functions.py

Example 12: taylor_approx

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def taylor_approx(target, stencil, values):
  """Use taylor series to approximate up to second order derivatives.

  Args:
    target: An array of shape (..., n), a batch of n-dimensional points
      where one wants to approximate function value and derivatives.
    stencil: An array of shape broadcastable to (..., k, n), for each target
      point a set of k = triangle(n + 1) points to use on its approximation.
    values: An array of shape broadcastable to (..., k), the function value at
      each of the stencil points.

  Returns:
    An array of shape (..., k), for each target point the approximated
    function value, gradient and hessian evaluated at that point (flattened
    and in the same order as returned by derivative_names).
  """
  # Broadcast arrays to their required shape.
  batch_shape, ndim = target.shape[:-1], target.shape[-1]
  stencil = np.broadcast_to(stencil, batch_shape + (triangular(ndim + 1), ndim))
  values = np.broadcast_to(values, stencil.shape[:-1])

  # Subtract target from each stencil point.
  delta_x = stencil - np.expand_dims(target, axis=-2)
  delta_xy = np.matmul(
      np.expand_dims(delta_x, axis=-1), np.expand_dims(delta_x, axis=-2))
  i = np.arange(ndim)
  j, k = np.triu_indices(ndim, k=1)

  # Build coefficients for the Taylor series equations, namely:
  #   f(stencil) = coeffs @ [f(target), df/d0(target), ...]
  coeffs = np.concatenate([
      np.ones(delta_x.shape[:-1] + (1,)),  # f(target)
      delta_x,  # df/di(target)
      delta_xy[..., i, i] / 2,  # d^2f/di^2(target)
      delta_xy[..., j, k],  # d^2f/{dj dk}(target)
  ], axis=-1)

  # Then: [f(target), df/d0(target), ...] = coeffs^{-1} @ f(stencil)
  return np.squeeze(
      np.matmul(np.linalg.inv(coeffs), values[..., np.newaxis]), axis=-1) 
Developer: google, Project: tf-quant-finance, Lines of code: 42, Source file: methods.py
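A hypothetical one-dimensional check of the routine above (assuming taylor_approx and its triangular helper are importable from the same module). With n = 1 the stencil needs triangular(2) = 3 points, and for f(x) = x**2 around a target of 0 the recovered [value, gradient, second derivative] should be [0, 0, 2]:

import numpy as np

target = np.array([0.0])                    # one 1-D target point
stencil = np.array([[-1.0], [0.0], [1.0]])  # k = 3 surrounding points
values = np.array([1.0, 0.0, 1.0])          # f(x) = x**2 at the stencil points

print(taylor_approx(target, stencil, values))  # expected: [0. 0. 2.]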

Example 13: fisher_diag

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def fisher_diag(lam):
        mu, log_sigma = unpack_params(lam)
        return np.concatenate([np.exp(-2.*log_sigma),
                               np.ones(len(log_sigma))*2])

    # simple! basically free! 
Developer: HIPS, Project: autograd, Lines of code: 8, Source file: natural_gradient_black_box_svi.py

Example 14: build_toy_dataset

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def build_toy_dataset(D=1, n_data=20, noise_std=0.1):
    rs = npr.RandomState(0)
    inputs  = np.concatenate([np.linspace(0, 3, num=n_data // 2),
                              np.linspace(6, 8, num=n_data // 2)])
    targets = (np.cos(inputs) + rs.randn(n_data) * noise_std) / 2.0
    inputs = (inputs - 4.0) / 2.0
    inputs  = inputs.reshape((len(inputs), D))
    return inputs, targets 
Developer: HIPS, Project: autograd, Lines of code: 10, Source file: gaussian_process.py

Example 15: callback

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import concatenate [as alias]
def callback(params):
        print("Log likelihood {}".format(-objective(params)))
        plt.cla()

        # Show posterior marginals.
        plot_xs = np.reshape(np.linspace(-7, 7, 300), (300,1))
        pred_mean, pred_cov = predict(params, X, y, plot_xs)
        marg_std = np.sqrt(np.diag(pred_cov))
        ax.plot(plot_xs, pred_mean, 'b')
        ax.fill(np.concatenate([plot_xs, plot_xs[::-1]]),
                np.concatenate([pred_mean - 1.96 * marg_std,
                               (pred_mean + 1.96 * marg_std)[::-1]]),
                alpha=.15, fc='Blue', ec='None')

        # Show samples from posterior.
        rs = npr.RandomState(0)
        sampled_funcs = rs.multivariate_normal(pred_mean, pred_cov, size=10)
        ax.plot(plot_xs, sampled_funcs.T)

        ax.plot(X, y, 'kx')
        ax.set_ylim([-1.5, 1.5])
        ax.set_xticks([])
        ax.set_yticks([])
        plt.draw()
        plt.pause(1.0/60.0)

    # Initialize covariance parameters 
Developer: HIPS, Project: autograd, Lines of code: 29, Source file: gaussian_process.py


Note: The autograd.numpy.concatenate examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors. Please consult the License of the corresponding project before distributing or using the code; do not reproduce without permission.