Python numpy.zeros Method Code Examples

This article collects typical usage examples of the numpy.zeros method from the Python module autograd.numpy. If you are unsure what numpy.zeros does, how to call it, or how it looks in real code, the curated examples below may help. You can also explore further examples of other methods in autograd.numpy.


The following presents 15 code examples of the numpy.zeros method, sorted by popularity by default.
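Before the individual examples, here is a minimal sketch (not taken from any of the projects below; the function name and values are made up for illustration) of why code imports zeros from autograd.numpy rather than plain numpy: arrays created through autograd.numpy can flow through functions that autograd.grad differentiates.

import autograd.numpy as np
from autograd import grad

def pad_and_sum(x):
    """Surround x with zero padding, then reduce to a scalar.

    np.zeros here is autograd.numpy.zeros, so the whole pipeline stays
    differentiable with respect to x. The padded array is built with
    np.concatenate because autograd does not support differentiating
    through in-place assignment (compare Example 12 below).
    """
    buf = np.zeros(3)                        # constant zero buffer
    padded = np.concatenate([buf, x, buf])
    return np.sum(padded ** 2)

x = np.array([1.0, 2.0, 3.0])
print(grad(pad_and_sum)(x))                  # -> [2. 4. 6.]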

Example 1: __init__

# Required import: from autograd import numpy [as anp]
# Or: from autograd.numpy import zeros [as an alias]
def __init__(self, n_var=2, n_constr=2, **kwargs):
        super().__init__(n_var, n_constr, **kwargs)

        a, b = anp.zeros(n_constr + 1), anp.zeros(n_constr + 1)
        a[0], b[0] = 1, 1
        delta = 1 / (n_constr + 1)
        alpha = delta

        for j in range(n_constr):
            beta = a[j] * anp.exp(-b[j] * alpha)
            a[j + 1] = (a[j] + beta) / 2
            b[j + 1] = - 1 / alpha * anp.log(beta / a[j + 1])

            alpha += delta

        self.a = a[1:]
        self.b = b[1:] 
Developer ID: msu-coinlab, Project: pymoo, Lines of code: 19, Source: ctp.py

Example 2: fast_zero_pad

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
def fast_zero_pad(arr, pad_width):
    """Fast version of numpy.pad when `mode="constant"`

    Executing `numpy.pad` with zeros is ~1000 times slower
    because it doesn't make use of the `zeros` method for padding.

    Parameters
    ----------
    arr: array
        The array to pad
    pad_width: tuple
        Number of values padded to the edges of each axis.
        See numpy docs for more.

    Returns
    -------
    result: array
        The array padded with zeros
    """
    newshape = tuple([a+ps[0]+ps[1] for a, ps in zip(arr.shape, pad_width)])

    result = np.zeros(newshape, dtype=arr.dtype)
    slices = tuple([slice(start, s-end) for s, (start, end) in zip(result.shape, pad_width)])
    result[slices] = arr
    return result 
Developer ID: pmelchior, Project: scarlet, Lines of code: 27, Source: fft.py
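A hypothetical call to the helper above (the array shape and pad widths are made up for illustration); the result should match numpy.pad with zero constants:

import numpy as np  # fast_zero_pad only uses the NumPy API, so plain numpy works here as well

arr = np.arange(12, dtype=float).reshape(3, 4)
pad_width = ((1, 1), (2, 0))   # (before, after) pad counts per axis, as in numpy.pad

padded = fast_zero_pad(arr, pad_width)              # shape (5, 6)
reference = np.pad(arr, pad_width, mode="constant")
assert np.array_equal(padded, reference)

Note that autograd cannot differentiate through the in-place assignment inside fast_zero_pad (compare Example 12), so treat it as a plain array utility unless a custom gradient is registered for it.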

Example 3: log_norm

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
def log_norm(self):
        try:
            return self._log_norm
        except AttributeError:
            if self.frame != self.model_frame:
                images_ = self.images[self.slices_for_images]
                weights_ = self.weights[self.slices_for_images]
            else:
                images_ = self.images
                weights_ = self.weights

            # normalization of the single-pixel likelihood:
            # 1 / [(2pi)^1/2 (sigma^2)^1/2]
            # with inverse variance weights: sigma^2 = 1/weight
            # full likelihood is sum over all data samples: pixel in images
            # NOTE: this assumes that all pixels are used in likelihood!
            log_sigma = np.zeros(weights_.shape, dtype=self.weights.dtype)
            cuts = weights_ > 0
            log_sigma[cuts] = np.log(1 / weights_[cuts])
            self._log_norm = (
                    np.prod(images_.shape) / 2 * np.log(2 * np.pi)
                    + np.sum(log_sigma) / 2
            )
        return self._log_norm 
Developer ID: pmelchior, Project: scarlet, Lines of code: 26, Source: observation.py

Example 4: get_loss

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
def get_loss(self, model):
        """Computes the loss/fidelity of a given model wrt to the observation
        Parameters
        ----------
        model: array
            A model from `Blend`
        Returns
        -------
        loss: float
            Loss of the model
        """
        model_ = self.render(model)
        images_ = self.images
        weights_ = self.weights

        # properly normalized likelihood
        log_sigma = np.zeros(weights_.shape, dtype=weights_.dtype)
        cuts = weights_ > 0
        log_sigma[cuts] = np.log(1 / weights_[cuts])
        log_norm = (
                np.prod(images_.shape) / 2 * np.log(2 * np.pi)
                + np.sum(log_sigma) / 2
        )

        return log_norm + 0.5 * np.sum(weights_ * (model_ - images_) ** 2) 
Developer ID: pmelchior, Project: scarlet, Lines of code: 27, Source: observation.py

Example 5: _grad_add_models

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
def _grad_add_models(upstream_grad, *models, full_model, slices, index):
    """Gradient for a single model

    The full model is just the sum of the models,
    so the gradient is 1 for each model,
    we just have to slice it appropriately.
    """
    model = models[index]
    full_model_slices = slices[index][0]
    model_slices = slices[index][1]

    def result(upstream_grad):
        _result = np.zeros(model.shape, dtype=model.dtype)
        _result[model_slices] = upstream_grad[full_model_slices]
        return _result
    return result 
Developer ID: pmelchior, Project: scarlet, Lines of code: 18, Source: component.py

Example 6: get_treeseq_configs

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
# Also requires: import scipy.sparse
def get_treeseq_configs(treeseq, sampled_n):
    mat = np.zeros((len(sampled_n), sum(sampled_n)), dtype=int)
    j = 0
    for i, n in enumerate(sampled_n):
        for _ in range(n):
            mat[i, j] = 1
            j += 1
    mat = scipy.sparse.csr_matrix(mat)

    def get_config(genos):
        derived_counts = mat.dot(genos)
        return np.array([
            sampled_n - derived_counts,
            derived_counts
        ]).T

    for v in treeseq.variants():
        yield get_config(v.genotypes) 
Developer ID: popgenmethods, Project: momi2, Lines of code: 20, Source: demography.py

Example 7: __init__

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
# Also requires: from autograd import grad, jacobian
def __init__(self, next_state, running_cost, final_cost,
                 umax, state_dim, pred_time=50):
        self.pred_time = pred_time
        self.umax = umax
        self.v = [0.0 for _ in range(pred_time + 1)]
        self.v_x = [np.zeros(state_dim) for _ in range(pred_time + 1)]
        self.v_xx = [np.zeros((state_dim, state_dim)) for _ in range(pred_time + 1)]
        self.f = next_state
        self.lf = final_cost
        self.lf_x = grad(self.lf)
        self.lf_xx = jacobian(self.lf_x)
        self.l_x = grad(running_cost, 0)
        self.l_u = grad(running_cost, 1)
        self.l_xx = jacobian(self.l_x, 0)
        self.l_uu = jacobian(self.l_u, 1)
        self.l_ux = jacobian(self.l_u, 0)
        self.f_x = jacobian(self.f, 0)
        self.f_u = jacobian(self.f, 1)
        self.f_xx = jacobian(self.f_x, 0)
        self.f_uu = jacobian(self.f_u, 1)
        self.f_ux = jacobian(self.f_u, 0) 
Developer ID: neka-nat, Project: ddp-gym, Lines of code: 23, Source: ddp_gym.py

Example 8: init_model_params

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
# Also requires a NumPy-style RNG module, e.g. import numpy.random as npr
def init_model_params(Dx, Dy, alpha, r, obs, rs = npr.RandomState(0)):
    mu0 = np.zeros(Dx)
    Sigma0 = np.eye(Dx)
    
    A = np.zeros((Dx,Dx))
    for i in range(Dx):
        for j in range(Dx):
            A[i,j] = alpha**(abs(i-j)+1)
            
    Q = np.eye(Dx)
    C = np.zeros((Dy,Dx))
    if obs == 'sparse':
        C[:Dy,:Dy] = np.eye(Dy)
    else:
        C = rs.normal(size=(Dy,Dx))
    R = r * np.eye(Dy)
    
    return (mu0, Sigma0, A, Q, C, R) 
Developer ID: blei-lab, Project: variational-smc, Lines of code: 20, Source: lgss_example.py

Example 9: generate_data

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
# Also requires a NumPy-style RNG module, e.g. import numpy.random as npr
def generate_data(model_params, T = 5, rs = npr.RandomState(0)):
    mu0, Sigma0, A, Q, C, R = model_params
    Dx = mu0.shape[0]
    Dy = R.shape[0]
    
    x_true = np.zeros((T,Dx))
    y_true = np.zeros((T,Dy))

    for t in range(T):
        if t > 0:
            x_true[t,:] = rs.multivariate_normal(np.dot(A,x_true[t-1,:]),Q)
        else:
            x_true[0,:] = rs.multivariate_normal(mu0,Sigma0)
        y_true[t,:] = rs.multivariate_normal(np.dot(C,x_true[t,:]),R)
        
    return x_true, y_true 
Developer ID: blei-lab, Project: variational-smc, Lines of code: 18, Source: lgss_example.py
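Examples 8 and 9 fit together; a hypothetical end-to-end run (the dimensions, alpha, r, and T below are illustrative assumptions, not values taken from the project) could look like this:

from numpy.random import RandomState   # any NumPy-compatible RNG works for the rs argument

# Illustrative sizes and noise levels (assumptions, not from the project).
Dx, Dy, alpha, r, T = 5, 3, 0.42, 0.1, 10

model_params = init_model_params(Dx, Dy, alpha, r, obs='sparse', rs=RandomState(0))
x_true, y_true = generate_data(model_params, T=T, rs=RandomState(1))
print(x_true.shape, y_true.shape)   # (10, 5) (10, 3)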

Example 10: train

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
# Also requires: from autograd import grad
def train(self, X_train, F_train, y_train, batch_size=32, num_iters=1000, 
              lr=1e-3, param_scale=0.01, log_every=100, init_weights=None):
        grad_fun = build_batched_grad_fences(grad(self.objective), batch_size, 
                                             X_train, F_train, y_train)
        if init_weights is None:
            init_weights = self.init_weights(param_scale)
        saved_weights = np.zeros((num_iters, self.num_weights))

        def callback(weights, i, gradients):
            apl = self.average_path_length(weights, X_train, F_train, y_train)
            saved_weights[i, :] = weights
            loss_train = self.objective(weights, X_train, F_train, y_train)
            if i % log_every == 0: 
                print('model: gru | iter: {} | loss: {:.2f} | apl: {:.2f}'.format(i, loss_train, apl))

        optimized_weights = adam(grad_fun, init_weights, num_iters=num_iters, 
                                 step_size=lr, callback=callback)
        self.saved_weights = saved_weights
        self.weights = optimized_weights
        return optimized_weights 
Developer ID: dtak, Project: tree-regularization-public, Lines of code: 22, Source: train.py

Example 11: project

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
def project(vx, vy, occlusion):
    """Project the velocity field to be approximately mass-conserving,
       using a few iterations of Gauss-Seidel."""
    p = np.zeros(vx.shape)
    div = -0.5 * (np.roll(vx, -1, axis=1) - np.roll(vx, 1, axis=1)
                + np.roll(vy, -1, axis=0) - np.roll(vy, 1, axis=0))
    div = make_continuous(div, occlusion)

    for k in range(50):
        p = (div + np.roll(p, 1, axis=1) + np.roll(p, -1, axis=1)
                 + np.roll(p, 1, axis=0) + np.roll(p, -1, axis=0))/4.0
        p = make_continuous(p, occlusion)

    vx = vx - 0.5*(np.roll(p, -1, axis=1) - np.roll(p, 1, axis=1))
    vy = vy - 0.5*(np.roll(p, -1, axis=0) - np.roll(p, 1, axis=0))

    vx = occlude(vx, occlusion)
    vy = occlude(vy, occlusion)
    return vx, vy 
Developer ID: HIPS, Project: autograd, Lines of code: 21, Source: wing.py

Example 12: test_assignment_raises_error

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
# Also requires: import pytest, autograd's check_grads test utility, and a NumPy-style RNG as npr
def test_assignment_raises_error():
    def fun(A, b):
        A[1] = b
        return A
    A = npr.randn(5)
    with pytest.raises(TypeError):
        check_grads(fun)(A, 3.0)

# def test_nonscalar_output_1():
#     with pytest.raises(TypeError):
#         grad(lambda x: x * 2)(np.zeros(2))

# def test_nonscalar_output_2():
#     with pytest.raises(TypeError):
#         grad(lambda x: x * 2)(np.zeros(2))

# TODO:
# Diamond patterns
# Taking grad again after returning const
# Empty functions
# 2nd derivatives with fanout, thinking about the outgrad adder 
Developer ID: HIPS, Project: autograd, Lines of code: 23, Source: test_graphs.py

Example 13: test_getter

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
# Also requires: from autograd import grad, plus a NumPy-style RNG as npr
def test_getter():
    def fun(input_tuple):
        A = np.sum(input_tuple[0])
        B = np.sum(input_tuple[1])
        C = np.sum(input_tuple[1])
        return A + B + C

    d_fun = grad(fun)
    input_tuple = (npr.randn(5, 6),
                   npr.randn(4, 3),
                   npr.randn(2, 4))

    result = d_fun(input_tuple)
    assert np.allclose(result[0], np.ones((5, 6)))
    assert np.allclose(result[1], 2 * np.ones((4, 3)))
    assert np.allclose(result[2], np.zeros((2, 4))) 
Developer ID: HIPS, Project: autograd, Lines of code: 18, Source: test_tuple.py

Example 14: test_getter

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
# Also requires: from autograd import grad, plus a NumPy-style RNG as npr
def test_getter():
    def fun(input_dict):
        A = np.sum(input_dict['item_1'])
        B = np.sum(input_dict['item_2'])
        C = np.sum(input_dict['item_2'])
        return A + B + C

    d_fun = grad(fun)
    input_dict = {'item_1' : npr.randn(5, 6),
                  'item_2' : npr.randn(4, 3),
                  'item_X' : npr.randn(2, 4)}

    result = d_fun(input_dict)
    assert np.allclose(result['item_1'], np.ones((5, 6)))
    assert np.allclose(result['item_2'], 2 * np.ones((4, 3)))
    assert np.allclose(result['item_X'], np.zeros((2, 4))) 
Developer ID: HIPS, Project: autograd, Lines of code: 18, Source: test_dict.py

Example 15: test_isinstance

# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import zeros [as an alias]
# Also requires: from autograd import grad
def test_isinstance():
  def checker(ex, type_, truthval):
    assert isinstance(ex, type_) == truthval
    return 1.

  examples = [
      [list,          [[]],          [()]],
      [np.ndarray,    [np.zeros(1)], [[]]],
      [(tuple, list), [[], ()],      [np.zeros(1)]],
  ]

  for type_, positive_examples, negative_examples in examples:
    for ex in positive_examples:
      checker(ex, type_, True)
      grad(checker)(ex, type_, True)

    for ex in negative_examples:
      checker(ex, type_, False)
      grad(checker)(ex, type_, False) 
Developer ID: HIPS, Project: autograd, Lines of code: 21, Source: test_builtins.py


Note: The autograd.numpy.zeros method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code. Do not reproduce this compilation without permission.