

Python autograd.numpy Method Code Examples

This article collects typical usage examples of the Python autograd.numpy method, drawn from open-source projects. If you are unsure how autograd.numpy is used in practice, the curated examples below may help. You can also explore further usage examples from the autograd package to which this method belongs.


Fifteen code examples of autograd.numpy are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.

Example 1: test_no_differentiable_parameters

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def test_no_differentiable_parameters(self):
        """If there are no differentiable parameters, the output of the gradient
        function is an empty tuple, and a warning is emitted."""
        dev = qml.device("default.qubit", wires=2)

        @qml.qnode(dev, interface="autograd")
        def circuit(data1):
            qml.templates.AmplitudeEmbedding(data1, wires=[0, 1])
            return qml.expval(qml.PauliZ(0))

        grad_fn = qml.grad(circuit)
        data1 = qml.numpy.array([0, 1, 1, 0], requires_grad=False) / np.sqrt(2)

        with pytest.warns(UserWarning, match="Output seems independent of input"):
            res = grad_fn(data1)

        assert res == tuple() 
Developer: XanaduAI, Project: pennylane, Lines: 19, Source: test_autograd.py
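For contrast, here is a minimal hedged sketch of the trainable case, assuming PennyLane with the autograd interface is installed; the circuit below is illustrative, not part of the test suite. The gradient of <Z> under RX(x) is -sin(x).

import pennylane as qml
from pennylane import numpy as pnp

dev = qml.device("default.qubit", wires=1)

@qml.qnode(dev, interface="autograd")
def circuit(x):
    qml.RX(x, wires=0)
    return qml.expval(qml.PauliZ(0))

x = pnp.array(0.3, requires_grad=True)
print(qml.grad(circuit)(x))  # analytically: -sin(0.3)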

Example 2: test_call_changing_trainability

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def test_call_changing_trainability(self):
        """Test that trainability properly changes between QNode calls"""
        dev = qml.device("default.qubit", wires=2)

        @qml.qnode(dev, interface="autograd")
        def circuit(x, y, z):
            qml.RX(x, wires=0)
            qml.RY(y, wires=0)
            qml.RZ(z, wires=0)
            return qml.expval(qml.PauliZ(0))

        x = qml.numpy.array(1, requires_grad=True)
        y = qml.numpy.array(2, requires_grad=False)
        z = qml.numpy.array(3, requires_grad=True)

        res = circuit(x, y, z)

        assert circuit.get_trainable_args() == {0, 2}

        x.requires_grad = False
        y.requires_grad = True

        res = circuit(x, y, z)

        assert circuit.get_trainable_args() == {1, 2} 
Developer: XanaduAI, Project: pennylane, Lines: 27, Source: test_autograd.py
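The requires_grad flag that drives the trainability change above can be toggled on any pennylane.numpy tensor; a minimal sketch:

from pennylane import numpy as pnp

x = pnp.array(1.0, requires_grad=True)
print(x.requires_grad)   # True
x.requires_grad = False
print(x.requires_grad)   # False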

Example 3: gradX_y

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def gradX_y(self, X, y):
        """
        Compute the gradient with respect to X (the first argument of the
        kernel). Base class provides a default autograd implementation for convenience.
        Subclasses should override if this does not work.

        X: nx x d numpy array.
        y: numpy array of length d.

        Return a numpy array G of size nx x d, the derivative of k(X, y) with
        respect to X.
        """
        yrow = np.reshape(y, (1, -1))
        f = lambda X: self.eval(X, yrow)
        g = autograd.elementwise_grad(f)
        G = g(X)
        assert G.shape[0] == X.shape[0]
        assert G.shape[1] == X.shape[1]
        return G

# end class KSTKernel 
Developer: wittawatj, Project: kernel-gof, Lines: 23, Source: kernel.py
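The same autograd pattern can be exercised standalone. The sketch below substitutes a hypothetical Gaussian kernel gauss_eval for self.eval (an assumption for illustration, not the class's own kernel):

import autograd
import autograd.numpy as np

def gauss_eval(X, Y, sigma2=1.0):
    # nx x ny Gram matrix of a Gaussian kernel
    D2 = np.sum(X**2, 1)[:, np.newaxis] - 2*np.dot(X, Y.T) + np.sum(Y**2, 1)[np.newaxis, :]
    return np.exp(-D2/(2.0*sigma2))

X = np.random.randn(5, 3)
y = np.random.randn(3)
yrow = np.reshape(y, (1, -1))
G = autograd.elementwise_grad(lambda X: gauss_eval(X, yrow))(X)
print(G.shape)  # (5, 3): d k(x_i, y) / d x_i for each row x_i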

Example 4: gradX_Y

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def gradX_Y(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of X in k(X, Y).

        X: nx x d
        Y: ny x d

        Return a numpy array of size nx x ny.
        """
        D2 = util.dist2_matrix(X, Y)
        # 1d array of length nx
        Xi = X[:, dim]
        # 1d array of length ny
        Yi = Y[:, dim]
        # nx x ny
        dim_diff = Xi[:, np.newaxis] - Yi[np.newaxis, :]

        b = self.b
        c = self.c
        Gdim = ( 2.0*b*(c**2 + D2)**(b-1) )*dim_diff
        assert Gdim.shape[0] == X.shape[0]
        assert Gdim.shape[1] == Y.shape[0]
        return Gdim 
Developer: wittawatj, Project: kernel-gof, Lines: 25, Source: kernel.py
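For reference, the closed form that Gdim encodes, assuming the inverse multiquadric kernel $k(x, y) = (c^2 + \|x-y\|^2)^{b}$ with the same parameters b and c as in the code:

$$\frac{\partial k(x, y)}{\partial x_i} = 2b\,(c^2 + \|x-y\|^2)^{b-1}\,(x_i - y_i)$$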

Example 5: gradXY_sum

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def gradXY_sum(self, X, Y):
        """
        Compute
        \sum_{i=1}^d \frac{\partial^2 k(X, Y)}{\partial x_i \partial y_i}
        evaluated at each x_i in X, and y_i in Y.

        X: nx x d numpy array.
        Y: ny x d numpy array.

        Return a nx x ny numpy array of the derivatives.
        """
        b = self.b
        c = self.c
        D2 = util.dist2_matrix(X, Y)

        # d = input dimension
        d = X.shape[1]
        c2D2 = c**2 + D2
        T1 = -4.0*b*(b-1)*D2*(c2D2**(b-2) )
        T2 = -2.0*b*d*c2D2**(b-1)
        return T1 + T2

# end class KIMQ 
Developer: wittawatj, Project: kernel-gof, Lines: 25, Source: kernel.py
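Continuing the derivation given under Example 4, differentiating $\partial k/\partial x_i$ with respect to $y_i$ and summing over the $d$ dimensions gives exactly the two terms T1 and T2 computed above (with $D^2 = \|x-y\|^2$):

$$\sum_{i=1}^{d}\frac{\partial^2 k(x, y)}{\partial x_i\,\partial y_i}
= -4b(b-1)\,D^2\,(c^2 + D^2)^{b-2} \;-\; 2bd\,(c^2 + D^2)^{b-1}$$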

Example 6: pair_gradX_Y

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def pair_gradX_Y(self, X, Y):
        """
        Compute the gradient with respect to X in k(X, Y), evaluated at the
        specified X and Y.

        X: n x d
        Y: n x d

        Return a numpy array of size n x d
        """
        sigma2 = self.sigma2
        Kvec = self.pair_eval(X, Y)
        # n x d
        Diff = X - Y
        G = -Kvec[:, np.newaxis]*Diff/sigma2
        return G 
Developer: wittawatj, Project: kernel-gof, Lines: 18, Source: kernel.py
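The closed form behind this snippet, assuming the Gaussian kernel $k(x, y) = \exp\!\bigl(-\|x-y\|^2 / (2\sigma^2)\bigr)$ that pair_eval below evaluates, is

$$\frac{\partial k(x, y)}{\partial x} = -\frac{k(x, y)\,(x - y)}{\sigma^2},$$

which is what -Kvec[:, np.newaxis]*Diff/sigma2 computes row by row.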

Example 7: pair_eval

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def pair_eval(self, X, Y):
        """
        Evaluate k(x1, y1), k(x2, y2), ...

        Parameters
        ----------
        X, Y : n x d numpy array

        Return
        -------
        a numpy array with length n
        """
        (n1, d1) = X.shape
        (n2, d2) = Y.shape
        assert n1==n2, 'Two inputs must have the same number of instances'
        assert d1==d2, 'Two inputs must have the same dimension'
        D2 = np.sum( (X-Y)**2, 1)
        Kvec = np.exp(old_div(-D2,(2.0*self.sigma2)))
        return Kvec 
Developer: wittawatj, Project: kernel-gof, Lines: 21, Source: kernel.py
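A minimal standalone equivalent, written with plain numpy and ordinary division in place of old_div (typically imported from the python-future package); the function name pair_eval_gauss is hypothetical and sigma2 is the squared bandwidth:

import numpy as np

def pair_eval_gauss(X, Y, sigma2):
    assert X.shape == Y.shape, 'X and Y must have the same shape'
    D2 = np.sum((X - Y)**2, axis=1)
    return np.exp(-D2 / (2.0 * sigma2))

X = np.random.randn(4, 2)
Y = np.random.randn(4, 2)
print(pair_eval_gauss(X, Y, sigma2=1.5))  # length-4 vector of k(x_i, y_i)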

Example 8: gradY_X

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def gradY_X(self, X, Y, dim):
        """
        Compute the gradient with respect to the dimension dim of Y in k(X, Y).

        X: nx x d
        Y: ny x d

        Return a numpy array of size nx x ny.
        """
        gamma = 1/X.shape[1] if self.gamma is None else self.gamma

        if self.degree == 1:  # optimization, other expression is valid too
            out = gamma * X[:, dim, np.newaxis]  # nx x 1
            return np.repeat(out, Y.shape[0], axis=1)

        dot = np.dot(X, Y.T)
        return (self.degree * (gamma * dot + self.coef0) ** (self.degree - 1)
                * gamma * X[:, dim, np.newaxis]) 
Developer: wittawatj, Project: kernel-gof, Lines: 20, Source: kernel.py
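This snippet implements the derivative of the polynomial kernel $k(x, y) = (\gamma\,\langle x, y\rangle + c_0)^{p}$ with respect to coordinate $y_d$, where $p$ corresponds to self.degree and $c_0$ to self.coef0 in the code:

$$\frac{\partial k(x, y)}{\partial y_d} = p\,(\gamma\,\langle x, y\rangle + c_0)^{p-1}\,\gamma\,x_d$$

The degree == 1 branch is just this formula with the power term equal to one.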

Example 9: __init__

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def __init__(self, means, variances, pmix=None):
        """
        means: a k x d 2d array specifying the means.
        variances: a k x d x d numpy array containing a stack of k covariance
            matrices, one for each mixture component.
        pmix: a one-dimensional length-k array of mixture weights. Sum to one.
        """
        k, d = means.shape
        if k != variances.shape[0]:
            raise ValueError('Number of components in means and variances do not match.')

        if pmix is None:
            pmix = old_div(np.ones(k),float(k))

        if np.abs(np.sum(pmix) - 1) > 1e-8:
            raise ValueError('Mixture weights do not sum to 1.')

        self.pmix = pmix
        self.means = means
        self.variances = variances 
Developer: wittawatj, Project: kernel-gof, Lines: 22, Source: density.py
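A hedged usage sketch of this constructor; the class name GaussianMixture and the import path below are assumptions based on the attribution (kernel-gof's density module), not verified against a specific version:

import numpy as np
from kgof.density import GaussianMixture  # import path is an assumption

k, d = 2, 3
means = np.random.randn(k, d)
variances = np.stack([np.eye(d) for _ in range(k)])  # k x d x d stack of covariances
pmix = np.array([0.3, 0.7])                          # mixture weights, sum to 1
mixture = GaussianMixture(means, variances, pmix)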

Example 10: grad_log

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def grad_log(self, X):
        """
        Evaluate the gradients (with respect to the input) of the log density at
        each of the n points in X. This is the score function.

        X: n x d numpy array.

        Return an n x d numpy array of gradients.
        """
        XB = np.dot(X, self.B)
        Y = 0.5*XB + self.c
        E2y = np.exp(2*Y)
        # n x dh
        Phi = old_div((E2y-1.0),(E2y+1))
        # n x dx
        T = np.dot(Phi, 0.5*self.B.T)
        S = self.b - X + T
        return S 
Developer: wittawatj, Project: kernel-gof, Lines: 25, Source: density.py
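Written out, the snippet evaluates the following score for each row $x$ of X, using the identity $(e^{2y}-1)/(e^{2y}+1) = \tanh(y)$ that the Phi line computes:

$$\nabla_x \log p(x) = b - x + \tfrac{1}{2}\,B\,\tanh\!\bigl(\tfrac{1}{2}B^{\top}x + c\bigr)$$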

Example 11: __init__

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def __init__(self, p, gwidth2, test_locs, alpha=0.01, seed=28):
        """
        p: an instance of UnnormalizedDensity
        gwidth2: Gaussian width squared for the Gaussian kernel
        test_locs: J x d numpy array of J locations to test the difference
        alpha: significance level 
        """
        super(GaussMETest, self).__init__(p, alpha)
        self.gwidth2 = gwidth2
        self.test_locs = test_locs
        self.seed = seed
        ds = p.get_datasource()
        if ds is None:
            raise ValueError('%s test requires a density p which implements get_datasource()' % str(GaussMETest))

        # Construct the ME test
        metest = tst.MeanEmbeddingTest(test_locs, gwidth2, alpha=alpha)
        self.metest = metest 
Developer: wittawatj, Project: kernel-gof, Lines: 20, Source: intertst.py

Example 12: list_simulate_spectral

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def list_simulate_spectral(cov, J, n_simulate=1000, seed=82):
        """
        Simulate the null distribution using the spectrum of the covariance
        matrix.  This is intended to be used to approximate the null
        distribution.

        Return (a numpy array of simulated n*FSSD values, eigenvalues of cov)
        """
        # eigen decompose 
        eigs, _ = np.linalg.eig(cov)
        eigs = np.real(eigs)
        # sort in decreasing order 
        eigs = -np.sort(-eigs)
        sim_fssds = FSSD.simulate_null_dist(eigs, J, n_simulate=n_simulate,
                seed=seed)
        return sim_fssds, eigs 
Developer: wittawatj, Project: kernel-gof, Lines: 18, Source: goftest.py
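The eigenvalue handling alone can be sketched with plain numpy; the FSSD.simulate_null_dist call belongs to the kernel-gof package and is omitted here:

import numpy as np

cov = np.cov(np.random.randn(100, 5), rowvar=False)  # 5 x 5 covariance estimate
eigs, _ = np.linalg.eig(cov)
eigs = np.real(eigs)
eigs = -np.sort(-eigs)   # sort in decreasing order
print(eigs)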

Example 13: __init__

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def __init__(self, p, k, bootstrapper=bootstrapper_rademacher, alpha=0.01,
            n_simulate=500, seed=11):
        """
        p: an instance of UnnormalizedDensity
        k: a KSTKernel object
        bootstrapper: a function: (n) |-> numpy array of n weights 
            to be multiplied in the double sum of the test statistic for generating 
            bootstrap samples from the null distribution.
        alpha: significance level 
        n_simulate: The number of times to simulate from the null distribution
            by bootstrapping. Must be a positive integer.
        """
        super(KernelSteinTest, self).__init__(p, alpha)
        self.k = k
        self.bootstrapper = bootstrapper
        self.n_simulate = n_simulate
        self.seed = seed 
Developer: wittawatj, Project: kernel-gof, Lines: 19, Source: goftest.py

Example 14: batched_dot

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def batched_dot(a, b):
    if len(a.shape) != 3 or len(b.shape) != 3 or a.shape[0] != b.shape[0]:
        raise ValueError("a,b must be 3-dimensional arrays, with a.shape[0]==b.shape[0] and a.shape[2]==b.shape[1]")
    elif a.shape[0] == 1:
        ## use numpy.dot for blas
        a = np.reshape(a, a.shape[1:])
        b = np.reshape(b, b.shape[1:])
        c = np.dot(a, b)
        return np.reshape(c, [1] + list(c.shape))
    elif a.shape[2] == 1:
        ## the main cost is simply allocating space for the array,
        ## so we are better off doing things in serial
        a = np.reshape(a, a.shape[:-1])
        b = np.reshape(b, (b.shape[0], b.shape[2]))
        if a.shape[-1] > 1 and b.shape[-1] > 1:
            ## batch outer product
            return np.einsum("ij,ik->ijk", a, b)
        else:
            ## broadcasted element-wise multiplication
            outshape = (a.shape[0], a.shape[1], b.shape[1])
            a = np.transpose(a)
            b = np.transpose(b)
            if a.shape[0] == 1:
                a = np.reshape(a, [-1])
            if b.shape[0] == 1:
                b = np.reshape(b, [-1])
            return np.transpose(np.reshape(a*b, outshape[::-1]))
    else:
        ## parallel batched matrix multiply
        return _par_matmul(a, b) 
Developer: popgenmethods, Project: momi2, Lines: 32, Source: einsum2.py
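What batched_dot computes is the batched matrix product. A plain-numpy reference for comparison (a sketch of the expected result, not the momi2 implementation):

import numpy as np

a = np.random.randn(4, 2, 3)
b = np.random.randn(4, 3, 5)
ref = np.einsum("ijk,ikl->ijl", a, b)     # batched matrix multiply
assert np.allclose(ref, np.matmul(a, b))  # same result via matmul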

Example 15: einsum2

# Required module import: import autograd [as alias]
# Or: from autograd import numpy [as alias]
def einsum2(*args, **kwargs):
    """
    einsum2(subscripts_str, arr0, arr1)
    or,
    einsum2(arr0, subscript_list0, arr1, subscript_list1,
            output_subscript_list)

    This function is similar to einsum, except it only operates
    on two input arrays, does not allow diagonal operations
    (repeated subscripts on the same array), and requires the output
    subscripts to always be specified. Also, when specifying
    subscripts via lists, the subscripts can be arbitrary keys
    (unlike numpy.einsum, where they have to be integers).

    Unlike the standard einsum, einsum2 will perform computations
    in parallel. The number of parallel threads is selected automatically,
    but you can also control this with the environment variable
    OMP_NUM_THREADS.

    To perform the parallel computation, einsum2 will either use
    numpy.dot (if possible), otherwise it will use a parallel
    for loop. The advantage of using numpy.dot is that it
    uses BLAS which is much faster than a for loop. However,
    you need to make sure numpy is compiled against a parallel BLAS
    implementation such as MKL or OpenBLAS. You won't need to worry
    about this for most packaged, precompiled versions of numpy
    (e.g. Anaconda Python).
    """
    if isinstance(args[0], str):
        subscripts, a, b = args[:3]
        ab_subs, out_subs = subscripts.split("->")
        a_subs, b_subs = ab_subs.split(",")
        return _einsum2(a, list(a_subs), b, list(b_subs), list(out_subs), *args[3:], **kwargs)
    else:
        return _einsum2(*args, **kwargs) 
Developer: popgenmethods, Project: momi2, Lines: 37, Source: einsum2.py
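A hedged usage sketch of the two calling conventions described in the docstring; the import path is an assumption and may differ between the standalone einsum2 package and momi2:

import numpy as np
from einsum2 import einsum2  # import path is an assumption

a = np.random.randn(3, 4)
b = np.random.randn(4, 5)
c1 = einsum2("ij,jk->ik", a, b)                                         # string form
c2 = einsum2(a, ["rows", "mid"], b, ["mid", "cols"], ["rows", "cols"])  # list form, arbitrary keys
assert np.allclose(c1, a.dot(b)) and np.allclose(c2, a.dot(b))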


Note: The autograd.numpy method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.