

Python numpy.dot Method Code Examples

This article collects typical usage examples of the Python method autograd.numpy.dot. If you are wondering what numpy.dot does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the enclosing module, autograd.numpy.


Fifteen code examples of numpy.dot are shown below, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python code samples.

Example 1: objective

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def objective(self, w):
        obj = 0
        N = float(sum([np.sum(d[1]) for d in self.data_list]))
        for F,S in self.data_list:
            psi = np.dot(F, w)
            lam = self.link(psi)
            obj -= np.sum(S * np.log(lam) - lam * self.dt) / N
            # assert np.isfinite(obj)

        # Add penalties
        obj += (0.5 * np.sum(w[1:]**2) / self.sigma**2) / N
        obj += np.sum(np.abs(w[1:]) * self.lmbda) / N

        # assert np.isfinite(obj)

        return obj 
Author: slinderman | Project: pyhawkes | Lines: 18 | Source: standard_models.py
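
Note: objective is a method that reads data_list, link, dt, sigma, and lmbda off self. Below is a minimal sketch of evaluating it (and its autograd gradient) against a hypothetical stand-in for self; all names and values are illustrative, not pyhawkes code.

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad
from types import SimpleNamespace

rs = npr.RandomState(0)
T, B = 100, 4
F = np.abs(rs.randn(T, B))                 # basis features
S = rs.poisson(1.0, size=T)                # event counts per bin
model = SimpleNamespace(data_list=[(F, S)], link=np.exp,
                        dt=1.0, sigma=1.0, lmbda=0.1)

w = 0.01 * rs.randn(B)
print(objective(model, w))                        # scalar penalized loss
print(grad(lambda w: objective(model, w))(w))     # autograd differentiates through np.dot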

Example 2: check_gradient

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def check_gradient(f, x):
    print(x, "\n", f(x))

    print("# grad2")
    grad2 = Gradient(f)(x)
    print("# building grad1")
    g = grad(f)
    print("# computing grad1")
    grad1 = g(x)

    print("gradient1\n", grad1, "\ngradient2\n", grad2)
    assert np.allclose(grad1, grad2)

    # check Hessian vector product
    y = np.random.normal(size=x.shape)
    gdot = lambda u: np.dot(g(u), y)
    hess1, hess2 = grad(gdot)(x), Gradient(gdot)(x)
    print("hess1\n", hess1, "\nhess2\n", hess2)
    assert np.allclose(hess1, hess2)
Author: popgenmethods | Project: momi2 | Lines: 21 | Source: test_autograd.py
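
A minimal sketch of calling check_gradient, assuming Gradient is a numerical differentiator such as numdifftools.Gradient (its import is not shown in the snippet):

import autograd.numpy as np
from autograd import grad
from numdifftools import Gradient   # assumed source of Gradient

f = lambda x: np.sum(np.tanh(x) ** 2)   # smooth scalar-valued test function
check_gradient(f, np.array([0.1, -0.3, 0.7]))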

Example 3: generate_data

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def generate_data(model_params, T=5, rs=npr.RandomState(0)):
    mu0, Sigma0, A, Q, C, R = model_params
    Dx = mu0.shape[0]
    Dy = R.shape[0]
    
    x_true = np.zeros((T,Dx))
    y_true = np.zeros((T,Dy))

    for t in range(T):
        if t > 0:
            x_true[t,:] = rs.multivariate_normal(np.dot(A,x_true[t-1,:]),Q)
        else:
            x_true[0,:] = rs.multivariate_normal(mu0,Sigma0)
        y_true[t,:] = rs.multivariate_normal(np.dot(C,x_true[t,:]),R)
        
    return x_true, y_true 
Author: blei-lab | Project: variational-smc | Lines: 18 | Source: lgss_example.py
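
For example, generate_data can be driven with a hypothetical one-dimensional linear-Gaussian model (all parameter values below are illustrative):

import autograd.numpy as np
import autograd.numpy.random as npr

Dx = Dy = 1
mu0, Sigma0 = np.zeros(Dx), np.eye(Dx)
A, Q = 0.9 * np.eye(Dx), 0.1 * np.eye(Dx)    # transition and process noise
C, R = np.eye(Dy), 0.05 * np.eye(Dy)         # emission and observation noise

x, y = generate_data((mu0, Sigma0, A, Q, C, R), T=10, rs=npr.RandomState(1))
print(x.shape, y.shape)   # (10, 1) (10, 1)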

Example 4: average_path_length

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def average_path_length(tree, X):
    """Compute average path length: cost of simulating the average
    example; this is used in the objective function.

    @param tree: DecisionTreeClassifier instance
    @param X: NumPy array (N x D)
              N := number of examples
              D := number of dimensions
    @return path_length: float
                         average path length
    """
    leaf_indices = tree.apply(X)
    # pad counts to node_count so they line up with leaf_i in the dot product
    leaf_counts = np.bincount(leaf_indices, minlength=tree.tree_.node_count)
    leaf_i = np.arange(tree.tree_.node_count)
    path_length = np.dot(leaf_i, leaf_counts) / float(X.shape[0])
    return path_length 
Author: dtak | Project: tree-regularization-public | Lines: 18 | Source: train.py
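
A toy run with a scikit-learn tree on synthetic data (the dataset below is made up for illustration):

from sklearn.tree import DecisionTreeClassifier
import autograd.numpy.random as npr

rs = npr.RandomState(0)
X = rs.randn(200, 4)
y = (X[:, 0] > 0).astype(int)
tree = DecisionTreeClassifier(max_depth=3).fit(X, y)
print(average_path_length(tree, X))   # mean leaf index over examples (the repo's path-cost proxy)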

Example 5: log_prior

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def log_prior(self):
        # Normal N(mu | mu_0, Sigma / kappa_0)
        from scipy.linalg import solve_triangular
        sigma = np.linalg.inv(self.J_0)
        mu = sigma.dot(self.h_0)
        S_chol = np.linalg.cholesky(sigma)

        # Stack log pi and W
        X = np.vstack((self.logpi, self.W)).T

        lp = 0
        for d in range(self.D_out):
            x = solve_triangular(S_chol, X[d] - mu, lower=True)
            lp += -1. / 2. * np.dot(x, x) \
                  - self.D_in / 2 * np.log(2 * np.pi) \
                  - np.log(S_chol.diagonal()).sum()

        return lp

    ### HMC 
Author: slinderman | Project: recurrent-slds | Lines: 22 | Source: transitions.py
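
log_prior is also a method, so a standalone check needs a stand-in for self. The attribute names below mirror what the method reads; the values are illustrative only.

import autograd.numpy as np
from types import SimpleNamespace

D_in, K = 3, 2                      # [logpi; W] stacks to D_in rows, K columns
model = SimpleNamespace(
    J_0=np.eye(D_in), h_0=np.zeros(D_in),        # natural parameters of the prior
    logpi=np.zeros((1, K)), W=np.zeros((D_in - 1, K)),
    D_out=K, D_in=D_in)
print(log_prior(model))             # log density of [logpi; W] under the Gaussian prior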

Example 6: get_log_trans_matrices

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def get_log_trans_matrices(self, X):
        """
        Get log transition matrices as a function of X

        :param X: inputs/covariates
        :return: stack of transition matrices log A[t] \in Kin x Kout
        """
        # compute the contribution of the covariate to transition matrix
        psi_X = np.dot(X, self.W)

        # add the (T x Kout) and (Kin x Kout) matrices together such that they
        # broadcast into a (T x Kin x Kout) stack of matrices
        psi = psi_X[:, None, :] + self.logpi

        # apply softmax and normalize over outputs
        log_trans_matrices = psi - amisc.logsumexp(psi, axis=2, keepdims=True)

        return log_trans_matrices 
Author: slinderman | Project: recurrent-slds | Lines: 20 | Source: transitions.py
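
The broadcasting step is the heart of this method. Here is a standalone numeric sketch, assuming logsumexp lives in autograd.scipy.special (as in newer autograd; the amisc alias in the source likely pointed at autograd.scipy.misc):

import autograd.numpy as np
from autograd.scipy.special import logsumexp

T, Kin, Kout = 4, 3, 3
psi_X = np.random.randn(T, Kout)          # covariate contribution per timestep
logpi = np.random.randn(Kin, Kout)        # baseline log transition weights
psi = psi_X[:, None, :] + logpi           # broadcasts to (T, Kin, Kout)
log_A = psi - logsumexp(psi, axis=2, keepdims=True)
print(np.exp(log_A).sum(axis=2))          # every row of every matrix sums to 1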

Example 7: joint_log_probability

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def joint_log_probability(self, logpi, W, stateseqs, covseqs):
        K, D = self.num_states, self.covariate_dim

        # Compute the objective
        ll = 0
        for z, x in zip(stateseqs, covseqs):
            T = z.size
            assert x.ndim == 2 and x.shape[0] == T - 1
            z_prev = one_hot(z[:-1], K)
            z_next = one_hot(z[1:], K)

            # Numerator
            tmp = anp.dot(z_prev, logpi) + anp.dot(x, W)
            ll += anp.sum(tmp * z_next)

            # Denominator
            Z = amisc.logsumexp(tmp, axis=1)
            ll -= anp.sum(Z)

        return ll 
Author: slinderman | Project: recurrent-slds | Lines: 22 | Source: transitions.py
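
one_hot and the amisc alias are not shown in the snippet; the sketch below supplies plausible stand-ins plus a synthetic state/covariate sequence (all assumed for illustration, not recurrent-slds code):

import autograd.numpy as anp
import autograd.numpy.random as npr
from autograd.scipy.special import logsumexp
from types import SimpleNamespace

def one_hot(z, K):
    return anp.eye(K)[z]            # integer labels -> indicator rows

class amisc:                        # stand-in for the aliased misc module
    logsumexp = staticmethod(logsumexp)

K, D, T = 3, 2, 10
rs = npr.RandomState(0)
z = rs.randint(K, size=T)           # one discrete state sequence
x = rs.randn(T - 1, D)              # covariates for the T-1 transitions
model = SimpleNamespace(num_states=K, covariate_dim=D)
print(joint_log_probability(model, rs.randn(K, K), rs.randn(D, K), [z], [x]))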

Example 8: make_nn_funs

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def make_nn_funs(input_shape, layer_specs, L2_reg):
    parser = WeightsParser()
    cur_shape = input_shape
    for layer in layer_specs:
        N_weights, cur_shape = layer.build_weights_dict(cur_shape)
        parser.add_weights(layer, (N_weights,))

    def predictions(W_vect, inputs):
        """Outputs normalized log-probabilities.
        shape of inputs : [data, color, y, x]"""
        cur_units = inputs
        for layer in layer_specs:
            cur_weights = parser.get(W_vect, layer)
            cur_units = layer.forward_pass(cur_units, cur_weights)
        return cur_units

    def loss(W_vect, X, T):
        log_prior = -L2_reg * np.dot(W_vect, W_vect)
        log_lik = np.sum(predictions(W_vect, X) * T)
        return - log_prior - log_lik

    def frac_err(W_vect, X, T):
        return np.mean(np.argmax(T, axis=1) != np.argmax(predictions(W_vect, X), axis=1))

    return parser.N, predictions, loss, frac_err 
Author: HIPS | Project: autograd | Lines: 27 | Source: convnet.py
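
WeightsParser and the layer objects are not shown here; a minimal stand-in pair (interfaces inferred from how they are called above, not the library's actual classes) makes the wiring concrete:

import autograd.numpy as np

class WeightsParser:
    """Maps per-layer weight blocks into one flat vector (minimal stand-in)."""
    def __init__(self):
        self.idxs_and_shapes = {}
        self.N = 0
    def add_weights(self, name, shape):
        size = int(np.prod(shape))
        self.idxs_and_shapes[name] = (slice(self.N, self.N + size), shape)
        self.N += size
    def get(self, vect, name):
        idxs, shape = self.idxs_and_shapes[name]
        return np.reshape(vect[idxs], shape)

class Dense:
    """Toy dense layer with the interface make_nn_funs expects."""
    def __init__(self, n_out):
        self.n_out = n_out
    def build_weights_dict(self, input_shape):
        self.n_in = input_shape[0]
        return self.n_in * self.n_out, (self.n_out,)
    def forward_pass(self, inputs, weights):
        return np.dot(inputs, np.reshape(weights, (self.n_in, self.n_out)))

N_w, pred, loss, err = make_nn_funs((4,), [Dense(3)], L2_reg=0.1)
W = 0.01 * np.random.randn(N_w)
X, T = np.random.randn(8, 4), np.eye(3)[np.random.randint(3, size=8)]
print(loss(W, X, T), err(W, X, T))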

Example 9: setup

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def setup(self):
        self.batch_size = 16
        self.dtype = "float32"
        self.D = 2**10
        self.x = 0.01 * np.random.randn(self.batch_size,self.D).astype(self.dtype)
        self.W1 = 0.01 * np.random.randn(self.D,self.D).astype(self.dtype)
        self.b1 = 0.01 * np.random.randn(self.D).astype(self.dtype)
        self.Wout = 0.01 * np.random.randn(self.D,1).astype(self.dtype)
        self.bout = 0.01 * np.random.randn(1).astype(self.dtype)
        self.l = (np.random.rand(self.batch_size,1) > 0.5).astype(self.dtype)
        self.n = 50

        def autograd_rnn(params, x, label, n):
            W, b, Wout, bout = params
            h1 = x
            for i in range(n):
                h1 = np.tanh(np.dot(h1, W) + b)
            logit = np.dot(h1, Wout) + bout
            loss = -np.sum(label * logit - (
                    logit + np.log(1 + np.exp(-logit))))
            return loss

        self.fn = autograd_rnn
        self.grad_fn = grad(self.fn) 
Author: HIPS | Project: autograd | Lines: 26 | Source: bench_rnn.py
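
setup mutates self, so outside the benchmark harness it can be attached to a throwaway class (the class name below is hypothetical):

import autograd.numpy as np
from autograd import grad

class RNNBench:                 # hypothetical shell for the setup method above
    pass

RNNBench.setup = setup
b = RNNBench()
b.setup()
params = (b.W1, b.b1, b.Wout, b.bout)
print(b.fn(params, b.x, b.l, b.n))        # forward loss
g = b.grad_fn(params, b.x, b.l, b.n)      # gradients w.r.t. all four arrays
print([gi.shape for gi in g])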

Example 10: test_grad_and_aux

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def test_grad_and_aux():
    A = npr.randn(5, 4)
    x = npr.randn(4)

    f = lambda x: (np.sum(np.dot(A, x)), x**2)
    g = lambda x: np.sum(np.dot(A, x))

    assert len(grad_and_aux(f)(x)) == 2

    check_equivalent(grad_and_aux(f)(x)[0], grad(g)(x))
    check_equivalent(grad_and_aux(f)(x)[1], x**2)

## This behavior is no longer supported
# def test_make_ggnvp_broadcasting():
#   A = npr.randn(4, 5)
#   x = npr.randn(10, 4)
#   v = npr.randn(10, 4)

#   fun = lambda x: np.tanh(np.dot(x, A))
#   res1 = np.stack([_make_explicit_ggnvp(fun)(xi)(vi) for xi, vi in zip(x, v)])
#   res2 = make_ggnvp(fun)(x)(v)
#   check_equivalent(res1, res2) 
Author: HIPS | Project: autograd | Lines: 24 | Source: test_wrappers.py

Example 11: optimize

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def optimize(self, x0, target):
        """Calculate an optimum argument of an objective function."""
        x = x0
        for i in range(self.maxiter):
            g = self.g(x, target)
            h = self.h(x, target)
            if i == 0:
                alpha = 0
                m = g
            else:
                alpha = - np.dot(m, np.dot(h, g)) / np.dot(m, np.dot(h, m))
                m = g + np.dot(alpha, m)
            t = - np.dot(m, g) / np.dot(m, np.dot(h, m))
            delta = np.dot(t, m)
            x = x + delta
            if np.linalg.norm(delta) < self.tol:
                break
        return x 
Author: lanius | Project: tinyik | Lines: 20 | Source: optimizer.py
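
In tinyik, g and h are wired up by an optimizer class around an inverse-kinematics objective. The shell below is an assumed stand-in (not the library's actual API) that uses autograd on an anisotropic quadratic just to exercise the method:

import autograd.numpy as np
from autograd import grad, hessian

class CGSketch:                      # hypothetical shell around optimize
    def __init__(self, f, maxiter=100, tol=1e-6):
        self.g = grad(f)             # gradient in the first argument
        self.h = hessian(f)          # Hessian in the first argument
        self.maxiter, self.tol = maxiter, tol

CGSketch.optimize = optimize         # reuse the method defined above

c = np.array([1.0, 2.0, 3.0])        # anisotropic curvature
f = lambda x, target: np.sum(c * (x - target) ** 2)
opt = CGSketch(f)
print(opt.optimize(np.zeros(3), np.array([1.0, 2.0, 3.0])))  # ~ [1. 2. 3.]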

Example 12: kernel

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def kernel(X, Xp, hyp):
    output_scale = np.exp(hyp[0])
    lengthscales = np.sqrt(np.exp(hyp[1:]))
    X = X/lengthscales
    Xp = Xp/lengthscales
    X_SumSquare = np.sum(np.square(X), axis=1)
    Xp_SumSquare = np.sum(np.square(Xp), axis=1)
    mul = np.dot(X, Xp.T)
    dists = X_SumSquare[:, np.newaxis] + Xp_SumSquare - 2.0 * mul
    return output_scale * np.exp(-0.5 * dists) 
Author: maziarraissi | Project: ParametricGP | Lines: 12 | Source: Utilities.py
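
hyp packs the log output scale followed by one log squared lengthscale per input dimension. A quick sanity check:

import autograd.numpy as np

X = np.random.randn(5, 2)
hyp = np.zeros(1 + 2)                 # output scale 1, unit lengthscales
K = kernel(X, X, hyp)
print(K.shape)                        # (5, 5)
print(np.allclose(np.diag(K), 1.0))   # zero distance -> output_scale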

Example 13: predict_kaf_nn

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def predict_kaf_nn(w, X, info):
    """
    Compute the outputs of a KAF feedforward network.
    """
    
    D, gamma = info
    for W, b, alpha in w:
        outputs = np.dot(X, W) + b
        K = gauss_kernel(outputs, D, gamma)
        X = np.sum(K*alpha, axis=2)
    return X 
Author: ispamm | Project: kernel-activation-functions | Lines: 13 | Source: kafnets.py
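
gauss_kernel and the layer parameters are not shown in the snippet; the kernel below is a plausible stand-in (a Gaussian bump around each dictionary point), with made-up dimensions for a shape check:

import autograd.numpy as np
import autograd.numpy.random as npr

def gauss_kernel(H, D, gamma):
    # (N, units, len(D)) stack of RBF responses around dictionary points D
    return np.exp(-gamma * (H[:, :, None] - D[None, None, :]) ** 2)

D = np.linspace(-2.0, 2.0, 20)        # shared dictionary of kernel centers
gamma = 1.0
rs = npr.RandomState(0)
w = [(rs.randn(4, 8), rs.randn(8), rs.randn(1, 8, 20)),   # (W, b, alpha) per layer
     (rs.randn(8, 2), rs.randn(2), rs.randn(1, 2, 20))]
X = rs.randn(5, 4)
print(predict_kaf_nn(w, X, (D, gamma)).shape)   # (5, 2)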

Example 14: dist

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def dist(x1, x2):
    """ Compute squared euclidean distance between samples (autograd)
    """
    x1p2 = np.sum(np.square(x1), 1)
    x2p2 = np.sum(np.square(x2), 1)
    return x1p2.reshape((-1, 1)) + x2p2.reshape((1, -1)) - 2 * np.dot(x1, x2.T) 
Author: PythonOT | Project: POT | Lines: 8 | Source: dr.py
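
A quick sanity check of the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b that dist vectorizes:

import autograd.numpy as np
import autograd.numpy.random as npr

rs = npr.RandomState(0)
a, b = rs.randn(4, 3), rs.randn(6, 3)
D2 = dist(a, b)
print(D2.shape)                                            # (4, 6)
print(np.allclose(D2[0, 0], np.sum((a[0] - b[0]) ** 2)))   # True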

Example 15: sinkhorn

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import dot [as alias]
def sinkhorn(w1, w2, M, reg, k):
    """Sinkhorn algorithm with fixed number of iteration (autograd)
    """
    K = np.exp(-M / reg)
    ui = np.ones((M.shape[0],))
    vi = np.ones((M.shape[1],))
    for i in range(k):
        vi = w2 / (np.dot(K.T, ui))
        ui = w1 / (np.dot(K, vi))
    G = ui.reshape((M.shape[0], 1)) * K * vi.reshape((1, M.shape[1]))
    return G 
Author: PythonOT | Project: POT | Lines: 13 | Source: dr.py
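
A toy run on two uniform histograms, reusing dist from Example 14 to build the cost matrix (the reg and k values are illustrative):

import autograd.numpy as np
import autograd.numpy.random as npr

rs = npr.RandomState(0)
n = 5
w1 = np.ones(n) / n
w2 = np.ones(n) / n
M = dist(rs.randn(n, 2), rs.randn(n, 2))   # squared-distance cost matrix
G = sinkhorn(w1, w2, M, reg=1.0, k=100)
print(G.sum(axis=1))   # ~ w1
print(G.sum(axis=0))   # ~ w2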


Note: the autograd.numpy.dot examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or distribution must follow the corresponding project's license. Do not republish without permission.