Python numpy.matmul Method Code Examples

This article collects typical usage examples of the numpy.matmul method from the Python package autograd. If you have been wondering what numpy.matmul does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore other methods of autograd.numpy for further usage examples.


Below are 12 code examples of the numpy.matmul method, ordered by popularity by default.
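
As a quick orientation (this sketch is not taken from any of the projects below), the point all of these examples share is that autograd can differentiate straight through np.matmul:

import autograd.numpy as np
from autograd import grad

def quadratic_form(w, A):
    # Scalar-valued w^T A w, built from two matmuls.
    return np.matmul(w, np.matmul(A, w))

A = np.array([[2.0, 0.5], [0.5, 1.0]])
w = np.array([1.0, -1.0])

# For symmetric A, d/dw (w^T A w) = 2 A w.
print(grad(quadratic_form)(w, A))  # -> [ 3. -1.]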

Example 1: forward_pass

# Required import: from autograd import numpy as np
# Or: from autograd.numpy import matmul
def forward_pass(self, X, hyp):
    Q = self.hidden_dim
    H = np.zeros((X.shape[1], Q))

    # Unpack the recurrent weights from the flat parameter vector hyp:
    # U maps inputs to the hidden state, W is the recurrent matrix, b the bias.
    idx_1 = 0
    idx_2 = idx_1 + self.X_dim*Q
    idx_3 = idx_2 + Q
    idx_4 = idx_3 + Q*Q
    U = np.reshape(hyp[idx_1:idx_2], (self.X_dim, Q))
    b = np.reshape(hyp[idx_2:idx_3], (1, Q))
    W = np.reshape(hyp[idx_3:idx_4], (Q, Q))

    # Unroll the recurrence over the lagged inputs.
    for i in range(0, self.lags):
        H = activation(np.matmul(H, W) + np.matmul(X[i, :, :], U) + b)

    # Unpack the output layer and map the final hidden state to the output.
    idx_1 = idx_4
    idx_2 = idx_1 + Q*self.Y_dim
    idx_3 = idx_2 + self.Y_dim
    V = np.reshape(hyp[idx_1:idx_2], (Q, self.Y_dim))
    c = np.reshape(hyp[idx_2:idx_3], (1, self.Y_dim))
    Y = np.matmul(H, V) + c

    return Y
Author: maziarraissi, Project: DeepLearningTutorial, Source: RecurrentNeuralNetworks.py
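
The only non-obvious part of Example 1 is the layout of the flat parameter vector hyp. A small sketch, with made-up dimensions and assuming the tutorial's activation is np.tanh, of sizing hyp so the unpacking above succeeds:

import autograd.numpy as np

X_dim, Y_dim, Q, lags = 3, 1, 8, 5
# hyp packs U (X_dim*Q), b (Q), W (Q*Q), V (Q*Y_dim), c (Y_dim) end to end.
n_hyp = X_dim*Q + Q + Q*Q + Q*Y_dim + Y_dim
hyp = 0.1 * np.random.randn(n_hyp)

X = np.random.randn(lags, 32, X_dim)       # lags x batch x X_dim, as indexed by X[i, :, :]
U = np.reshape(hyp[:X_dim*Q], (X_dim, Q))  # matches the first unpack inside forward_pass

Calling model.forward_pass(X, hyp) on such inputs would return a (32, Y_dim) array.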

Example 2: forward_pass

# Required import: from autograd import numpy as np
# Or: from autograd.numpy import matmul
def forward_pass(self, X, Q, hyp):
    H = X
    idx_3 = 0
    layers = Q.shape[0]
    # Hidden layers: Q[layer] holds the width of each layer.
    for layer in range(0, layers - 2):
        idx_1 = idx_3
        idx_2 = idx_1 + Q[layer]*Q[layer+1]
        idx_3 = idx_2 + Q[layer+1]
        A = np.reshape(hyp[idx_1:idx_2], (Q[layer], Q[layer+1]))
        b = np.reshape(hyp[idx_2:idx_3], (1, Q[layer+1]))
        H = activation(np.matmul(H, A) + b)

    # Linear output layer (no activation).
    idx_1 = idx_3
    idx_2 = idx_1 + Q[-2]*Q[-1]
    idx_3 = idx_2 + Q[-1]
    A = np.reshape(hyp[idx_1:idx_2], (Q[-2], Q[-1]))
    b = np.reshape(hyp[idx_2:idx_3], (1, Q[-1]))
    mu = np.matmul(H, A) + b

    return mu
Author: maziarraissi, Project: DeepLearningTutorial, Source: NeuralNetworks.py
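
Here Q is an array of layer widths (input, hidden..., output). A hypothetical helper, not part of the project, for computing how long hyp must be for a given Q:

import autograd.numpy as np

def hyp_length(Q):
    # One (Q[l] x Q[l+1]) weight matrix plus a Q[l+1] bias per layer.
    return sum(int(Q[l]*Q[l+1] + Q[l+1]) for l in range(len(Q) - 1))

Q = np.array([2, 50, 50, 1])          # input, two hidden layers, output
hyp = np.random.randn(hyp_length(Q))  # length 150 + 2550 + 51 = 2751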

Example 3: backward

# Required import: from autograd import numpy as np
# Or: from autograd.numpy import matmul
def backward(self, x_seq, u_seq):
    # Boundary condition: value function and its derivatives at the final state.
    self.v[-1] = self.lf(x_seq[-1])
    self.v_x[-1] = self.lf_x(x_seq[-1])
    self.v_xx[-1] = self.lf_xx(x_seq[-1])
    k_seq = []
    kk_seq = []
    for t in range(self.pred_time - 1, -1, -1):
        f_x_t = self.f_x(x_seq[t], u_seq[t])
        f_u_t = self.f_u(x_seq[t], u_seq[t])
        # Q-function derivatives (second-order expansion of the Bellman equation).
        q_x = self.l_x(x_seq[t], u_seq[t]) + np.matmul(f_x_t.T, self.v_x[t + 1])
        q_u = self.l_u(x_seq[t], u_seq[t]) + np.matmul(f_u_t.T, self.v_x[t + 1])
        q_xx = self.l_xx(x_seq[t], u_seq[t]) + \
            np.matmul(np.matmul(f_x_t.T, self.v_xx[t + 1]), f_x_t) + \
            np.dot(self.v_x[t + 1], np.squeeze(self.f_xx(x_seq[t], u_seq[t])))
        tmp = np.matmul(f_u_t.T, self.v_xx[t + 1])
        q_uu = self.l_uu(x_seq[t], u_seq[t]) + np.matmul(tmp, f_u_t) + \
            np.dot(self.v_x[t + 1], np.squeeze(self.f_uu(x_seq[t], u_seq[t])))
        q_ux = self.l_ux(x_seq[t], u_seq[t]) + np.matmul(tmp, f_x_t) + \
            np.dot(self.v_x[t + 1], np.squeeze(self.f_ux(x_seq[t], u_seq[t])))
        # Feedforward (k) and feedback (kk) gains.
        inv_q_uu = np.linalg.inv(q_uu)
        k = -np.matmul(inv_q_uu, q_u)
        kk = -np.matmul(inv_q_uu, q_ux)
        dv = 0.5 * np.matmul(q_u, k)
        self.v[t] += dv
        self.v_x[t] = q_x - np.matmul(np.matmul(q_u, inv_q_uu), q_ux)
        self.v_xx[t] = q_xx + np.matmul(q_ux.T, kk)
        k_seq.append(k)
        kk_seq.append(kk)
    k_seq.reverse()
    kk_seq.reverse()
    return k_seq, kk_seq
Author: neka-nat, Project: ddp-gym, Source: ddp_gym.py
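
The loop above is the standard second-order (DDP/iLQR) backward recursion; in the code's notation, with $V'$ the value function at $t+1$:

$$\begin{aligned}
Q_x &= \ell_x + f_x^\top V'_x, & Q_u &= \ell_u + f_u^\top V'_x,\\
Q_{xx} &= \ell_{xx} + f_x^\top V'_{xx} f_x + V'_x \cdot f_{xx}, & Q_{uu} &= \ell_{uu} + f_u^\top V'_{xx} f_u + V'_x \cdot f_{uu},\\
Q_{ux} &= \ell_{ux} + f_u^\top V'_{xx} f_x + V'_x \cdot f_{ux}, & k &= -Q_{uu}^{-1} Q_u, \quad K = -Q_{uu}^{-1} Q_{ux},
\end{aligned}$$

where k and K are the feedforward and feedback gains (kk in the code), and the value function is rolled back as $V_x = Q_x - Q_u Q_{uu}^{-1} Q_{ux}$ and $V_{xx} = Q_{xx} + Q_{ux}^\top K$.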

Example 4: forward

# Required import: from autograd import numpy as np
# Or: from autograd.numpy import matmul
def forward(self, x_seq, u_seq, k_seq, kk_seq):
    x_seq_hat = np.array(x_seq)
    u_seq_hat = np.array(u_seq)
    for t in range(len(u_seq)):
        # Apply the feedforward and feedback gains, then clamp to the control limits.
        control = k_seq[t] + np.matmul(kk_seq[t], (x_seq_hat[t] - x_seq[t]))
        u_seq_hat[t] = np.clip(u_seq[t] + control, -self.umax, self.umax)
        x_seq_hat[t + 1] = self.f(x_seq_hat[t], u_seq_hat[t])
    return x_seq_hat, u_seq_hat
Author: neka-nat, Project: ddp-gym, Source: ddp_gym.py
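
In equation form, the rollout applies the affine control law $\hat u_t = \operatorname{clip}\!\big(u_t + k_t + K_t(\hat x_t - x_t),\ -u_{\max},\ u_{\max}\big)$ and propagates $\hat x_{t+1} = f(\hat x_t, \hat u_t)$; the feedback term vanishes at $t = 0$ (where $\hat x_0 = x_0$) and corrects the trajectory as it drifts from the nominal one.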

Example 5: taylor_approx

# Required import: from autograd import numpy as np
# Or: from autograd.numpy import matmul
def taylor_approx(target, stencil, values):
  """Use taylor series to approximate up to second order derivatives.

  Args:
    target: An array of shape (..., n), a batch of n-dimensional points
      where one wants to approximate function value and derivatives.
    stencil: An array of shape broadcastable to (..., k, n), for each target
      point a set of k = triangle(n + 1) points to use on its approximation.
    values: An array of shape broadcastable to (..., k), the function value at
      each of the stencil points.

  Returns:
    An array of shape (..., k), for each target point the approximated
    function value, gradient and hessian evaluated at that point (flattened
    and in the same order as returned by derivative_names).
  """
  # Broadcast arrays to their required shape.
  batch_shape, ndim = target.shape[:-1], target.shape[-1]
  stencil = np.broadcast_to(stencil, batch_shape + (triangular(ndim + 1), ndim))
  values = np.broadcast_to(values, stencil.shape[:-1])

  # Subtract target from each stencil point.
  delta_x = stencil - np.expand_dims(target, axis=-2)
  delta_xy = np.matmul(
      np.expand_dims(delta_x, axis=-1), np.expand_dims(delta_x, axis=-2))
  i = np.arange(ndim)
  j, k = np.triu_indices(ndim, k=1)

  # Build coefficients for the Taylor series equations, namely:
  #   f(stencil) = coeffs @ [f(target), df/d0(target), ...]
  coeffs = np.concatenate([
      np.ones(delta_x.shape[:-1] + (1,)),  # f(target)
      delta_x,  # df/di(target)
      delta_xy[..., i, i] / 2,  # d^2f/di^2(target)
      delta_xy[..., j, k],  # d^2f/{dj dk}(target)
  ], axis=-1)

  # Then: [f(target), df/d0(target), ...] = coeffs^{-1} @ f(stencil)
  return np.squeeze(
      np.matmul(np.linalg.inv(coeffs), values[..., np.newaxis]), axis=-1) 
Author: google, Project: tf-quant-finance, Source: methods.py
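
A usage sketch, assuming triangular is the helper from methods.py computing the n-th triangular number; since the test function below is exactly quadratic, the recovery is exact:

import autograd.numpy as np

def triangular(n):
    return n * (n + 1) // 2  # assumed helper from methods.py

# Recover value, gradient and Hessian of f(x, y) = x^2 + 3xy + y at (1, 2).
f = lambda p: p[..., 0]**2 + 3*p[..., 0]*p[..., 1] + p[..., 1]
target = np.array([1.0, 2.0])
stencil = np.array([[0., 0.], [1., 0.], [0., 1.],
                    [2., 0.], [0., 2.], [1., 1.]])  # k = triangular(3) = 6 points
print(taylor_approx(target, stencil, f(stencil)))
# -> [f, df/dx, df/dy, d2f/dx2, d2f/dy2, d2f/dxdy] = [9, 8, 4, 2, 0, 3]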

Example 6: generalized_outer_product

# Required import: from autograd import numpy as np
# Or: from autograd.numpy import matmul
def generalized_outer_product(x):
    if np.ndim(x) == 1:
        return np.outer(x, x)
    return np.matmul(x, np.swapaxes(x, -1, -2)) 
Author: HIPS, Project: autograd, Source: multivariate_normal.py

Example 7: covgrad

# Required import: from autograd import numpy as np
# Or: from autograd.numpy import matmul
def covgrad(x, mean, cov, allow_singular=False):
    if allow_singular:
        raise NotImplementedError("The multivariate normal pdf is not "
                "differentiable w.r.t. a singular covariance matix")
    J = np.linalg.inv(cov)
    solved = np.matmul(J, np.expand_dims(x - mean, -1))
    return 1./2 * (generalized_outer_product(solved) - J) 
Author: HIPS, Project: autograd, Source: multivariate_normal.py
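
What covgrad returns is, with $J = \Sigma^{-1}$ and $s = J(x - \mu)$, the matrix $\tfrac12\,(s s^\top - J)$; this matches the textbook derivative of the Gaussian log-density with respect to its covariance,

$$\frac{\partial}{\partial \Sigma} \log \mathcal N(x;\mu,\Sigma) = \frac{1}{2}\Big(\Sigma^{-1}(x-\mu)(x-\mu)^\top \Sigma^{-1} - \Sigma^{-1}\Big),$$

with batching and any sign convention handled by the caller in multivariate_normal.py.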

Example 8: test_matmul

# Required import: from autograd import numpy as np
# Or: from autograd.numpy import matmul
def test_matmul(): combo_check(np.matmul, [0, 1])(
                               [R(3), R(2, 3), R(2, 2, 3), C(3), C(2, 3)],
                               [R(3), R(3, 4), R(2, 3, 4), C(3), C(3, 4)]) 
Author: HIPS, Project: autograd, Source: test_systematic.py

Example 9: test_matmul_broadcast

# Required import: from autograd import numpy as np
# Or: from autograd.numpy import matmul
def test_matmul_broadcast(): combo_check(np.matmul, [0, 1])([R(1, 2, 2)], [R(3, 2, 1)]) 
Author: HIPS, Project: autograd, Source: test_systematic.py

Example 10: neural_net

# Required import: from autograd import numpy as np
# Or: from autograd.numpy import matmul
def neural_net(self, X, Q, hyp):
    H = X
    idx_3 = 0
    layers = Q.shape[0]
    for layer in range(0, layers - 2):
        idx_1 = idx_3
        idx_2 = idx_1 + Q[layer]*Q[layer+1]
        idx_3 = idx_2 + Q[layer+1]
        A = np.reshape(hyp[idx_1:idx_2], (Q[layer], Q[layer+1]))
        b = np.reshape(hyp[idx_2:idx_3], (1, Q[layer+1]))
        H = activation(np.matmul(H, A) + b)

    # Mean head: a linear output layer.
    idx_1 = idx_3
    idx_2 = idx_1 + Q[-2]*Q[-1]
    idx_3 = idx_2 + Q[-1]
    A = np.reshape(hyp[idx_1:idx_2], (Q[-2], Q[-1]))
    b = np.reshape(hyp[idx_2:idx_3], (1, Q[-1]))
    mu = np.matmul(H, A) + b

    # Variance head: exponentiated to keep Sigma positive.
    idx_1 = idx_3
    idx_2 = idx_1 + Q[-2]*Q[-1]
    idx_3 = idx_2 + Q[-1]
    A = np.reshape(hyp[idx_1:idx_2], (Q[-2], Q[-1]))
    b = np.reshape(hyp[idx_2:idx_3], (1, Q[-1]))
    Sigma = np.exp(np.matmul(H, A) + b)

    return mu, Sigma
Author: maziarraissi, Project: DeepLearningTutorial, Source: VariationalAutoencoders.py

Example 11: likelihood

# Required import: from autograd import numpy as np
# Or: from autograd.numpy import matmul
def likelihood(self, hyp):
    M = self.M
    Z = self.Z
    m = self.m
    S = self.S
    X_batch = self.X_batch
    y_batch = self.y_batch
    jitter = self.jitter
    jitter_cov = self.jitter_cov
    N = X_batch.shape[0]

    logsigma_n = hyp[-1]
    sigma_n = np.exp(logsigma_n)

    # Compute K_u_inv via a Cholesky factorization of the inducing-point kernel.
    K_u = kernel(Z, Z, hyp[:-1])
    # K_u_inv = np.linalg.solve(K_u + np.eye(M)*jitter_cov, np.eye(M))
    L = np.linalg.cholesky(K_u + np.eye(M)*jitter_cov)
    K_u_inv = np.linalg.solve(L.T, np.linalg.solve(L, np.eye(M)))

    self.K_u_inv = K_u_inv

    # Compute mu
    psi = kernel(Z, X_batch, hyp[:-1])
    K_u_inv_m = np.matmul(K_u_inv, m)
    MU = np.matmul(psi.T, K_u_inv_m)

    # Compute cov
    Alpha = np.matmul(K_u_inv, psi)
    COV = kernel(X_batch, X_batch, hyp[:-1]) - np.matmul(psi.T, np.matmul(K_u_inv, psi)) + \
        np.matmul(Alpha.T, np.matmul(S, Alpha))

    COV_inv = np.linalg.solve(COV + np.eye(N)*sigma_n + np.eye(N)*jitter, np.eye(N))
    # L = np.linalg.cholesky(COV + np.eye(N)*sigma_n + np.eye(N)*jitter)
    # COV_inv = np.linalg.solve(np.transpose(L), np.linalg.solve(L, np.eye(N)))

    # Compute cov(Z, X)
    cov_ZX = np.matmul(S, Alpha)

    # Update m and S
    alpha = np.matmul(COV_inv, cov_ZX.T)
    m = m + np.matmul(cov_ZX, np.matmul(COV_inv, y_batch - MU))
    S = S - np.matmul(cov_ZX, alpha)

    self.m = m
    self.S = S

    # Compute the negative log marginal likelihood (NLML).
    K_u_inv_m = np.matmul(K_u_inv, m)
    NLML = 0.5*np.matmul(m.T, K_u_inv_m) + np.sum(np.log(np.diag(L))) + 0.5*np.log(2.*np.pi)*M

    return NLML[0, 0]
Author: maziarraissi, Project: ParametricGP, Source: parametric_GP.py
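
In matrix form, with $\Psi = k(Z, X_{\text{batch}})$ and $A = K_u^{-1}\Psi$, the batch predictive moments assembled above are

$$\mu = \Psi^\top K_u^{-1} m, \qquad \mathrm{COV} = K_{XX} - \Psi^\top K_u^{-1}\Psi + A^\top S A,$$

after which the last block applies the Gaussian conditioning update $m \leftarrow m + \mathrm{cov}_{ZX}\,\mathrm{COV}^{-1}(y - \mu)$ and $S \leftarrow S - \mathrm{cov}_{ZX}\,\mathrm{COV}^{-1}\,\mathrm{cov}_{ZX}^\top$, where $\mathrm{cov}_{ZX} = S A$ and COV is regularized with the noise variance $\sigma_n$ and jitter before inversion.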

Example 12: predict

# Required import: from autograd import numpy as np
# Or: from autograd.numpy import matmul
def predict(self, X_star):
    Z = self.Z
    m = self.m.value
    S = self.S.value
    hyp = self.hyp
    K_u_inv = self.K_u_inv

    N_star = X_star.shape[0]
    partitions_size = 10000
    (number_of_partitions, remainder_partition) = divmod(N_star, partitions_size)

    mean_star = np.zeros((N_star, 1))
    var_star = np.zeros((N_star, 1))

    for partition in range(0, number_of_partitions):
        print("Predicting partition: %d" % (partition))
        idx_1 = partition*partitions_size
        idx_2 = (partition+1)*partitions_size

        # Compute mu
        psi = kernel(Z, X_star[idx_1:idx_2, :], hyp[:-1])
        K_u_inv_m = np.matmul(K_u_inv, m)
        mu = np.matmul(psi.T, K_u_inv_m)

        mean_star[idx_1:idx_2, 0:1] = mu

        # Compute cov
        Alpha = np.matmul(K_u_inv, psi)
        cov = kernel(X_star[idx_1:idx_2, :], X_star[idx_1:idx_2, :], hyp[:-1]) - \
            np.matmul(psi.T, np.matmul(K_u_inv, psi)) + np.matmul(Alpha.T, np.matmul(S, Alpha))
        var = np.abs(np.diag(cov))  # + np.exp(hyp[-1])

        var_star[idx_1:idx_2, 0] = var

    print("Predicting the last partition")
    idx_1 = number_of_partitions*partitions_size
    idx_2 = number_of_partitions*partitions_size + remainder_partition

    # Compute mu
    psi = kernel(Z, X_star[idx_1:idx_2, :], hyp[:-1])
    K_u_inv_m = np.matmul(K_u_inv, m)
    mu = np.matmul(psi.T, K_u_inv_m)

    mean_star[idx_1:idx_2, 0:1] = mu

    # Compute cov
    Alpha = np.matmul(K_u_inv, psi)
    cov = kernel(X_star[idx_1:idx_2, :], X_star[idx_1:idx_2, :], hyp[:-1]) - \
        np.matmul(psi.T, np.matmul(K_u_inv, psi)) + np.matmul(Alpha.T, np.matmul(S, Alpha))
    var = np.abs(np.diag(cov))  # + np.exp(hyp[-1])

    var_star[idx_1:idx_2, 0] = var

    return mean_star, var_star
Author: maziarraissi, Project: ParametricGP, Source: parametric_GP.py
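
The duplicated "last partition" block can be avoided with range's step argument; a compact sketch of the same chunking pattern, where predict_chunk is a hypothetical stand-in for the mu/var computation above:

import autograd.numpy as np

def predict_in_chunks(X_star, predict_chunk, chunk=10000):
    n = X_star.shape[0]
    out = np.zeros((n, 1))
    for start in range(0, n, chunk):  # the final, shorter chunk is covered too
        stop = min(start + chunk, n)
        out[start:stop, 0:1] = predict_chunk(X_star[start:stop, :])
    return out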


Note: the autograd.numpy.matmul examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors; consult each project's License before distributing or reusing the code. Do not repost without permission.