

Python numpy.sum Method Code Examples

This article collects typical real-world usage examples of Python's numpy.sum method. If you are unsure what numpy.sum does, how to call it, or what real code using it looks like, the hand-picked examples below may help. You can also explore further usage examples from the numpy package to which this method belongs.


The following presents 15 code examples of the numpy.sum method, sorted by popularity by default.
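
Before turning to the project code, here is a minimal sketch of numpy.sum itself (a quick refresher, not tied to any of the projects below):

import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 6]])

np.sum(a)                          # 21: sum over all elements
np.sum(a, axis=0)                  # array([5, 7, 9]): column sums
np.sum(a, axis=1)                  # array([ 6, 15]): row sums
np.sum(a, axis=1, keepdims=True)   # shape (2, 1) instead of (2,)
np.sum(a > 3)                      # 3: summing a boolean mask counts the True entries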

Example 1: add_intercept

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def add_intercept(self, X):
        """Add 1's to data as last features."""
        # Data shape
        N, D = X.shape

        # Check whether an intercept column already exists
        if np.any(np.sum(X, axis=0) == N):

            # Report
            print('Intercept is not the last feature. Swapping..')

            # Find which column contains the intercept
            intercept_index = np.argwhere(np.sum(X, axis=0) == N)

            # Swap intercept to last
            X = X[:, np.setdiff1d(np.arange(D), intercept_index)]

        # Add intercept as last column
        X = np.hstack((X, np.ones((N, 1))))

        # Append column of 1's to data, and increment dimensionality
        return X, D+1 
Developer: wmkouw, Project: libTLDA, Lines of code: 24, Source: tcpr.py
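
The check np.sum(X, axis=0) == N above relies on the fact that a column of ones in an (N, D) matrix sums to exactly N. A tiny sketch with made-up data:

import numpy as np

X = np.array([[0.2, 1.0, 3.1],
              [0.7, 1.0, 2.4],
              [0.5, 1.0, 1.9]])
N, D = X.shape

print(np.sum(X, axis=0) == N)                 # [False  True False]
print(np.argwhere(np.sum(X, axis=0) == N))    # [[1]]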

Example 2: find_match

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def find_match(self, pred, gt):
    '''
    Match component to balls.
    '''
    batch_size, n_frames_input, n_components, _ = pred.shape
    diff = pred.reshape(batch_size, n_frames_input, n_components, 1, 2) - \
               gt.reshape(batch_size, n_frames_input, 1, n_components, 2)
    diff = np.sum(np.sum(diff ** 2, axis=-1), axis=1)
    # Direct indices
    indices = np.argmin(diff, axis=2)
    ambiguous = np.zeros(batch_size, dtype=np.int8)
    for i in range(batch_size):
        _, counts = np.unique(indices[i], return_counts=True)
        if not np.all(counts == 1):
            ambiguous[i] = 1
    return indices, ambiguous 
Developer: jthsieh, Project: DDPAE-video-prediction, Lines of code: 18, Source: metrics.py
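
Note that the nested reduction np.sum(np.sum(diff ** 2, axis=-1), axis=1) can also be written with a tuple of axes; a small equivalence check on random data (shapes invented only for illustration):

import numpy as np

batch_size, n_frames, n_components = 4, 10, 3
diff = np.random.randn(batch_size, n_frames, n_components, n_components, 2)

nested = np.sum(np.sum(diff ** 2, axis=-1), axis=1)
tupled = np.sum(diff ** 2, axis=(1, -1))   # same reduction in a single call

assert nested.shape == tupled.shape == (batch_size, n_components, n_components)
assert np.allclose(nested, tupled)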

Example 3: test_bounds

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def test_bounds(self):
        """
        Test that out-of-bounds coordinates return NaN reddening, and that
        in-bounds coordinates do not return NaN reddening.
        """

        for mode in (['random_sample', 'random_sample_per_pix',
                      'median', 'samples', 'mean']):
            # Draw random coordinates, both above and below dec = -30 degree line
            n_pix = 1000
            ra = -180. + 360.*np.random.random(n_pix)
            dec = -75. + 90.*np.random.random(n_pix)    # 45 degrees above/below
            c = coords.SkyCoord(ra, dec, frame='icrs', unit='deg')

            ebv_calc = self._bayestar(c, mode=mode)

            nan_below = np.isnan(ebv_calc[dec < -35.])
            nan_above = np.isnan(ebv_calc[dec > -25.])
            pct_nan_above = np.sum(nan_above) / float(nan_above.size)

            # print r'{:s}: {:.5f}% nan above dec=-25 deg.'.format(mode, 100.*pct_nan_above)

            self.assertTrue(np.all(nan_below))
            self.assertTrue(pct_nan_above < 0.05) 
Developer: gregreen, Project: dustmaps, Lines of code: 26, Source: test_bayestar.py
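
Because booleans are summed as 0/1, np.sum over a mask counts its True entries; the NaN fraction computed above is therefore the same as np.mean of the mask. A brief sketch on made-up values:

import numpy as np

ebv = np.array([0.1, np.nan, 0.3, np.nan, 0.2])
nan_mask = np.isnan(ebv)

n_nan = np.sum(nan_mask)                             # 2
frac_nan = np.sum(nan_mask) / float(nan_mask.size)   # 0.4
assert frac_nan == np.mean(nan_mask)                 # mean of a boolean mask is the fraction of True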

Example 4: test_bounds

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def test_bounds(self):
        """
        Test that out-of-bounds coordinates return NaN reddening, and that
        in-bounds coordinates do not return NaN reddening.
        """

        for mode in (['random_sample', 'random_sample_per_pix',
                      'median', 'samples', 'mean']):
            # Draw random coordinates on the sphere
            n_pix = 10000
            u, v = np.random.random((2,n_pix))
            l = 360. * u
            b = 90. - np.degrees(np.arccos(2.*v - 1.))
            c = coords.SkyCoord(l, b, frame='galactic', unit='deg')

            A_calc = self._iphas(c, mode=mode)

            in_bounds = (l > 32.) & (l < 213.) & (b < 4.5) & (b > -4.5)
            out_of_bounds = (l < 28.) | (l > 217.) | (b > 7.) | (b < -7.)

            n_nan_in_bounds = np.sum(np.isnan(A_calc[in_bounds]))
            n_finite_out_of_bounds = np.sum(np.isfinite(A_calc[out_of_bounds]))

            self.assertTrue(n_nan_in_bounds == 0)
            self.assertTrue(n_finite_out_of_bounds == 0) 
Developer: gregreen, Project: dustmaps, Lines of code: 27, Source: test_iphas.py

Example 5: solve_modal

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def solve_modal(model, k: int):
    """
    Solve the eigen modes of the MDOF system.
    
    params:
        model: FEModel.
        k: number of modes to extract.
    """
    K_,M_=model.K_,model.M_
    if k>model.DOF:
        logger.info('Warning: the modal number to extract is larger than the system DOFs, only %d modes are available'%model.DOF)
        k=model.DOF
    omega2s,modes = sl.eigsh(K_,k,M_,sigma=0,which='LM')
    delta = modes/np.sum(modes,axis=0)
    model.is_solved=True
    model.mode_=delta
    model.omega_=np.sqrt(omega2s).reshape((k,1)) 
Developer: zhuoju36, Project: StructEngPy, Lines of code: 19, Source: dynamic.py
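
The line modes / np.sum(modes, axis=0) uses broadcasting to divide every column by its own column sum, so each mode shape is scaled to sum to 1. A toy illustration with an invented matrix:

import numpy as np

modes = np.array([[1.0, 2.0],
                  [3.0, 2.0]])              # one mode shape per column (made up)
normalized = modes / np.sum(modes, axis=0)  # the (2,) vector of column sums broadcasts over rows

print(np.sum(normalized, axis=0))           # [1. 1.]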

Example 6: _load_data

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def _load_data():

    dfTrain = pd.read_csv(config.TRAIN_FILE)
    dfTest = pd.read_csv(config.TEST_FILE)

    def preprocess(df):
        cols = [c for c in df.columns if c not in ["id", "target"]]
        df["missing_feat"] = np.sum((df[cols] == -1).values, axis=1)
        df["ps_car_13_x_ps_reg_03"] = df["ps_car_13"] * df["ps_reg_03"]
        return df

    dfTrain = preprocess(dfTrain)
    dfTest = preprocess(dfTest)

    cols = [c for c in dfTrain.columns if c not in ["id", "target"]]
    cols = [c for c in cols if (not c in config.IGNORE_COLS)]

    X_train = dfTrain[cols].values
    y_train = dfTrain["target"].values
    X_test = dfTest[cols].values
    ids_test = dfTest["id"].values
    cat_features_indices = [i for i,c in enumerate(cols) if c in config.CATEGORICAL_COLS]

    return dfTrain, dfTest, X_train, y_train, X_test, ids_test, cat_features_indices 
Developer: ChenglongChen, Project: tensorflow-DeepFM, Lines of code: 26, Source: main.py
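
The missing_feat column simply counts how many entries of each row equal the missing-value marker -1, again by summing a boolean array along axis=1. A tiny sketch on an invented frame:

import numpy as np
import pandas as pd

df = pd.DataFrame({"f1": [1, -1, 3], "f2": [-1, -1, 5]})
df["missing_feat"] = np.sum((df[["f1", "f2"]] == -1).values, axis=1)
print(df["missing_feat"].tolist())   # [1, 2, 0]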

Example 7: _prepro_cpg

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def _prepro_cpg(self, states, dists):
        """Preprocess the state and distance of neighboring CpG sites."""
        prepro_states = []
        prepro_dists = []
        for state, dist in zip(states, dists):
            nan = state == dat.CPG_NAN
            if np.any(nan):
                state[nan] = np.random.binomial(1, state[~nan].mean(),
                                                nan.sum())
                dist[nan] = self.cpg_max_dist
            dist = np.minimum(dist, self.cpg_max_dist) / self.cpg_max_dist
            prepro_states.append(np.expand_dims(state, 1))
            prepro_dists.append(np.expand_dims(dist, 1))
        prepro_states = np.concatenate(prepro_states, axis=1)
        prepro_dists = np.concatenate(prepro_dists, axis=1)
        if self.cpg_wlen:
            center = prepro_states.shape[2] // 2
            delta = self.cpg_wlen // 2
            tmp = slice(center - delta, center + delta)
            prepro_states = prepro_states[:, :, tmp]
            prepro_dists = prepro_dists[:, :, tmp]
        return (prepro_states, prepro_dists) 
Developer: kipoi, Project: models, Lines of code: 24, Source: dataloader_m.py

Example 8: _avg_embed

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def _avg_embed(self, embed_dict, words_dict):
        """
        :param embed_dict:
        :param words_dict:
        """
        print("loading pre_train embedding by avg for out of vocabulary.")
        embeddings = np.zeros((int(self.words_count), int(self.dim)))
        inword_list = {}
        for word in words_dict:
            if word in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.exact_count += 1
            elif word.lower() in embed_dict:
                embeddings[words_dict[word]] = np.array([float(i) for i in embed_dict[word.lower()]], dtype='float32')
                inword_list[words_dict[word]] = 1
                self.fuzzy_count += 1
            else:
                self.oov_count += 1
        sum_col = np.sum(embeddings, axis=0) / len(inword_list)  # avg
        for i in range(len(words_dict)):
            if i not in inword_list and i != self.padID:
                embeddings[i] = sum_col
        final_embed = torch.from_numpy(embeddings).float()
        return final_embed 
Developer: bamtercelboo, Project: pytorch_NER_BiLSTM_CNN_CRF, Lines of code: 27, Source: Embed.py

Example 9: loss

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def loss(self, x, t):
        """求损失函数
        Parameters
        ----------
        x : 输入数据
        t : 教师标签
        Returns
        -------
        损失函数的值
        """
        y = self.predict(x)

        weight_decay = 0
        for idx in range(1, self.hidden_layer_num + 2):
            W = self.params['W' + str(idx)]
            weight_decay += 0.5 * self.weight_decay_lambda * np.sum(W ** 2)

        return self.last_layer.forward(y, t) + weight_decay 
Developer: wdxtub, Project: deep-learning-note, Lines of code: 20, Source: multi_layer_net.py
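
The weight-decay term above is the standard L2 penalty, 0.5 * lambda * (sum of squared weights), accumulated over all layers with np.sum. A quick check of the arithmetic on a made-up weight matrix:

import numpy as np

W = np.array([[0.5, -1.0],
              [2.0,  0.0]])
lam = 0.1
decay = 0.5 * lam * np.sum(W ** 2)   # 0.5 * 0.1 * (0.25 + 1.0 + 4.0 + 0.0) = 0.2625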

Example 10: cost0

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def cost0(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    
    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
    
    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    
    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i,:], np.log(h[i,:]))
        second_term = np.multiply((1 - y[i,:]), np.log(1 - h[i,:]))
        J += np.sum(first_term - second_term)
    
    J = J / m
    
    return J 
Developer: wdxtub, Project: deep-learning-note, Lines of code: 24, Source: 5_nueral_network.py
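
The per-example loop in cost0 can be collapsed into a single np.sum over the full matrices. A vectorized sketch with a hypothetical cost0_vectorized helper, assuming y and h are (m, num_labels) arrays of one-hot labels and predicted probabilities:

import numpy as np

def cost0_vectorized(y, h):
    """Same unregularized cross-entropy as the loop above, computed with one np.sum."""
    m = y.shape[0]
    first_term = np.multiply(-y, np.log(h))
    second_term = np.multiply(1 - y, np.log(1 - h))
    return np.sum(first_term - second_term) / m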

Example 11: cost

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def cost(params, input_size, hidden_size, num_labels, X, y, learning_rate):
    m = X.shape[0]
    X = np.matrix(X)
    y = np.matrix(y)
    
    # reshape the parameter array into parameter matrices for each layer
    theta1 = np.matrix(np.reshape(params[:hidden_size * (input_size + 1)], (hidden_size, (input_size + 1))))
    theta2 = np.matrix(np.reshape(params[hidden_size * (input_size + 1):], (num_labels, (hidden_size + 1))))
    
    # run the feed-forward pass
    a1, z2, a2, z3, h = forward_propagate(X, theta1, theta2)
    
    # compute the cost
    J = 0
    for i in range(m):
        first_term = np.multiply(-y[i,:], np.log(h[i,:]))
        second_term = np.multiply((1 - y[i,:]), np.log(1 - h[i,:]))
        J += np.sum(first_term - second_term)
    
    J = J / m
    
    # add the cost regularization term
    J += (float(learning_rate) / (2 * m)) * (np.sum(np.power(theta1[:,1:], 2)) + np.sum(np.power(theta2[:,1:], 2)))
    
    return J 
Developer: wdxtub, Project: deep-learning-note, Lines of code: 27, Source: 5_nueral_network.py

Example 12: select_threshold

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def select_threshold(pval, yval):
    best_epsilon = 0
    best_f1 = 0
    f1 = 0
    
    step = (pval.max() - pval.min()) / 1000
    
    for epsilon in np.arange(pval.min(), pval.max(), step):
        preds = pval < epsilon
        
        tp = np.sum(np.logical_and(preds == 1, yval == 1)).astype(float)
        fp = np.sum(np.logical_and(preds == 1, yval == 0)).astype(float)
        fn = np.sum(np.logical_and(preds == 0, yval == 1)).astype(float)
        
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        f1 = (2 * precision * recall) / (precision + recall)
        
        if f1 > best_f1:
            best_f1 = f1
            best_epsilon = epsilon
    
    return best_epsilon, best_f1 
Developer: wdxtub, Project: deep-learning-note, Lines of code: 25, Source: 9_anomaly_and_rec.py
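
Counting true/false positives with np.sum(np.logical_and(...)) is the core numpy.sum pattern in this example; a toy check with invented predictions and labels:

import numpy as np

preds = np.array([1, 1, 0, 0, 1])
yval  = np.array([1, 0, 0, 1, 1])

tp = np.sum(np.logical_and(preds == 1, yval == 1))   # 2
fp = np.sum(np.logical_and(preds == 1, yval == 0))   # 1
fn = np.sum(np.logical_and(preds == 0, yval == 1))   # 1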

Example 13: cost

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def cost(params, Y, R, num_features):
    Y = np.matrix(Y)  # (1682, 943)
    R = np.matrix(R)  # (1682, 943)
    num_movies = Y.shape[0]
    num_users = Y.shape[1]
    
    # reshape the parameter array into parameter matrices
    X = np.matrix(np.reshape(params[:num_movies * num_features], (num_movies, num_features)))  # (1682, 10)
    Theta = np.matrix(np.reshape(params[num_movies * num_features:], (num_users, num_features)))  # (943, 10)
    
    # initializations
    J = 0
    
    # compute the cost
    error = np.multiply((X * Theta.T) - Y, R)  # (1682, 943)
    squared_error = np.power(error, 2)  # (1682, 943)
    J = (1. / 2) * np.sum(squared_error)
    
    return J 
Developer: wdxtub, Project: deep-learning-note, Lines of code: 21, Source: 9_anomaly_and_rec.py

Example 14: gradientReg

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def gradientReg(theta, X, y, learningRate):
    theta = np.matrix(theta)
    X = np.matrix(X)
    y = np.matrix(y)
    
    parameters = int(theta.ravel().shape[1])
    grad = np.zeros(parameters)
    
    error = sigmoid(X * theta.T) - y
    
    for i in range(parameters):
        term = np.multiply(error, X[:,i])
        
        if (i == 0):
            grad[i] = np.sum(term) / len(X)
        else:
            grad[i] = (np.sum(term) / len(X)) + ((learningRate / len(X)) * theta[:,i])
    
    return grad 
Developer: wdxtub, Project: deep-learning-note, Lines of code: 21, Source: 3_logistic_regression.py

Example 15: evaluate

# Required import: import numpy [as alias]
# Or: from numpy import sum [as alias]
def evaluate(self, points):
        points = atleast_2d(points)

        d, m = points.shape
        if d != self.d:
            if d == 1 and m == self.d:
                # points was passed in as a row vector
                points = reshape(points, (self.d, 1))
                m = 1
            else:
                msg = "points have dimension %s, dataset has dimension %s" % (d,
                    self.d)
                raise ValueError(msg)

        result = zeros((m,), dtype=float)

        if m >= self.n:
            # there are more points than data, so loop over data
            for i in range(self.n):
                diff = self.dataset[:, i, newaxis] - points
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff*tdiff,axis=0) / 2.0
                result = result + exp(-energy)
        else:
            # loop over points
            for i in range(m):
                diff = self.dataset - points[:, i, newaxis]
                tdiff = dot(self.inv_cov, diff)
                energy = sum(diff * tdiff, axis=0) / 2.0
                result[i] = sum(exp(-energy), axis=0)

        result = result / self._norm_factor

        return result 
Developer: svviz, Project: svviz, Lines of code: 36, Source: kde.py
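
The expression sum(diff * tdiff, axis=0) evaluates the Mahalanobis-style quadratic form for every point at once, producing one value per column. A small sketch with made-up shapes:

import numpy as np

d, m = 2, 5
inv_cov = np.eye(d)
diff = np.random.randn(d, m)

tdiff = np.dot(inv_cov, diff)
energy = np.sum(diff * tdiff, axis=0) / 2.0   # shape (m,): one energy per point
assert np.allclose(energy, 0.5 * np.einsum('ij,ij->j', diff, tdiff))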


Note: The numpy.sum examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.