

Python numpy.bincount Method Code Examples

This article collects typical usage examples of the numpy.bincount method in Python. If you are wondering how numpy.bincount is used in practice, what it is for, or what real-world calls look like, the curated code examples below may help. You can also explore further usage examples for the numpy package that this method belongs to.


The following shows 15 code examples of the numpy.bincount method, sorted by popularity by default.
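
Before the project examples, here is a minimal self-contained sketch (plain NumPy, nothing project-specific) of what numpy.bincount does: it counts how often each non-negative integer occurs in an array, can optionally sum per-element weights instead of plain counts, and can pad the result to a minimum length.

import numpy as np

x = np.array([0, 1, 1, 3, 2, 1, 7])

# Plain counting: counts[i] is how many times the value i appears in x
counts = np.bincount(x)            # [1 3 1 1 0 0 0 1]

# Weighted counting: sums[i] is the sum of the weights where x == i
w = np.array([0.5, 1.0, 1.0, 2.0, 0.25, 1.0, 3.0])
sums = np.bincount(x, weights=w)   # [0.5  3.  0.25 2.  0.  0.  0.  3. ]

# minlength pads the output with trailing zeros up to a fixed size
padded = np.bincount(x, minlength=10)   # shape (10,)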

Example 1: preprocess_hog

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
import cv2
import numpy as np
from numpy.linalg import norm  # assumption: `norm` in this snippet is numpy.linalg.norm

def preprocess_hog(digits):
    samples = []
    for img in digits:
        # Gradient components, then magnitude and angle of the gradient
        gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
        gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
        mag, ang = cv2.cartToPolar(gx, gy)
        # Quantize the angles into 16 orientation bins
        bin_n = 16
        bin = np.int32(bin_n*ang/(2*np.pi))
        # Split the image into four 10x10 cells
        bin_cells = bin[:10,:10], bin[10:,:10], bin[:10,10:], bin[10:,10:]
        mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
        # Magnitude-weighted orientation histogram per cell via bincount
        hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
        hist = np.hstack(hists)

        # transform to Hellinger kernel
        eps = 1e-7
        hist /= hist.sum() + eps
        hist = np.sqrt(hist)
        hist /= norm(hist) + eps

        samples.append(hist)
    return np.float32(samples)
# Cannot guarantee that all provinces are included (comment from the original project)
Developer: wzh191920, Project: License-Plate-Recognition, Lines: 24, Source: predict.py
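
The bincount call in this example is the weighted form: the quantized angle array supplies the bin indices and the gradient magnitudes supply the weights, so each cell histogram is a magnitude-weighted orientation histogram. A stripped-down sketch of that pattern, using hypothetical toy arrays rather than the project's data:

import numpy as np

bin_n = 16
# Hypothetical stand-ins for one 10x10 cell of quantized angles and magnitudes
bin_cell = np.random.randint(0, bin_n, size=(10, 10))
mag_cell = np.random.rand(10, 10).astype(np.float32)

# hist[i] = sum of the magnitudes whose quantized angle equals i
hist = np.bincount(bin_cell.ravel(), weights=mag_cell.ravel(), minlength=bin_n)
assert hist.shape == (bin_n,)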

Example 2: _radial_wvnum

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def _radial_wvnum(k, l, N, nfactor):
    """ Creates a radial wavenumber based on two horizontal wavenumbers
    along with the appropriate index map
    """

    # compute target wavenumbers
    k = k.values
    l = l.values
    K = np.sqrt(k[np.newaxis,:]**2 + l[:,np.newaxis]**2)
    nbins = int(N/nfactor)
    if k.max() > l.max():
        ki = np.linspace(0., l.max(), nbins)
    else:
        ki = np.linspace(0., k.max(), nbins)

    # compute bin index
    kidx = np.digitize(np.ravel(K), ki)
    # compute number of points for each wavenumber
    area = np.bincount(kidx)
    # compute the average radial wavenumber for each bin
    kr = (np.bincount(kidx, weights=K.ravel())
          / np.ma.masked_where(area==0, area))

    return ki, kr[1:-1] 
Developer: xgcm, Project: xrft, Lines: 26, Source: xrft.py

Example 3: reduce_fit

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def reduce_fit(interface, state, label, inp):
    import numpy as np
    out = interface.output(0)
    out.add("X_names", state["X_names"])

    forest = []
    group_fillins = []
    for i, (k, value) in enumerate(inp):
        if k == "tree":
            forest.append(value)
        elif len(value) > 0:
            group_fillins.append(value)
    out.add("forest", forest)

    fill_in_values = []
    if len(group_fillins) > 0:
        for i, type in enumerate(state["X_meta"]):
            if type == "c":
                fill_in_values.append(np.average([sample[i] for sample in group_fillins]))
            else:
                fill_in_values.append(np.bincount([sample[i] for sample in group_fillins]).argmax())
    out.add("fill_in_values", fill_in_values) 
Developer: romanorac, Project: discomll, Lines: 24, Source: distributed_random_forest.py

Example 4: calculate_weigths_labels

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def calculate_weigths_labels(dataset, dataloader, num_classes):
    # Create an instance from the data loader
    z = np.zeros((num_classes,))
    # Initialize tqdm
    tqdm_batch = tqdm(dataloader)
    print('Calculating classes weights')
    for sample in tqdm_batch:
        y = sample['label']
        y = y.detach().cpu().numpy()
        mask = (y >= 0) & (y < num_classes)
        labels = y[mask].astype(np.uint8)
        count_l = np.bincount(labels, minlength=num_classes)
        z += count_l
    tqdm_batch.close()
    total_frequency = np.sum(z)
    class_weights = []
    for frequency in z:
        class_weight = 1 / (np.log(1.02 + (frequency / total_frequency)))
        class_weights.append(class_weight)
    ret = np.array(class_weights)
    classes_weights_path = os.path.join(Path.db_root_dir(dataset), dataset+'_classes_weights.npy')
    np.save(classes_weights_path, ret)

    return ret 
Developer: clovaai, Project: overhaul-distillation, Lines: 26, Source: calculate_weights.py

Example 5: get_confusion_matrix

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def get_confusion_matrix(gt_label, pred_label, class_num):
        """
        Calcute the confusion matrix by given label and pred
        :param gt_label: the ground truth label
        :param pred_label: the pred label
        :param class_num: the nunber of class
        :return: the confusion matrix
        """
        index = (gt_label * class_num + pred_label).astype('int32')
        label_count = np.bincount(index)
        confusion_matrix = np.zeros((class_num, class_num))

        for i_label in range(class_num):
            for i_pred_label in range(class_num):
                cur_index = i_label * class_num + i_pred_label
                if cur_index < len(label_count):
                    confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

        return confusion_matrix 
Developer: speedinghzl, Project: pytorch-segmentation-toolbox, Lines: 21, Source: evaluate.py
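
The nested loops that copy label_count into the matrix can be avoided: since index already encodes each (gt, pred) pair as gt * class_num + pred, passing minlength=class_num**2 to bincount and reshaping yields the confusion matrix directly. A sketch of that equivalent vectorized variant (not the project's code):

import numpy as np

def confusion_matrix_bincount(gt_label, pred_label, class_num):
    """Vectorized equivalent: bincount over flattened (gt, pred) pair indices."""
    gt = np.asarray(gt_label).ravel()
    pred = np.asarray(pred_label).ravel()
    index = (gt * class_num + pred).astype(np.int64)
    # minlength guarantees a full class_num*class_num histogram even when
    # some (gt, pred) combinations never occur
    return np.bincount(index, minlength=class_num ** 2).reshape(class_num, class_num)

# Toy usage: 3 classes, rows are ground truth, columns are predictions
cm = confusion_matrix_bincount([0, 1, 2, 2], [0, 2, 2, 1], class_num=3)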

Example 6: _optimize_2D

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def _optimize_2D(nodes, triangles, stay=[]):
    ''' Optimize the locations of the points by moving them towards the center
    of their patch. This is done iteratively for all points over a number of
    iterations, using a 0.05 step length '''
    edges, tr_edges, adjacency_list = _edge_list(triangles)
    boundary = edges[adjacency_list[:, 1] == -1].reshape(-1)
    stay = np.union1d(boundary, stay)
    stay = stay.astype(int)
    n_iter = 5
    step_length = .05
    mean_bar = np.zeros_like(nodes)
    new_nodes = np.copy(nodes)
    k = np.bincount(triangles.reshape(-1), minlength=len(nodes))
    for n in range(n_iter):
        bar = np.mean(new_nodes[triangles], axis=1)
        for i in range(2):
            mean_bar[:, i] = np.bincount(triangles.reshape(-1),
                                         weights=np.repeat(bar[:, i], 3),
                                         minlength=len(nodes))
        mean_bar /= k[:, None]
        new_nodes += step_length * (mean_bar - new_nodes)
        new_nodes[stay] = nodes[stay]
    return new_nodes 
Developer: simnibs, Project: simnibs, Lines: 25, Source: electrode_placement.py
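
This example (like Example 7 below) uses bincount as a scatter-add: the flattened triangle array supplies repeated node indices, and the weights contributed by each triangle are summed into the corresponding node slots. A minimal sketch of that pattern on a hypothetical toy mesh:

import numpy as np

# Hypothetical toy mesh: 4 nodes, 3 triangles referencing them by node index
triangles = np.array([[0, 1, 2],
                      [1, 2, 3],
                      [0, 2, 3]])
value_per_triangle = np.array([1.0, 2.0, 4.0])

# Scatter-add: each triangle contributes its value to all three of its nodes
node_sum = np.bincount(triangles.ravel(),
                       weights=np.repeat(value_per_triangle, 3),
                       minlength=4)
# node_sum == [5., 3., 7., 6.]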

Example 7: nodes_areas

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def nodes_areas(self):
        ''' Areas for all nodes in a surface

        Returns
        ---------
        nd: NodeData
            NodeData structure with the area associated with each node

        '''
        areas = self.elements_volumes_and_areas()[self.elm.triangles]
        triangle_nodes = self.elm[self.elm.triangles, :3] - 1
        nd = np.bincount(
            triangle_nodes.reshape(-1),
            np.repeat(areas/3., 3), self.nodes.nr
        )

        return NodeData(nd, 'areas') 
Developer: simnibs, Project: simnibs, Lines: 19, Source: mesh_io.py

Example 8: get_confusion_matrix

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def get_confusion_matrix(gt_label, pred_label, class_num):
    """
    Calcute the confusion matrix by given label and pred
    :param gt_label: the ground truth label
    :param pred_label: the pred label
    :param class_num: the nunber of class
    :return: the confusion matrix
    """
    index = (gt_label * class_num + pred_label).astype('int32')
    label_count = np.bincount(index)
    confusion_matrix = np.zeros((class_num, class_num))

    for i_label in range(class_num):
        for i_pred_label in range(class_num):
            cur_index = i_label * class_num + i_pred_label
            if cur_index < len(label_count):
                confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

    return confusion_matrix 
Developer: lxtGH, Project: Fast_Seg, Lines: 21, Source: val.py

Example 9: __init__

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None):
        if num_replicas is None:
            num_replicas = get_world_size()
        if rank is None:
            rank = get_rank()
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0

        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)

        self.num_samples = 0
        for i, j in enumerate(self.group_sizes):
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas 
Developer: dingjiansw101, Project: AerialDetection, Lines: 27, Source: sampler.py

Example 10: get_confusion_matrix

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def get_confusion_matrix(self, gt_label, pred_label, class_num):
        """
        Calcute the confusion matrix by given label and pred
        :param gt_label: the ground truth label
        :param pred_label: the pred label
        :param class_num: the nunber of class
        :return: the confusion matrix
        """
        index = (gt_label * class_num + pred_label).astype('int32')
        label_count = np.bincount(index)
        confusion_matrix = np.zeros((class_num, class_num))

        for i_label in range(class_num):
            for i_pred_label in range(class_num):
                cur_index = i_label * class_num + i_pred_label
                if cur_index < len(label_count):
                    confusion_matrix[i_label, i_pred_label] = label_count[cur_index]

        return confusion_matrix 
Developer: tonysy, Project: Deep-Feature-Flow-Segmentation, Lines: 21, Source: cityscape_video.py

Example 11: test_with_incorrect_minlength

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def test_with_incorrect_minlength(self):
        x = np.array([], dtype=int)
        assert_raises_regex(TypeError,
                            "'str' object cannot be interpreted",
                            lambda: np.bincount(x, minlength="foobar"))
        assert_raises_regex(ValueError,
                            "must not be negative",
                            lambda: np.bincount(x, minlength=-1))

        x = np.arange(5)
        assert_raises_regex(TypeError,
                            "'str' object cannot be interpreted",
                            lambda: np.bincount(x, minlength="foobar"))
        assert_raises_regex(ValueError,
                            "must not be negative",
                            lambda: np.bincount(x, minlength=-1)) 
Developer: Frank-qlu, Project: recruit, Lines: 18, Source: test_function_base.py
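
What these tests exercise, in short: minlength must be a non-negative integer; it pads the result with trailing zeros but never truncates it below max(x) + 1. A small sketch of the valid behavior alongside the error case (exact error wording may vary between NumPy versions):

import numpy as np

x = np.arange(5)
print(np.bincount(x, minlength=10).shape)   # (10,) - padded with zeros
print(np.bincount(x, minlength=2).shape)    # (5,)  - never truncated below max(x) + 1

try:
    np.bincount(x, minlength=-1)
except ValueError as err:
    print(err)                               # minlength must not be negative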

Example 12: _likelihood_weighted_resample

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def _likelihood_weighted_resample(self, samples, rowid, constraints=None,
            inputs=None, statenos=None, multiprocess=1):
        assert len(samples) == \
            len(self.states) if statenos is None else len(statenos)
        assert all(len(s) == len(samples[0]) for s in samples[1:])
        N = len(samples[0])
        weights = np.zeros(len(samples)) if not constraints else \
            self.logpdf(rowid, constraints, inputs,
                statenos=statenos, multiprocess=multiprocess)
        n_model = np.bincount(gu.log_pflip(weights, size=N, rng=self.rng))
        indexes = [self.rng.choice(N, size=n, replace=False) for n in n_model]
        resamples = [
            [s[i] for i in index]
            for s, index in zip(samples, indexes)
            if len(index) > 0
        ]
        return list(itertools.chain.from_iterable(resamples))

    # --------------------------------------------------------------------------
    # Serialize 
Developer: probcomp, Project: cgpm, Lines: 22, Source: engine.py

Example 13: plot_dist_discrete

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def plot_dist_discrete(X, output, clusters, ax=None, Y=None, hist=True):
    # Create a new axis?
    if ax is None:
        _, ax = plt.subplots()
    # Set up x axis.
    X = np.asarray(X, dtype=int)
    x_max = max(X)
    Y = range(int(x_max)+1)
    X_hist = np.bincount(X) / float(len(X))
    ax.bar(Y, X_hist, color='gray', edgecolor='none')
    # Compute weighted pdfs
    pdf = np.zeros((len(clusters), len(Y)))
    W = [log(clusters[k].N) - log(float(len(X))) for k in clusters]
    for i, k in enumerate(clusters):
        pdf[i,:] = np.exp(
            [W[i] + clusters[k].logpdf(None, {output:y}) for y in Y])
        color, alpha = gu.curve_color(i)
        ax.bar(Y, pdf[i,:], color=color, edgecolor='none', alpha=alpha)
    # Plot the sum of pdfs.
    ax.bar(
        Y, np.sum(pdf, axis=0), color='none', edgecolor='black', linewidth=3)
    ax.set_xlim([0, x_max+1])
    # Title.
    ax.set_title(clusters.values()[0].name())
    return ax 
Developer: probcomp, Project: cgpm, Lines: 27, Source: plots.py

Example 14: test_crp_decrement

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def test_crp_decrement(N, alpha, seed):
    A = gu.simulate_crp(N, alpha, rng=gu.gen_rng(seed))
    Nk = list(np.bincount(A))
    # Decrement all counts by 1.
    Nk = [n-1 if n > 1 else n for n in Nk]

    # Decrement rowids.
    crp = simulate_crp_gpm(N, alpha, rng=gu.gen_rng(seed))
    targets = [c for c in crp.counts if crp.counts[c] > 1]
    seen = set([])
    for r, c in crp.data.items():
        if c in targets and c not in seen:
            seen.add(c)
            crp.unincorporate(r)
        if len(seen) == len(targets):
            break

    assert_crp_equality(alpha, Nk, crp) 
Developer: probcomp, Project: cgpm, Lines: 20, Source: test_crp.py

Example 15: test_crp_increment

# Required module: import numpy [as alias]
# Or: from numpy import bincount [as alias]
def test_crp_increment(N, alpha, seed):
    A = gu.simulate_crp(N, alpha, rng=gu.gen_rng(seed))
    Nk = list(np.bincount(A))
    # Add 3 new classes.
    Nk.extend([2, 3, 1])

    crp = simulate_crp_gpm(N, alpha, rng=gu.gen_rng(seed))
    # Increment rowids.
    rowid = max(crp.data)
    clust = max(crp.data.values())
    crp.incorporate(rowid+1, {0:clust+1}, None)
    crp.incorporate(rowid+2, {0:clust+1}, None)
    crp.incorporate(rowid+3, {0:clust+2}, None)
    crp.incorporate(rowid+4, {0:clust+2}, None)
    crp.incorporate(rowid+5, {0:clust+2}, None)
    crp.incorporate(rowid+6, {0:clust+3}, None)

    assert_crp_equality(alpha, Nk, crp) 
Developer: probcomp, Project: cgpm, Lines: 20, Source: test_crp.py


Note: The numpy.bincount examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not republish this article without permission.