

Python neighbors.KDTree Class Code Examples

This article collects typical usage examples of the sklearn.neighbors.KDTree class in Python. If you are looking for concrete examples of how the KDTree class is used in practice, the curated examples below may help.


The following presents 15 code examples of the KDTree class, sorted by popularity by default.
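Before looking at the examples, here is a minimal, self-contained sketch of the three KDTree calls that recur throughout this page (construction, k-nearest-neighbour query, and radius query). The data and parameter values are purely illustrative.

import numpy as np
from sklearn.neighbors import KDTree

# Illustrative data: 1000 random points in 2-D.
rng = np.random.default_rng(0)
X = rng.random((1000, 2))

# Build the tree; leaf_size trades construction cost against query cost.
tree = KDTree(X, leaf_size=40, metric='euclidean')

# k-nearest-neighbour query: distances to and indices of the 3 closest points.
dist, ind = tree.query(X[:5], k=3)

# Radius query: either the neighbour indices or just their counts.
neighbors = tree.query_radius(X[:5], r=0.1)
counts = tree.query_radius(X[:5], r=0.1, count_only=True)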

Example 1: match

def match(x,y,mytab):
    """Routine that matches the truth catalog
    with the input table
    
    Args:
    ----
        x: `float` RA of the truth objects to match (in degrees)
        y: `float` dec of the truth objects to match (in degrees)
        mytab: `astropy.table.Table` table containing the L2
            input catalog.

    Returns:
    -------
        ind: `int` array of indices to select the truth objects
            that match the detected objects
    """
    X = np.zeros((len(x),2))
    X[:,0]=x
    X[:,1]=y
    tree = KDTree(X,leaf_size=40)
    Y = np.zeros((len(mytab),2))
    Y[:,0]=mytab['coord_ra']*180/np.pi
    Y[:,1]=mytab['coord_dec']*180/np.pi
    dist, ind = tree.query(Y,k=1)
    print('Matches with distance > 1 px:', np.count_nonzero(dist > 1))
    return ind
Developer: DarkEnergyScienceCollaboration | Project: SSim_DC1_Roadmap | Lines: 26 | Source: input_output_comp.py
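A hypothetical way to call match() on synthetic data (not part of the original project): the truth coordinates are given in degrees, while the table columns coord_ra and coord_dec are assumed to be in radians, since the function converts them with 180/np.pi.

import numpy as np
from astropy.table import Table

# Synthetic truth catalog in degrees and a fake "detected" table in radians.
rng = np.random.default_rng(1)
ra_true = 50.0 + 0.5*rng.random(500)
dec_true = -30.0 + 0.5*rng.random(500)
mytab = Table({'coord_ra': np.deg2rad(ra_true[:100]),
               'coord_dec': np.deg2rad(dec_true[:100])})

ind = match(ra_true, dec_true, mytab)   # shape (100, 1): nearest truth index per row
matched_ra = ra_true[ind.ravel()]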

Example 2: match_bright

def match_bright(x,y,x2,y2,mags,dist=1./3600.):
    """Routine that matches the truth catalog
    with the input table
    
    Args:
    ----
        x: `float` RA of the truth objects to match (in degrees)
        y: `float` dec of the truth objects to match (in degrees)
        x2: `float` RA of detected objects to match (in degrees)
        y2: `float` dec of detected objects to match (in degrees)
        mags: `float` array containing the true input magnitudes
        dist: `float` maximum distance in degrees considered to match
            the objects, the default is 1 arcsecond.
    Returns:
    -------
        brightest_ind: `int` array of indices to select the truth objects
            that match the detected objects, returns -1 if no match has
            been found for a particular object
    """
    X = np.zeros((len(x),2))
    X[:,0]=x
    X[:,1]=y
    Y = np.zeros((len(x2),2))
    Y[:,0]=x2
    Y[:,1]=y2
    tree = KDTree(X,leaf_size=40)
    ind = tree.query_radius(Y, r=dist)
    brightest_indices = np.zeros(len(ind),dtype=np.int64)
    for i,ii in enumerate(ind):
        sorted_indices = np.argsort(mags[ii])
        if(len(sorted_indices)>0):
            brightest_indices[i] = ii[sorted_indices[0]]
        else:
            brightest_indices[i]=-1 
    return brightest_indices
Developer: DarkEnergyScienceCollaboration | Project: SSim_DC1_Roadmap | Lines: 35 | Source: input_output_comp.py

Example 3: compute_centroids

def compute_centroids(X, C):
    """Compute the centroids for dataset X given centers C. Note: centers
    C may not belong to X.
    """
    tree = KDTree(X)
    centroids = tree.query(C, k=1, return_distance=False).squeeze()
    return centroids
Developer: emanuele | Project: minibatch_kmeans | Lines: 7 | Source: kmeans.py

Example 4: count_close

def count_close(x,y,x2,y2,distances):
    """Routine that counts the number of 
    objects that are within certain radius
    
    Args:
    ----
        x: `float` position X of objects to count
        y: `float` position Y of objects to count
        x2: `float` position X of the objects that serve as the center
            of the circle where we look for neighbors 
        y2: `float` position Y of the objects that serve as the center
            of the circle where we look for neighbors  
        distances: `float` array of radii where to count the objects
    Returns:
    -------
        neighbors: `float` the mean number of neighbors in a circle of radius
            corresponding to each entry of distances
        err: `float` standard deviation of the number of neighbors in a circle
            of radius corresponding to each entry of distances
    """
    X = np.zeros((len(x),2))
    X[:,0]=x
    X[:,1]=y
    Y = np.zeros((len(x2),2))
    Y[:,0]=x2
    Y[:,1]=y2
    tree = KDTree(X,leaf_size=40)
    neighbors = np.zeros(len(distances))
    err = np.zeros(len(distances))
    for i,distance in enumerate(distances):
        # Query once per radius and reuse the counts for both statistics.
        counts = tree.query_radius(Y, r=distance, count_only=True)
        neighbors[i], err[i] = np.nanmean(counts), np.nanstd(counts)
    return neighbors, err
Developer: DarkEnergyScienceCollaboration | Project: SSim_DC1_Roadmap | Lines: 32 | Source: input_output_comp.py
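A quick, hypothetical check of count_close() on uniform random points (not taken from the original project): for radii well inside the unit square, the mean count should be roughly the point density times the circle area, i.e. about 2000*pi*r**2 here.

import numpy as np

rng = np.random.default_rng(2)
x, y = rng.random(2000), rng.random(2000)     # objects to count
x2, y2 = rng.random(50), rng.random(50)       # circle centers
radii = np.array([0.01, 0.05, 0.1])

mean_counts, std_counts = count_close(x, y, x2, y2, radii)
print(mean_counts)   # roughly [0.6, 16, 63], up to edge effects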

Example 5: compute_labels

def compute_labels(X, C):
    """Compute the cluster labels for dataset X given centers C.
    """
    # labels = np.argmin(pairwise_distances(C, X), axis=0) # THIS REQUIRES TOO MUCH MEMORY FOR LARGE X
    tree = KDTree(C)
    labels = tree.query(X, k=1, return_distance=False).squeeze()
    return labels
Developer: emanuele | Project: minibatch_kmeans | Lines: 7 | Source: kmeans.py
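compute_labels and compute_centroids (Example 3) come from the same mini-batch k-means repository and form the assignment step of the algorithm. A rough sketch on random data, where the centers C are assumed to come from an earlier update step:

import numpy as np

rng = np.random.default_rng(3)
X = rng.random((10000, 8))                        # dataset
C = X[rng.choice(len(X), 20, replace=False)]      # 20 tentative centers

labels = compute_labels(X, C)      # nearest-center index for every point, shape (10000,)
nearest = compute_centroids(X, C)  # nearest data-point index for every center, shape (20,)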

Example 6: buildDistanceMap

    def buildDistanceMap (self, X, Y):
        classes = np.unique(Y)
        nClasses = len(classes)
        tree = KDTree(X)
        nRows = X.shape[0]

        TSOri = np.array([]).reshape(0,self.k)

        distanceMap = np.array([]).reshape(0,self.k)
        labels = np.array([]).reshape(0,self.k)

        for row in range(nRows):
            distances, indicesOfNeighbors = tree.query(X[row].reshape(1,-1), k = self.k+1)

            distances = distances[0][1:]
            indicesOfNeighbors = indicesOfNeighbors[0][1:]

            distanceMap = np.append(distanceMap, np.array(distances).reshape(1,self.k), axis=0)
            labels = np.append(labels, np.array(Y[indicesOfNeighbors]).reshape(1,self.k),axis=0)

        for c in classes:
            nTraining = np.sum(Y == c)
            labelTmp = labels[Y.ravel() == c,:]

            tmpKNNClass = labelTmp.ravel()
            TSOri = np.append(TSOri, len(tmpKNNClass[tmpKNNClass == c]) / (nTraining*float(self.k)))

        return distanceMap, labels, TSOri    
Developer: timo-stoettner | Project: ENN | Lines: 28 | Source: enn.py

Example 7: kdtree

def kdtree(data, lake_matrix, k_neighbors = 10, leaf_size = 20):
    # training
    kdtree = KDTree(data, leaf_size=leaf_size, metric='euclidean')

    # testing
    distances, indices = kdtree.query(lake_matrix, k=k_neighbors)
    return np.array(indices), distances
Developer: GeysaFernandes | Project: LakeWater | Lines: 7 | Source: alignment_candidates.py
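An illustrative call with synthetic arrays (not the project's real lake data), just to show the expected shapes:

import numpy as np

rng = np.random.default_rng(4)
data = rng.random((5000, 12))         # reference samples
lake_matrix = rng.random((100, 12))   # query samples

indices, distances = kdtree(data, lake_matrix, k_neighbors=10, leaf_size=20)
print(indices.shape, distances.shape)   # (100, 10) (100, 10)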

Example 8: _hdbscan_prims_kdtree

def _hdbscan_prims_kdtree(X, min_samples=5, alpha=1.0,
                          metric='minkowski', p=2, leaf_size=40, gen_min_span_tree=False):
    if metric == 'minkowski':
        if p is None:
            raise TypeError('Minkowski metric given but no p value supplied!')
        if p < 0:
            raise ValueError('Minkowski metric with negative p value is not defined!')
    elif p is None:
        p = 2  # Unused, but needs to be integer; assume euclidean

    dim = X.shape[0]
    min_samples = min(dim - 1, min_samples)

    tree = KDTree(X, metric=metric, leaf_size=leaf_size)

    dist_metric = DistanceMetric.get_metric(metric)

    core_distances = tree.query(X, k=min_samples,
                                dualtree=True,
                                breadth_first=True)[0][:, -1]
    min_spanning_tree = mst_linkage_core_cdist(X, core_distances, dist_metric, alpha)

    min_spanning_tree = min_spanning_tree[np.argsort(min_spanning_tree.T[2]), :]

    single_linkage_tree = label(min_spanning_tree)

    return single_linkage_tree, None
Developer: xsongx | Project: hdbscan | Lines: 27 | Source: hdbscan_.py

Example 9: study_redmapper_lrg_3d

def study_redmapper_lrg_3d(hemi='north'):
    # create 3d grid object
    grid = grid3d(hemi=hemi)
    
    # load SDSS data
    sdss = load_sdss_data_both_catalogs(hemi)
    
    # load redmapper catalog
    rm = load_redmapper(hemi=hemi)
    
    # get XYZ positions (Mpc) of both datasets
    x_sdss, y_sdss, z_sdss = grid.xyz_from_radecz(sdss['ra'], sdss['dec'], sdss['z'], applyzcut=False)
    x_rm, y_rm, z_rm = grid.xyz_from_radecz(rm['ra'], rm['dec'], rm['z_spec'], applyzcut=False)
    pos_sdss = np.vstack([x_sdss, y_sdss, z_sdss]).T
    pos_rm = np.vstack([x_rm, y_rm, z_rm]).T

    # build a couple of KDTrees, one for SDSS, one for RM.
    from sklearn.neighbors import KDTree
    tree_sdss = KDTree(pos_sdss, leaf_size=30)
    tree_rm = KDTree(pos_rm, leaf_size=30)

    lrg_counts = tree_sdss.query_radius(pos_rm, 100., count_only=True)
    pl.clf()
    pl.hist(lrg_counts, bins=50)
    
    
    ipdb.set_trace()
Developer: amanzotti | Project: vksz | Lines: 27 | Source: vksz.py

Example 10: match

def match(x1, y1, x2=None, y2=None, k=5, kdt=None):
    X2 = np.vstack([x2, y2]).T
    X1 = np.vstack([x1, y1]).T
    if kdt is None:
        kdt = KDTree(X2, leaf_size=30, metric='euclidean')
    dists, inds = kdt.query(X1, k=k, return_distance=True)
    return dists, inds, kdt
Developer: bd-j | Project: pire | Lines: 7 | Source: photo.py
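A hedged usage sketch with synthetic coordinates: the first call builds the tree on (x2, y2) and returns it, so later calls against the same reference catalog can pass kdt= and skip the rebuild.

import numpy as np

rng = np.random.default_rng(5)
x1, y1 = rng.random(200), rng.random(200)       # catalog to match
x2, y2 = rng.random(5000), rng.random(5000)     # reference catalog

dists, inds, kdt = match(x1, y1, x2=x2, y2=y2, k=5)
dists2, inds2, _ = match(x1 + 0.01, y1, x2=x2, y2=y2, k=5, kdt=kdt)  # reuses the tree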

Example 11: concat_features_by_neighbors

def concat_features_by_neighbors(df_labels, df_features,
                                 X_names=['Offense Type'],
                                 grid=["Latitude", "Longitude"],
                                 radius=1./500.,
                                 scale=np.array([1.,1.])):

    df_labels = df_labels.dropna(subset=grid)
    df_features = df_features.dropna(subset=grid)

    X = df_features[X_names].to_numpy()
    xy_features = df_features[grid].to_numpy()
    xy_labels = df_labels[grid].to_numpy()
    tree = KDTree(xy_features*scale)

    vocabulary = set()
    features = []
    for nei in tree.query_radius(xy_labels*scale, radius):
        U, I = np.unique(X[nei], return_inverse=True)
        D = dict(zip(U, np.bincount(I)))
        vocabulary.update(D)
        features.append(D)

    vocabulary = sorted(vocabulary)
    return pd.concat([df_labels, pd.DataFrame([[fi.get(v) for v in vocabulary]
                      for fi in features],
                      index=df_labels.index,
                      columns=vocabulary).fillna(0.)], axis=1)
Developer: joubin | Project: MSBigDataHackathon | Lines: 26 | Source: gps_join.py
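A hypothetical invocation on made-up tables (the column names Latitude, Longitude and Offense Type follow the function's defaults; the data is random): each label row gains one count column per offense type seen within the radius.

import numpy as np
import pandas as pd

rng = np.random.default_rng(7)
df_labels = pd.DataFrame({'Latitude': 40 + 0.1*rng.random(100),
                          'Longitude': -74 + 0.1*rng.random(100)})
df_features = pd.DataFrame({'Latitude': 40 + 0.1*rng.random(1000),
                            'Longitude': -74 + 0.1*rng.random(1000),
                            'Offense Type': rng.choice(['theft', 'assault'], 1000)})

out = concat_features_by_neighbors(df_labels, df_features)
print(out.columns.tolist())   # Latitude, Longitude, plus one count column per offense type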

Example 12: uniform_points_points_sampling

def uniform_points_points_sampling(limits, points, n):
    """Select the spatial uniform points in the sample by sampling uniform
    spatial points and getting the nearest ones in the available ones.

    Parameters
    ----------
    limits: numpy.ndarray, shape (2, 2)
        the limits of the space: the four limits of the square that define
        the whole retrievable region.
    points: numpy.ndarray
        the points in the space selected.
    n: int
        the number of samples we want.

    Returns
    -------
    indices: numpy.ndarray, shape(n)
        the indices of the samples.

    """

    ## 0. Initialize retriever
    retriever = KDTree(points)
    ## 1. Compute spatial uniform points
    points_s = uniform_points_sampling(limits, n)
    ## 2. Get the nearest points in the sample
    result = retriever.query(points_s, k=1)
    indices = result[1]
    indices = indices.astype(int)
    return indices
Developer: tgquintela | Project: pySpatialTools | Lines: 30 | Source: sampling_from_points.py

Example 13: _hdbscan_prims_kdtree

def _hdbscan_prims_kdtree(X, min_samples=5, alpha=1.0,
                          metric='minkowski', p=2, leaf_size=40, gen_min_span_tree=False):
    if metric == 'minkowski':
        if p is None:
            raise TypeError('Minkowski metric given but no p value supplied!')
        if p < 0:
            raise ValueError('Minkowski metric with negative p value is not defined!')
    elif p is None:
        p = 2  # Unused, but needs to be integer; assume euclidean

    size = X.shape[0]
    min_samples = min(size - 1, min_samples)

    tree = KDTree(X, metric=metric, leaf_size=leaf_size)

    # TODO: deal with p for Minkowski appropriately
    dist_metric = DistanceMetric.get_metric(metric)

    #Get distance to kth nearest neighbour
    core_distances = tree.query(X, k=min_samples,
                                dualtree=True,
                                breadth_first=True)[0][:, -1]
    # Mutual reachability distance is implicit in mst_linkage_core_cdist
    min_spanning_tree = mst_linkage_core_cdist(X, core_distances, dist_metric, alpha)

    #Sort edges of the min_spanning_tree by weight
    min_spanning_tree = min_spanning_tree[np.argsort(min_spanning_tree.T[2]), :]

    #Convert edge list into standard hierarchical clustering format
    single_linkage_tree = label(min_spanning_tree)

    return single_linkage_tree, None
Developer: h-krishna | Project: hdbscan | Lines: 32 | Source: hdbscan_.py

Example 14: test_kdtree_projection

def test_kdtree_projection(datas):

    from sklearn.neighbors import KDTree
    from sklearn import random_projection


    # datas = parse()
    Fs = fingerprints(datas)

    # The random projection
    transformer = random_projection.GaussianRandomProjection(n_components = 128)
    Fs_new = transformer.fit_transform(Fs)
    print(Fs_new.shape)

    tree = KDTree(Fs_new, leaf_size=20)

    # Select a random target
    target_i = random.choice(range(len( datas )))
    target = datas[target_i]
    Tf = np.vstack([fingerprint(target)])
    Tf_new = transformer.transform(Tf)

    # Match it
    with timer(10):
        for _ in range(10):
            dist, ind = tree.query(Tf_new, k=3)
    assert datas[ind[0][0]] == datas[target_i]
Developer: gdanezis | Project: refreerank | Lines: 27 | Source: test_extractrefdata.py
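Since fingerprints() and the timer helper are project-specific, here is a self-contained sketch of the same idea on random data: project high-dimensional vectors down with a Gaussian random projection, index the projected points with a KDTree, and check that a projected query finds itself.

import numpy as np
from sklearn.neighbors import KDTree
from sklearn.random_projection import GaussianRandomProjection

rng = np.random.default_rng(6)
Fs = rng.random((2000, 1000))                 # stand-in fingerprint matrix

transformer = GaussianRandomProjection(n_components=128, random_state=0)
Fs_new = transformer.fit_transform(Fs)        # (2000, 128)
tree = KDTree(Fs_new, leaf_size=20)

Tf_new = transformer.transform(Fs[42:43])     # project one original row
dist, ind = tree.query(Tf_new, k=3)
assert ind[0][0] == 42                        # the row is its own nearest neighbour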

Example 15: margin_new

def margin_new(indices, k, X, y):
    margins = []
    kd_tree = KDTree(X)
    for img_index in indices:
        margin = 0
        dist_to_class = 0
        dist_to_others = 0
        current_class = y[img_index]
        dists, neighbour_indices = kd_tree.query(X[img_index].reshape((1, X[img_index].shape[0])),
                                                 k)
        classes = {}
        for i in range(neighbour_indices[0].shape[0]):
            index = neighbour_indices[0][i]
            if y[index] in classes:
                classes[y[index]] += dists[0][i]
            else:
                classes[y[index]] = dists[0][i]
        dist_to_class = classes[current_class]
        classes.pop(current_class)
        # print classes.items()
        if classes:
            dist_to_others = min(classes.items(), key=lambda x: x[1])[1]
        margin = dist_to_class - dist_to_others
        margins.append(margin)
    return margins
Developer: penguin138 | Project: mipt_ml | Lines: 25 | Source: noise.py
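A quick, hypothetical call on random data (features and labels are made up), just to illustrate the argument types: each returned margin is the summed neighbour distance to the point's own class minus the smallest summed distance to any other class among the k neighbours.

import numpy as np

rng = np.random.default_rng(8)
X = rng.random((500, 16))                  # feature vectors
y = rng.integers(0, 3, 500)                # three arbitrary class labels

margins = margin_new(indices=[0, 1, 2], k=10, X=X, y=y)
print(margins)                             # one margin value per requested index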


Note: The sklearn.neighbors.KDTree class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors; the source code is copyrighted by the original authors, and distribution or use should follow the license of the corresponding project. Do not reproduce without permission.