

Python spatial.distance_matrix Method Code Examples

This article collects typical usage examples of the scipy.spatial.distance_matrix method in Python. If you are wondering how exactly to call spatial.distance_matrix, how it is used in practice, or what real-world examples look like, the curated code examples below may help. You can also explore further usage examples for the containing module, scipy.spatial.


The following presents 15 code examples of the spatial.distance_matrix method, sorted by popularity by default.
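
Before the collected examples, here is a minimal sketch of the function itself. Its documented signature is distance_matrix(x, y, p=2, threshold=1000000); it returns the matrix of pairwise Minkowski distances between the rows of x and the rows of y (toy data below):

import numpy as np
from scipy.spatial import distance_matrix

x = np.array([[0.0, 0.0], [3.0, 4.0]])                # 2 points in 2-D
y = np.array([[0.0, 0.0], [6.0, 8.0], [1.0, 1.0]])    # 3 points in 2-D

d = distance_matrix(x, y)   # shape (2, 3); d[i, j] is the Euclidean distance between x[i] and y[j]
print(d)                    # approximately [[0., 10., 1.414], [5., 5., 3.606]]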

Example 1: hyperball

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def hyperball(ndim, radius):
    """Return a binary morphological filter containing pixels within `radius`.

    Parameters
    ----------
    ndim : int
        The number of dimensions of the filter.
    radius : int
        The radius of the filter.

    Returns
    -------
    ball : array of bool, shape [2 * radius + 1,] * ndim
        The required structural element
    """
    size = 2 * radius + 1
    center = [(radius,) * ndim]

    coords = np.mgrid[[slice(None, size),] * ndim].reshape(ndim, -1).T
    distances = np.ravel(spatial.distance_matrix(coords, center))
    selector = distances <= radius

    ball = np.zeros((size,) * ndim, dtype=bool)
    ball.ravel()[selector] = True
    return ball 
Developer: jni | Project: skan | Lines of code: 27 | Source: pre.py
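
A quick usage sketch of the example above (assuming numpy is imported as np and the hyperball function is defined as shown): with ndim=2 and radius=1 the result is a 3 x 3 "plus"-shaped structuring element.

selem = hyperball(2, 1)
print(selem.astype(int))
# [[0 1 0]
#  [1 1 1]
#  [0 1 0]]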

Example 2: _krig_matrix

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def _krig_matrix(self, src, drift):
        """Sets up the kriging system for a configuration of source points.
        """
        # the basic covariance matrix
        var_matrix = self.cov_func(spatial.distance_matrix(src, src))
        # the extended matrix, initialized to ones
        edk_matrix = np.ones((len(src) + 2, len(src) + 2))

        # adding entries for the first lagrange multiplier for the ordinary
        # kriging part
        edk_matrix[:-2, :-2] = var_matrix
        edk_matrix[-2, -2] = 0.0

        # adding entries for the second lagrange multiplier for the edk part
        edk_matrix[:-2, -1] = drift
        edk_matrix[-1, :-2] = drift
        edk_matrix[-2:, -1] = 0.0
        edk_matrix[-1, -2:] = 0.0

        return edk_matrix 
Developer: wradlib | Project: wradlib | Lines of code: 22 | Source: ipol.py
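
Since only cov_func is used from self, the method can be exercised standalone with a small dummy object. The sketch below substitutes a hypothetical Gaussian covariance model for wradlib's and uses toy data; it is not the library's own API.

import numpy as np
from scipy import spatial

class _DummyModel:
    def cov_func(self, h):
        # hypothetical Gaussian covariance, only for this sketch
        return np.exp(-(h ** 2))

src = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])   # 3 source points
drift = np.array([10.0, 12.0, 11.0])                   # external drift values at the sources
A = _krig_matrix(_DummyModel(), src, drift)            # 5 x 5 extended kriging matrix
print(A.round(3))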

Example 3: test_distance_matrix

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def test_distance_matrix():
    m = 10
    n = 11
    k = 4
    np.random.seed(1234)
    xs = np.random.randn(m,k)
    ys = np.random.randn(n,k)
    ds = distance_matrix(xs,ys)
    assert_equal(ds.shape, (m,n))
    for i in range(m):
        for j in range(n):
            assert_almost_equal(distance(xs[i],ys[j]),ds[i,j]) 
Developer: ktraunmueller | Project: Computable | Lines of code: 14 | Source: test_kdtree.py

Example 4: test_distance_matrix_looping

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def test_distance_matrix_looping():
    m = 10
    n = 11
    k = 4
    np.random.seed(1234)
    xs = np.random.randn(m,k)
    ys = np.random.randn(n,k)
    ds = distance_matrix(xs,ys)
    dsl = distance_matrix(xs,ys,threshold=1)
    assert_equal(ds,dsl) 
Developer: ktraunmueller | Project: Computable | Lines of code: 12 | Source: test_kdtree.py

Example 5: _cof

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def _cof(self, X):
        """
        Connectivity-Based Outlier Factor (COF) Algorithm
        This function is called internally to calculate the
        Connectivity-Based Outlier Factor (COF) as an outlier
        score for observations.
        :return: numpy array containing COF scores for observations.
                 The greater the COF, the greater the outlierness.
        """
        dist_matrix = np.array(distance_matrix(X, X))
        sbn_path_index, ac_dist, cof_ = [], [], []
        for i in range(X.shape[0]):
            sbn_path = sorted(range(len(dist_matrix[i])),
                              key=dist_matrix[i].__getitem__)
            sbn_path_index.append(sbn_path[1: self.n_neighbors_ + 1])
            cost_desc = []
            for j in range(self.n_neighbors_):
                cost_desc.append(
                    np.min(dist_matrix[sbn_path[j + 1]][sbn_path][:j + 1]))
            acd = []
            for _h, cost_ in enumerate(cost_desc):
                neighbor_add1 = self.n_neighbors_ + 1
                acd.append(((2. * (neighbor_add1 - (_h + 1))) / (
                        neighbor_add1 * self.n_neighbors_)) * cost_)
            ac_dist.append(np.sum(acd))
        for _g in range(X.shape[0]):
            cof_.append((ac_dist[_g] * self.n_neighbors_) /
                        np.sum(itemgetter(*sbn_path_index[_g])(ac_dist)))
        return np.nan_to_num(cof_) 
Developer: yzhao062 | Project: pyod | Lines of code: 31 | Source: cof.py

Example 6: test_distance_matrix

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def test_distance_matrix():
    m = 10
    n = 11
    k = 4
    np.random.seed(1234)
    xs = np.random.randn(m,k)
    ys = np.random.randn(n,k)
    ds = distance_matrix(xs,ys)
    assert_equal(ds.shape, (m,n))
    for i in range(m):
        for j in range(n):
            assert_almost_equal(minkowski_distance(xs[i],ys[j]),ds[i,j]) 
Developer: Relph1119 | Project: GraphicDesignPatternByPython | Lines of code: 14 | Source: test_kdtree.py

Example 7: get_umatrix

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def get_umatrix(input_vects, weights, m, n):
    """ Generates an n x m u-matrix of the SOM's weights and bmu indices of all the input data points

    Used to visualize higher-dimensional data. Shows the average distance between a SOM unit and its neighbors.
    When displayed, areas of a darker color separated by lighter colors correspond to clusters of units which
    encode similar information.
    :param weights: SOM weight matrix, `ndarray`
    :param m: Rows of neurons
    :param n: Columns of neurons
    :return: m x n u-matrix `ndarray` 
    :return: input_size x 1 bmu indices 'ndarray'
    """
    umatrix = np.zeros((m * n, 1))
    # Get the location of the neurons on the map to figure out their neighbors. I know I already have this in the
    # SOM code but I put it here too to make it easier to follow.
    neuron_locs = list()
    for i in range(m):
        for j in range(n):
            neuron_locs.append(np.array([i, j]))
    # Get the map distance between each neuron (i.e. not the weight distance).
    neuron_distmat = distance_matrix(neuron_locs, neuron_locs)

    for i in range(m * n):
        # Get the indices of the units which neighbor i
        neighbor_idxs = neuron_distmat[i] <= 1  # Change this to `< 2` if you want to include diagonal neighbors
        # Get the weights of those units
        neighbor_weights = weights[neighbor_idxs]
        # Get the average distance between unit i and all of its neighbors
        # Expand dims to broadcast to each of the neighbors
        umatrix[i] = distance_matrix(np.expand_dims(weights[i], 0), neighbor_weights).mean()

    bmu_indices = []
    for vect in input_vects:
        min_index = min([i for i in range(len(list(weights)))],
                        key=lambda x: np.linalg.norm(vect-
                                                     list(weights)[x]))
        bmu_indices.append(neuron_locs[min_index])
        
    return umatrix, bmu_indices 
Developer: cgorman | Project: tensorflow-som | Lines of code: 41 | Source: example.py
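
A synthetic usage sketch (random toy weights and inputs; assumes numpy and distance_matrix are imported as in the example above):

m, n, dim = 4, 5, 3                       # 4 x 5 SOM grid with 3-dimensional weights (toy values)
rng = np.random.RandomState(0)
weights = rng.rand(m * n, dim)            # one weight vector per SOM unit
input_vects = rng.rand(10, dim)           # 10 synthetic input samples

umatrix, bmu_indices = get_umatrix(input_vects, weights, m, n)
print(umatrix.reshape(m, n))              # average distance from each unit to its neighbors
print(bmu_indices[0])                     # grid location of the BMU for the first sample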

Example 8: greedy_k_center

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def greedy_k_center(self, labeled, unlabeled, amount):

        greedy_indices = []

        # get the minimum distances between the labeled and unlabeled examples (iteratively, to avoid memory issues):
        min_dist = np.min(distance_matrix(labeled[0, :].reshape((1, labeled.shape[1])), unlabeled), axis=0)
        min_dist = min_dist.reshape((1, min_dist.shape[0]))
        for j in range(1, labeled.shape[0], 100):
            if j + 100 < labeled.shape[0]:
                dist = distance_matrix(labeled[j:j+100, :], unlabeled)
            else:
                dist = distance_matrix(labeled[j:, :], unlabeled)
            min_dist = np.vstack((min_dist, np.min(dist, axis=0).reshape((1, min_dist.shape[1]))))
            min_dist = np.min(min_dist, axis=0)
            min_dist = min_dist.reshape((1, min_dist.shape[0]))

        # iteratively insert the farthest index and recalculate the minimum distances:
        farthest = np.argmax(min_dist)
        greedy_indices.append(farthest)
        for i in range(amount-1):
            dist = distance_matrix(unlabeled[greedy_indices[-1], :].reshape((1,unlabeled.shape[1])), unlabeled)
            min_dist = np.vstack((min_dist, dist.reshape((1, min_dist.shape[1]))))
            min_dist = np.min(min_dist, axis=0)
            min_dist = min_dist.reshape((1, min_dist.shape[0]))
            farthest = np.argmax(min_dist)
            greedy_indices.append(farthest)

        return np.array(greedy_indices) 
Developer: dsgissin | Project: DiscriminativeActiveLearning | Lines of code: 30 | Source: query_methods.py
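
Because self is never referenced inside the method body, it can be tried standalone on synthetic data by passing None in its place (a sketch, not the project's intended calling convention):

import numpy as np
from scipy.spatial import distance_matrix

rng = np.random.RandomState(0)
labeled = rng.rand(250, 8)       # feature vectors of already-labeled samples (toy data)
unlabeled = rng.rand(1000, 8)    # candidate pool to select from
picked = greedy_k_center(None, labeled, unlabeled, amount=5)
print(picked)                    # indices into `unlabeled`, chosen farthest-first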

Example 9: create_distances

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def create_distances(coords):
    """Create the distance matrix from a set of 3D coordinates.
    Note that we transform the element 0.0 in the matrix into a large value
    for processing by Gaussian exp(-d^2), where d is the distance.
    """
    distance_matrix = spatial.distance_matrix(coords, coords)
    return np.where(distance_matrix == 0.0, 1e6, distance_matrix) 
Developer: masashitsubaki | Project: molecularGNN_3Dstructure | Lines of code: 9 | Source: preprocess.py
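
A small usage sketch illustrating the 1e6 substitution mentioned in the docstring (toy coordinates; assumes numpy and scipy.spatial are imported as in the example):

coords = np.array([[0.0, 0.0, 0.0],
                   [1.0, 0.0, 0.0],
                   [0.0, 2.0, 0.0]])
d = create_distances(coords)
print(np.exp(-d ** 2))   # the Gaussian kernel; diagonal entries are ~0 because d there is 1e6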

Example 10: batched_delta_hyp

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def batched_delta_hyp(X, n_tries=10, batch_size=1500):
    vals = []
    for i in tqdm(range(n_tries)):
        idx = np.random.choice(len(X), batch_size)
        X_batch = X[idx]
        distmat = distance_matrix(X_batch, X_batch)
        diam = np.max(distmat)
        delta_rel = delta_hyp(distmat) / diam
        vals.append(delta_rel)
    return np.mean(vals), np.std(vals) 
Developer: leymir | Project: hyperbolic-image-embeddings | Lines of code: 12 | Source: delta.py

Example 11: get_delta

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def get_delta(loader):
    """
    computes delta value for image data by extracting features using VGG network;
    input -- data loader for images
    """
    vgg = torchvision.models.vgg16(pretrained=True)
    vgg_feats = vgg.features
    vgg_classifier = nn.Sequential(*list(vgg.classifier.children())[:-1])

    vgg_part = nn.Sequential(vgg_feats, Flatten(), vgg_classifier).to(device)
    vgg_part.eval()

    all_features = []
    for i, (batch, _) in enumerate(loader):
        with torch.no_grad():
            batch = batch.to(device)
            all_features.append(vgg_part(batch).detach().cpu().numpy())

    all_features = np.concatenate(all_features)
    idx = np.random.choice(len(all_features), 1500)
    all_features_small = all_features[idx]

    dists = distance_matrix(all_features_small, all_features_small)
    delta = delta_hyp(dists)
    diam = np.max(dists)
    return delta, diam 
Developer: leymir | Project: hyperbolic-image-embeddings | Lines of code: 28 | Source: delta.py

Example 12: trimesh_pull_points_numpy

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def trimesh_pull_points_numpy(mesh, points):
    # preprocess
    i_k = mesh.index_key()
    fk_fi = {fkey: index for index, fkey in enumerate(mesh.faces())}
    vertices = array(mesh.vertices_attributes('xyz'), dtype=float64).reshape((-1, 3))
    triangles = array([mesh.face_coordinates(fkey) for fkey in mesh.faces()], dtype=float64)
    points = array(points, dtype=float64).reshape((-1, 3))
    closest_vis = argmin(distance_matrix(points, vertices), axis=1)
    # transformation matrices
    # ?
    pulled_points = []
    # pull every point onto the mesh
    for i in range(points.shape[0]):
        point = points[i]
        closest_vi = closest_vis[i]
        closest_vk = i_k[closest_vi]
        closest_tris = [fk_fi[fk] for fk in mesh.vertex_faces(closest_vk, ordered=True) if fk is not None]
        # process the connected triangles
        d, p, c = _find_closest_component(
            point,
            vertices,
            triangles,
            closest_tris,
            closest_vi
        )
        pulled_points.append(p)
    return pulled_points


# ==============================================================================
# helpers
# ============================================================================== 
Developer: compas-dev | Project: compas | Lines of code: 34 | Source: pull_numpy.py

Example 13: create_datasets

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def create_datasets(dataset, physical_property, device):

    dir_dataset = '../dataset/' + dataset + '/'

    """Initialize atom_dict, in which
    each key is an atom type and each value is its index.
    """
    atom_dict = defaultdict(lambda: len(atom_dict))

    def create_dataset(filename):

        print(filename)

        """Load a dataset."""
        with open(dir_dataset + filename, 'r') as f:
            property_types = f.readline().strip().split()
            data_original = f.read().strip().split('\n\n')

        """The physical_property is an energy, HOMO, or LUMO."""
        property_index = property_types.index(physical_property)

        dataset = []

        for data in data_original:

            data = data.strip().split('\n')
            idx = data[0]
            property = float(data[-1].split()[property_index])

            """Load the atoms and their coordinates of a molecular data."""
            atoms, atom_coords = [], []
            for atom_xyz in data[1:-1]:
                atom, x, y, z = atom_xyz.split()
                atoms.append(atom)
                xyz = [float(v) for v in [x, y, z]]
                atom_coords.append(xyz)

            """Create each data with the above defined functions."""
            atoms = create_atoms(atoms, atom_dict)
            distance_matrix = create_distances(atom_coords)
            molecular_size = len(atoms)

            """Transform the above each data of numpy
            to pytorch tensor on a device (i.e., CPU or GPU).
            """
            atoms = torch.LongTensor(atoms).to(device)
            distance_matrix = torch.FloatTensor(distance_matrix).to(device)
            property = torch.FloatTensor([[property]]).to(device)

            dataset.append((atoms, distance_matrix, molecular_size, property))

        return dataset

    dataset_train = create_dataset('data_train.txt')
    dataset_train, dataset_dev = split_dataset(dataset_train, 0.9)
    dataset_test = create_dataset('data_test.txt')

    N_atoms = len(atom_dict)

    return dataset_train, dataset_dev, dataset_test, N_atoms 
Developer: masashitsubaki | Project: molecularGNN_3Dstructure | Lines of code: 62 | Source: preprocess.py

Example 14: calculate_swept_area_velocities

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def calculate_swept_area_velocities(self, local_wind_speed, coord, x, y, z):
        """
        This method calculates and returns the wind speeds at each
        rotor swept area grid point for the turbine, interpolated from
        the flow field grid.

        Args:
            local_wind_speed (np.array): The wind speed at each grid point in
                the flow field (m/s).
            coord (:py:obj:`~.utilities.Vec3`): The coordinate of the turbine.
            x (np.array): The x-coordinates of the flow field grid.
            y (np.array): The y-coordinates of the flow field grid.
            z (np.array): The z-coordinates of the flow field grid.

        Returns:
            np.array: The wind speed at each rotor grid point
            for the turbine (m/s).
        """
        u_at_turbine = local_wind_speed

        # TODO:
        # # PREVIOUS METHOD========================
        # # UNCOMMENT IF ANY ISSUE IS UNCOVERED WITH THE NEW METHOD
        # x_grid = x
        # y_grid = y
        # z_grid = z

        # yPts = np.array([point[0] for point in self.grid])
        # zPts = np.array([point[1] for point in self.grid])

        # # interpolate from the flow field to get the flow field at the grid
        # # points
        # dist = [np.sqrt((coord.x1 - x_grid)**2 \
        #      + (coord.x2 + yPts[i] - y_grid) **2 \
        #      + (self.hub_height + zPts[i] - z_grid)**2) \
        #      for i in range(len(yPts))]
        # idx = [np.where(dist[i] == np.min(dist[i])) for i in range(len(yPts))]
        # data = [np.mean(u_at_turbine[idx[i]]) for i in range(len(yPts))]
        # # PREVIOUS METHOD========================

        # # NEW METHOD========================
        # Sort by distance
        flow_grid_points = np.column_stack([x.flatten(), y.flatten(), z.flatten()])

        # Set up a grid array
        y_array = np.array(self.grid)[:, 0] + coord.x2
        z_array = np.array(self.grid)[:, 1] + self.hub_height
        x_array = np.ones_like(y_array) * coord.x1
        grid_array = np.column_stack([x_array, y_array, z_array])

        ii = np.argmin(distance_matrix(flow_grid_points, grid_array), axis=0)

        # return np.array(data)
        return np.array(u_at_turbine.flatten()[ii]) 
Developer: NREL | Project: floris | Lines of code: 57 | Source: turbine.py
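
The core of the new method is the argmin over the distance matrix between the flow-field grid and the rotor grid; below is a standalone sketch of that lookup with synthetic arrays (not the FLORIS turbine object):

import numpy as np
from scipy.spatial import distance_matrix

flow_grid_points = np.random.rand(500, 3)    # flow-field sample locations (toy data)
u_at_flow_points = np.random.rand(500)       # wind speed at those locations
rotor_grid = np.random.rand(25, 3)           # rotor swept-area grid points

# for each rotor grid point, index of the nearest flow-field point
ii = np.argmin(distance_matrix(flow_grid_points, rotor_grid), axis=0)
u_at_rotor = u_at_flow_points[ii]
print(u_at_rotor.shape)                      # (25,)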

Example 15: closest_points_in_cloud_numpy

# Required import: from scipy import spatial [as alias]
# Or: from scipy.spatial import distance_matrix [as alias]
def closest_points_in_cloud_numpy(points, cloud, threshold=10**7, distances=True, num_nbrs=1):
    """Find the closest points in a point cloud to a set of sample points.

    Parameters
    ----------
    points : array, list
        The sample points (n, 3).
    cloud : array, list
        The cloud points to compare to (m, 3).
    threshold : float
        Points are checked within this distance.
    distances : bool
        Return the distance matrix.
    num_nbrs : int
        Number of nearest neighbours to return per point.

    Returns
    -------
    list
        Indices of the closest points in the cloud per point in points.
    array
        Distances between points and closest points in cloud (n x m).

    Notes
    -----
    Items in cloud further from items in points than threshold return zero
    distance and will affect the indices returned if not set suitably high.

    Examples
    --------
    >>> a = np.random.rand(4, 3)
    >>> b = np.random.rand(4, 3)
    >>> indices, distances = closest_points_in_cloud_numpy(a, b, distances=True)
    [1, 2, 0, 3]
    array([[ 1.03821946,  0.66226402,  0.67964346,  0.98877891],
           [ 0.4650432 ,  0.54484186,  0.36158995,  0.60385484],
           [ 0.19562088,  0.73240154,  0.50235761,  0.51439644],
           [ 0.84680233,  0.85390316,  0.72154983,  0.50432293]])

    """
    from numpy import asarray
    from numpy import argmin
    from numpy import argpartition
    from scipy.spatial import distance_matrix

    points = asarray(points).reshape((-1, 3))
    cloud = asarray(cloud).reshape((-1, 3))
    d_matrix = distance_matrix(points, cloud, threshold=threshold)
    if num_nbrs == 1:
        indices = argmin(d_matrix, axis=1)
    else:
        indices = argpartition(d_matrix, num_nbrs, axis=1)
    if distances:
        return indices, d_matrix
    return indices 
Developer: compas-dev | Project: compas | Lines of code: 55 | Source: distance.py
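
A quick usage sketch (the function performs its own numpy/scipy imports, so only input arrays are needed; toy data):

import numpy as np

points = np.random.rand(4, 3)
cloud = np.random.rand(10, 3)
indices, dists = closest_points_in_cloud_numpy(points, cloud, distances=True)
print(indices)        # nearest cloud point index for each sample point
print(dists.shape)    # (4, 10) full distance matrix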


Note: The scipy.spatial.distance_matrix examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.