This article collects typical usage examples of the pairwise_distances method from Python's sklearn.metrics.pairwise module. If you are wondering how pairwise.pairwise_distances is used in practice, what it does, or what working examples look like, the curated code examples below may help. You can also explore further usage examples from the containing module, sklearn.metrics.pairwise.
The following shows 15 code examples of the pairwise.pairwise_distances method, ordered by popularity by default.
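Before the examples, here is a minimal standalone sketch of the method itself, using only the documented scikit-learn API: given X with shape (n_samples_X, n_features) and an optional Y with shape (n_samples_Y, n_features), pairwise_distances returns the (n_samples_X, n_samples_Y) matrix of distances under the chosen metric.

# Minimal usage sketch (not taken from the examples below).
import numpy as np
from sklearn.metrics.pairwise import pairwise_distances

X = np.random.RandomState(0).rand(4, 3)
Y = np.random.RandomState(1).rand(2, 3)

D_xx = pairwise_distances(X)                         # (4, 4) self-distances, euclidean by default
D_xy = pairwise_distances(X, Y, metric='manhattan')  # (4, 2) cross-distances
print(D_xx.shape, D_xy.shape)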
Example 1: test_paired_distances
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def test_paired_distances(metric, func):
# Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
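For context, paired_distances returns one distance per aligned row pair (a length-n vector), while pairwise_distances returns the full n-by-m matrix; that is why the test above recovers the paired values with np.diag. A small sketch of the relationship, using only public scikit-learn functions:

import numpy as np
from sklearn.metrics.pairwise import paired_distances, pairwise_distances

X = np.random.RandomState(0).rand(5, 4)
Y = np.random.RandomState(1).rand(5, 4)
paired = paired_distances(X, Y)              # shape (5,): distance(X[i], Y[i])
full = pairwise_distances(X, Y)              # shape (5, 5): distance(X[i], Y[j])
assert np.allclose(paired, np.diag(full))    # the diagonal gives the paired distances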
Example 2: test_trustworthiness_precomputed_deprecation
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def test_trustworthiness_precomputed_deprecation():
# FIXME: Remove this test in v0.23
# Use of the flag `precomputed` in trustworthiness parameters has been
# deprecated, but will still work until v0.23.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
assert_equal(assert_warns(DeprecationWarning, trustworthiness,
pairwise_distances(X), X, precomputed=True), 1.)
assert_equal(assert_warns(DeprecationWarning, trustworthiness,
pairwise_distances(X), X, metric='precomputed',
precomputed=True), 1.)
assert_raises(ValueError, assert_warns, DeprecationWarning,
trustworthiness, X, X, metric='euclidean', precomputed=True)
assert_equal(assert_warns(DeprecationWarning, trustworthiness,
pairwise_distances(X), X, metric='euclidean',
precomputed=True), 1.)
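The precomputed flag exercised above was removed once the deprecation period ended; on current scikit-learn versions the equivalent check is written with metric='precomputed' only (a sketch against the public sklearn.manifold.trustworthiness function):

import numpy as np
from sklearn.manifold import trustworthiness
from sklearn.metrics import pairwise_distances

rng = np.random.RandomState(0)
X = rng.randn(100, 2)
# the "embedding" is the data itself, so trustworthiness should be perfect (1.0)
score = trustworthiness(pairwise_distances(X), X, metric='precomputed')
print(score)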
Example 3: _run_answer_test
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64, copy=False)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
from scipy.sparse import csr_matrix
P = csr_matrix(pij_input)
neighbors = P.indices.astype(np.int64)
indptr = P.indptr.astype(np.int64)
_barnes_hut_tsne.gradient(P.data, pos_output, neighbors, indptr,
grad_bh, 0.5, 2, 1, skip_num_points=0)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
Example 4: construct_M
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def construct_M(X, k, gamma):
"""
This function constructs the M matrix described in the paper
"""
n_sample, n_feature = X.shape
Xt = X.T
D = pairwise_distances(X)
# sort the distance matrix D in ascending order
idx = np.argsort(D, axis=1)
# choose the k-nearest neighbors for each instance
idx_new = idx[:, 0:k+1]
H = np.eye(k+1) - 1/(k+1) * np.ones((k+1, k+1))
I = np.eye(k+1)
Mi = np.zeros((n_sample, n_sample))
for i in range(n_sample):
Xi = Xt[:, idx_new[i, :]]
        Xi_tilde = np.dot(Xi, H)
        Bi = np.linalg.inv(np.dot(Xi_tilde.T, Xi_tilde) + gamma*I)
        Si = np.zeros((n_sample, k+1))
        for q in range(k+1):
            # put a 1 at the row of the q-th nearest neighbor of sample i
            Si[idx_new[i, q], q] = 1
        Mi = Mi + np.dot(np.dot(Si, np.dot(np.dot(H, Bi), H)), Si.T)
M = np.dot(np.dot(X.T, Mi), X)
return M
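A hypothetical call, only to illustrate the expected shapes (the data, k and gamma below are made up and not part of the original project):

import numpy as np
from sklearn.metrics.pairwise import pairwise_distances  # needed by construct_M

rng = np.random.RandomState(0)
X = rng.rand(20, 6)                 # 20 samples, 6 features
M = construct_M(X, k=5, gamma=0.1)
print(M.shape)                      # (6, 6): M is n_feature x n_feature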
Example 5: information_density
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def information_density(X: modALinput, metric: Union[str, Callable] = 'euclidean') -> np.ndarray:
"""
Calculates the information density metric of the given data using the given metric.
Args:
X: The data for which the information density is to be calculated.
metric: The metric to be used. Should take two 1d numpy.ndarrays for argument.
Todo:
Should work with all possible modALinput.
Perhaps refactor the module to use some stuff from sklearn.metrics.pairwise
Returns:
The information density for each sample.
"""
# inf_density = np.zeros(shape=(X.shape[0],))
# for X_idx, X_inst in enumerate(X):
# inf_density[X_idx] = sum(similarity_measure(X_inst, X_j) for X_j in X)
#
# return inf_density/X.shape[0]
similarity_mtx = 1/(1+pairwise_distances(X, X, metric=metric))
return similarity_mtx.mean(axis=1)
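A short usage sketch (modAL ships this helper in its density module, but here it is simply applied to a plain NumPy array as defined above):

import numpy as np

rng = np.random.RandomState(0)
X_pool = rng.rand(100, 5)
density = information_density(X_pool, metric='cosine')
print(density.shape)   # (100,): one score per sample; larger values mean denser neighbourhoods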
Example 6: _eval_retrieval
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def _eval_retrieval(PX, PY, GX, GY):
# D_{i, j} is the distance between the ith array from PX and the jth array from GX.
D = pairwise_distances(PX, GX, metric=args.method, n_jobs=-2)
Rank = np.argsort(D, axis=1)
# Evaluation
recall_1 = recall_at_k(Rank, PY, GY, k=1) # Recall @ K
print "{:8}{:8.2%}".format('Recall@1', recall_1)
recall_5 = recall_at_k(Rank, PY, GY, k=5) # Recall @ K
print "{:8}{:8.2%}".format('Recall@5', recall_5)
recall_10 = recall_at_k(Rank, PY, GY, k=10) # Recall @ K
print "{:8}{:8.2%}".format('Recall@10', recall_10)
map_value = mean_average_precision(Rank, PY, GY) # Mean Average Precision
print "{:8}{:8.2%}".format('MAP', map_value)
return recall_1, recall_5, recall_10, map_value
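recall_at_k and mean_average_precision are helpers from the surrounding project and are not shown here. Purely as an illustration (an assumption about their behaviour, not the original implementation), a recall@k over the rank matrix could look like this:

import numpy as np

def recall_at_k_sketch(Rank, PY, GY, k=1):
    # Rank[i] lists gallery indices sorted by ascending distance for probe i;
    # count a hit when any of the top-k gallery labels equals the probe label.
    PY, GY = np.asarray(PY), np.asarray(GY)
    hits = [np.any(GY[Rank[i, :k]] == PY[i]) for i in range(len(PY))]
    return float(np.mean(hits))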
Example 7: predict
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def predict(self, X):
"""
Classify the input data assigning the label of the nearest prototype
Keyword arguments:
X -- The feature vectors
"""
    classification = np.zeros(len(X))
    # compute distances to the prototypes (template matching)
    if self.distance_metric in ("euclidean", "minkowski", "manhattan", "mahalanobis"):
        distances = pairwise_distances(X, self.M_, metric=self.distance_metric)
    else:
        distances = pairwise_distances(X, self.M_, metric="euclidean")
    for i in range(len(X)):
        # assign the class of the nearest prototype
        classification[i] = self.outcomes[np.argmin(distances[i])]
    return classification
Example 8: transform
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def transform(self, X):
"""Transforms X to cluster-distance space.
Parameters
----------
X : {array-like, sparse matrix}, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_new : {array-like, sparse matrix}, shape=(n_samples, n_clusters)
X transformed in the new space of distances to cluster centers.
"""
X = check_array(X, accept_sparse=['csr', 'csc'])
check_is_fitted(self, "cluster_centers_")
if callable(self.distance_metric):
return self.distance_metric(X, Y=self.cluster_centers_)
else:
return pairwise_distances(X, Y=self.cluster_centers_,
metric=self.distance_metric)
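The essential call is pairwise_distances(X, Y=cluster_centers_, metric=...). A standalone sketch with made-up data and centers (not part of the original estimator) shows the shape of the resulting cluster-distance space:

import numpy as np
from sklearn.metrics import pairwise_distances

rng = np.random.RandomState(0)
X = rng.rand(8, 3)          # 8 samples, 3 features
centers = rng.rand(2, 3)    # 2 cluster centers
X_new = pairwise_distances(X, Y=centers, metric='manhattan')
print(X_new.shape)          # (8, 2): one distance per (sample, cluster center) pair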
Example 9: test_precomputed_cross_validation
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def test_precomputed_cross_validation():
# Ensure array is split correctly
rng = np.random.RandomState(0)
X = rng.rand(20, 2)
D = pairwise_distances(X, metric='euclidean')
y = rng.randint(3, size=20)
for Est in (neighbors.KNeighborsClassifier,
neighbors.RadiusNeighborsClassifier,
neighbors.KNeighborsRegressor,
neighbors.RadiusNeighborsRegressor):
metric_score = cross_val_score(Est(algorithm_params={'n_candidates': 5}), X, y)
precomp_score = cross_val_score(Est(metric='precomputed',
algorithm_params={'n_candidates': 5},
),
D, y)
assert_array_equal(metric_score, precomp_score)
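The algorithm_params keyword belongs to the project's own nearest-neighbour estimators. With stock scikit-learn classes, the same precomputed-distance pattern looks like this (a sketch using only standard scikit-learn estimators):

import numpy as np
from sklearn.metrics import pairwise_distances
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier

rng = np.random.RandomState(0)
X = rng.rand(20, 2)
y = rng.randint(3, size=20)
D = pairwise_distances(X, metric='euclidean')

metric_score = cross_val_score(KNeighborsClassifier(), X, y)
precomp_score = cross_val_score(KNeighborsClassifier(metric='precomputed'), D, y)
assert np.array_equal(metric_score, precomp_score)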
Example 10: Calculate_Distance_1
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def Calculate_Distance_1(dist1,dist2,metric,min_predicts,Lists_Num):
global ThreadingState1
global ThreadingState2
ThreadingState1=0
ThreadingState2=0
    i = 0
    for _ in range(Lists_Num // 2):            # integer division (Python 3)
        predicts1 = pw.pairwise_distances(dist1[i], dist2, metric=metric)
        i = i + 2
        if predicts1[0][0] > 0.12:
            if ThreadingState2 == 1:           # compare by value, not identity
                break
            if predicts1[0][0] < min_predicts:
                min_predicts = predicts1[0][0]
        else:
            min_predicts = predicts1[0][0]
            ThreadingState1 = 1
            break
Example 11: _run_answer_test
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
from scipy.sparse import csr_matrix
P = csr_matrix(pij_input)
neighbors = P.indices.astype(np.int64)
indptr = P.indptr.astype(np.int64)
_barnes_hut_tsne.gradient(P.data, pos_output, neighbors, indptr,
grad_bh, 0.5, 2, 1, skip_num_points=0)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
Example 12: find_matching_ids
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def find_matching_ids(self, embs):
if self.id_names:
matching_ids = []
matching_distances = []
distance_matrix = pairwise_distances(embs, self.embeddings)
for distance_row in distance_matrix:
min_index = np.argmin(distance_row)
if distance_row[min_index] < self.distance_treshold:
matching_ids.append(self.id_names[min_index])
matching_distances.append(distance_row[min_index])
else:
matching_ids.append(None)
matching_distances.append(None)
else:
matching_ids = [None] * len(embs)
matching_distances = [np.inf] * len(embs)
return matching_ids, matching_distances
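The core of the matcher is a nearest-neighbour lookup with a distance cut-off. Stripped of the class attributes (all names and values below are made up for illustration), it boils down to:

import numpy as np
from sklearn.metrics import pairwise_distances

known_embeddings = np.random.RandomState(0).rand(10, 128)   # gallery of known identities
known_names = ['person_{}'.format(i) for i in range(10)]
query_embs = np.random.RandomState(1).rand(3, 128)
threshold = 0.9                                             # made-up cut-off

D = pairwise_distances(query_embs, known_embeddings)
nearest = np.argmin(D, axis=1)
matches = [known_names[j] if D[i, j] < threshold else None
           for i, j in enumerate(nearest)]
print(matches)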
Example 13: dendrogram
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def dendrogram(data,
vectorizer,
method="ward",
color_threshold=1,
size=10,
filename=None):
"""dendrogram.
"median","centroid","weighted","single","ward","complete","average"
"""
data = list(data)
# get labels
labels = []
for graph in data:
label = graph.graph.get('id', None)
if label:
labels.append(label)
# transform input into sparse vectors
data_matrix = vectorizer.transform(data)
# labels
if not labels:
labels = [str(i) for i in range(data_matrix.shape[0])]
# embed high dimensional sparse vectors in 2D
from sklearn import metrics
from scipy.cluster.hierarchy import linkage, dendrogram
distance_matrix = metrics.pairwise.pairwise_distances(data_matrix)
linkage_matrix = linkage(distance_matrix, method=method)
plt.figure(figsize=(size, size))
dendrogram(linkage_matrix,
color_threshold=color_threshold,
labels=labels,
orientation='right')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
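One caveat: scipy's linkage expects either raw observations or a condensed distance vector, so handing it the square matrix from pairwise_distances makes it treat every row as an observation (recent SciPy versions emit a ClusterWarning about this). A sketch of the conventional conversion with squareform:

import numpy as np
from scipy.cluster.hierarchy import linkage
from scipy.spatial.distance import squareform
from sklearn.metrics import pairwise_distances

data_matrix = np.random.RandomState(0).rand(6, 4)
distance_matrix = pairwise_distances(data_matrix)
# condense the symmetric square matrix before handing it to linkage
condensed = squareform(distance_matrix, checks=False)
linkage_matrix = linkage(condensed, method='average')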
Example 14: gs_exact
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def gs_exact(X, N, k='auto', seed=None, replace=False,
tol=1e-3, n_iter=300, verbose=1):
ge_idx = gs(X, N, replace=replace)
dist = pairwise_distances(X, n_jobs=-1)
cost = dist.max()
iter_i = 0
while iter_i < n_iter:
if verbose:
log('iter_i = {}'.format(iter_i))
labels = np.argmin(dist[ge_idx, :], axis=0)
ge_idx_new = []
for cluster in range(N):
cluster_idx = np.nonzero(labels == cluster)[0]
if len(cluster_idx) == 0:
ge_idx_new.append(ge_idx[cluster])
continue
X_cluster = dist[cluster_idx, :]
X_cluster = X_cluster[:, cluster_idx]
within_idx = np.argmin(X_cluster.max(0))
ge_idx_new.append(cluster_idx[within_idx])
ge_idx = ge_idx_new
cost, prev_cost = dist[ge_idx, :].min(0).max(), cost
assert(cost <= prev_cost)
if prev_cost - cost < tol:
break
iter_i += 1
return ge_idx
Example 15: fisher
# Required import: from sklearn.metrics import pairwise [as alias]
# Alternatively: from sklearn.metrics.pairwise import pairwise_distances [as alias]
def fisher(yhat,y,samples=False):
"""Fisher criterion"""
classes = np.unique(y)
mu = np.zeros(len(classes))
v = np.zeros(len(classes))
# pdb.set_trace()
for c in classes.astype(int):
mu[c] = np.mean(yhat[y==c])
v[c] = np.var(yhat[y==c])
if not samples:
fisher = 0
for c1,c2 in pairwise(classes.astype(int)):
fisher += np.abs(mu[c1] - mu[c2])/np.sqrt(v[c1]+v[c2])
else:
# lexicase version
fisher = np.zeros(len(yhat))
# get closests classes to each class (min mu distance)
mu_d = pairwise_distances(mu.reshape(-1,1))
min_mu=np.zeros(len(classes),dtype=int)
for i in np.arange(len(min_mu)):
min_mu[i] = np.argsort(mu_d[i])[1]
# for c1, pairwise(classes.astype(int)):
# min_mu[c1] = np.argmin()
for i,l in enumerate(yhat.astype(int)):
fisher[i] = np.abs(l - mu[min_mu[y[i]]])/np.sqrt(v[y[i]]+v[min_mu[y[i]]])
# pdb.set_trace()
return fisher
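As used above, pairwise_distances on mu.reshape(-1, 1) is simply a compact way to obtain the matrix of absolute mean differences |mu_i - mu_j| between classes:

import numpy as np
from sklearn.metrics import pairwise_distances

mu = np.array([0.0, 1.5, 4.0])
mu_d = pairwise_distances(mu.reshape(-1, 1))   # euclidean distance in 1-D = absolute difference
assert np.allclose(mu_d, np.abs(mu[:, None] - mu[None, :]))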