This article collects typical usage examples of the Python function scipy.cluster.hierarchy.fclusterdata. If you are unsure what fclusterdata does or how to call it, the curated code examples below may help.
The 15 fclusterdata code examples that follow are ordered by popularity by default.
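Before the examples, here is a minimal, self-contained sketch of the call itself, using synthetic data and the standard SciPy API:

import numpy as np
from scipy.cluster.hierarchy import fclusterdata

rng = np.random.default_rng(0)
# Two well-separated 2-D blobs of 20 observations each.
X = np.vstack([rng.normal(0, 0.5, (20, 2)), rng.normal(10, 0.5, (20, 2))])

# fclusterdata expects an (n_observations, n_features) array and returns a
# 1-based flat cluster label for every observation.
labels = fclusterdata(X, t=5.0, criterion='distance', metric='euclidean', method='single')
print(sorted(set(labels)))  # -> [1, 2]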
Example 1: hierarchicalClusteringScipy
def hierarchicalClusteringScipy(self, vectorLayer, attributesList, normalize, clusterThreshold, linkageMethod, criterion, metric, depth, max_clust, outputFieldName):
    import scipy.cluster.hierarchy as hcluster
    from numpy import array
    # QgsField and QVariant come from qgis.core and qgis.PyQt.QtCore respectively.
    fullObjectsList = []
    features = vectorLayer.getFeatures()
    for feature in features:
        fullObjectsList.append([])
        for attribute in attributesList:
            if feature[attribute[0]]:
                fullObjectsList[-1].append(feature[attribute[0]])
            else:
                fullObjectsList[-1].append(0)
    # Normalize each attribute column by its maximum absolute value.
    if normalize:
        maxValues = [max(abs(item[i]) for item in fullObjectsList) for i in range(len(attributesList))]
        for row in fullObjectsList:
            for i in range(len(row)):
                row[i] = float(row[i]) / maxValues[i]
    data = array(fullObjectsList)
    if criterion == 'maxclust':
        clusters = hcluster.fclusterdata(data, t=max_clust, criterion=criterion, method=linkageMethod,
                                         metric=metric, depth=depth)
    else:
        clusters = hcluster.fclusterdata(data, t=clusterThreshold, criterion=criterion, method=linkageMethod,
                                         metric=metric, depth=depth)
    vectorLayerDataProvider = vectorLayer.dataProvider()
    # Create the output field if it does not exist yet.
    if vectorLayer.fields().indexFromName(outputFieldName) == -1:
        vectorLayerDataProvider.addAttributes([QgsField(outputFieldName, QVariant.Int)])
    vectorLayer.updateFields()
    vectorLayer.startEditing()
    attrIdx = vectorLayer.fields().indexFromName(outputFieldName)
    features = vectorLayer.getFeatures()
    for i, feature in enumerate(features):
        vectorLayer.changeAttributeValue(feature.id(), attrIdx, int(clusters[i]))
    vectorLayer.updateFields()
    vectorLayer.commitChanges()
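Example 1 switches between the 'maxclust' and 'distance' criteria. The toy sketch below (synthetic data, not tied to the QGIS plugin) shows what each criterion means for the t parameter:

import numpy as np
import scipy.cluster.hierarchy as hcluster

data = np.random.rand(30, 2)
# 'maxclust': t is the maximum number of flat clusters to form.
by_count = hcluster.fclusterdata(data, t=4, criterion='maxclust', method='ward')
# 'distance': t is a cophenetic-distance cut-off; the cluster count falls out of the data.
by_dist = hcluster.fclusterdata(data, t=0.3, criterion='distance', method='ward')
print(by_count.max(), by_dist.max())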
Example 2: cluster_peaks_by_lane
def cluster_peaks_by_lane(peak_pos, hdist=8.0, return_sorted=True):
    """
    :param peak_pos: sequence of peak positions as (row, col), i.e. (y, x), pairs.
    :param hdist: horizontal distance threshold below which peaks fall into the same lane.
    :param return_sorted: if True, return lanes ordered by their mean x-position.
    :return: mapping of lane id -> array of peak positions.
    Refs:
    http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.fclusterdata.html
    http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.fcluster.html
    http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html
    https://web.archive.org/web/20100619134310/http://www.plantbio.ohiou.edu/epb/instruct/multivariate/Week7Lectures.PDF
    Linkage methods:
        single linkage - produces "chains"
        complete linkage - produces "spherical" clusters
        intermediate linkage - a compromise between the two
    Other clustering methods:
        UPGMA - unweighted pair-group method using arithmetic averages
        WPGMA - weighted pair-group method using arithmetic averages
        UPGMC - unweighted pair-group method using centroids
        WPGMC - weighted pair-group method using centroids
        K-means - cluster into exactly K number of clusters
    """
    if hdist is None:
        hdist = 8.0
    hdist = float(hdist)  # ensure float/numeric input
    xpos = np.array([[pos[1]] for pos in peak_pos])
    # maybe add a little bit of y-position to the mix?
    # xpos = np.array([[pos[1], pos[0]/100] for pos in peak_pos])
    # fclusterdata(X, t) expects N observations, each with M variables:
    lane_clusters = fclusterdata(xpos, t=hdist, criterion='distance', metric='euclidean', depth=2, method='single')
    # lane_clusters = linkage(xpos)  # defaults to 'single', 'euclidean'
    # group lane-clustered peaks: lane_id -> list of peak positions
    peaks_by_lane = defaultdict(list)
    for lane_id, pos in zip(lane_clusters, peak_pos):
        peaks_by_lane[lane_id].append(list(pos))
    # convert each lane's list of peaks to an ndarray
    for lane_id in peaks_by_lane:
        peaks_by_lane[lane_id] = np.array(peaks_by_lane[lane_id])
    if return_sorted:
        # sort by mean x-position (indexing as [y, x] aka [row, col])
        peaks_by_lane = OrderedDict(sorted(peaks_by_lane.items(), key=lambda kv: kv[1][:, 1].mean()))
    return peaks_by_lane
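A hypothetical usage sketch for cluster_peaks_by_lane, assuming the imports the function relies on (numpy as np, fclusterdata, defaultdict, OrderedDict) are in scope; the peak coordinates are made up:

import numpy as np
peak_pos = np.array([[10, 50], [200, 52], [15, 120], [210, 118], [12, 121]])
lanes = cluster_peaks_by_lane(peak_pos, hdist=8.0)
for lane_id, peaks in lanes.items():
    print(lane_id, peaks.mean(axis=0))  # lanes around x~51 and x~120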
Example 3: pcaCode
def pcaCode():
    # Question: PCA the descriptors, or PCA the final profiles?
    # Principal Component Analysis. Assumes X and the aliases
    # deco = sklearn.decomposition, hier = scipy.cluster.hierarchy are defined.
    pca = deco.PCA(n_components=10)
    Xp = pca.fit_transform(X)
    # Z = hier.linkage(X)
    Y = hier.fclusterdata(X, 1.15)
    print("Num. Clusters (no PCA): %s" % max(Y))
    Yp = hier.fclusterdata(Xp, 1.15)
    print("Num. Clusters (with PCA): %s" % max(Yp))
Example 4: cluster_lane_peaks_to_bands
def cluster_lane_peaks_to_bands(lane_peaks, vdist=5.0, img=None):
    vdist = float(vdist)  # ensure float/numeric input
    if len(lane_peaks) < 2:
        # Special case: the lane has only a single peak, nothing to cluster.
        this_lane_bands_peaks = {0: lane_peaks}  # ensure we have a dict of peaks
    else:
        band_clusters = fclusterdata(lane_peaks, t=vdist, criterion='distance', metric='euclidean', depth=2, method='single')
        # group, method (1) using defaultdict:
        # (cannot use dict.fromkeys, because it only takes static default values, not types/functions)
        this_lane_bands_peaks = defaultdict(list)
        for band_id, pos in zip(band_clusters, lane_peaks):
            this_lane_bands_peaks[band_id].append(pos)
        # alternative grouping method: (2) zip, sort, then groupby
    # convert the list of peaks for each band to an ndarray:
    for band_id in this_lane_bands_peaks:
        this_lane_bands_peaks[band_id] = np.array(this_lane_bands_peaks[band_id])
    return this_lane_bands_peaks
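A hypothetical usage sketch, again assuming numpy as np, fclusterdata and defaultdict are in scope; the peaks are invented (row, col) pairs from one lane:

import numpy as np
lane_peaks = np.array([[100, 55], [103, 54], [160, 56], [162, 55]])
bands = cluster_lane_peaks_to_bands(lane_peaks, vdist=5.0)
for band_id, peaks in bands.items():
    print(band_id, peaks.mean(axis=0))  # band centre as the mean peak position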
Example 5: cluster
def cluster(points, thresh):
    # the x, y, z points must first be separated out of the flat list:
    # the first two entries are skipped, then x, y, z triples follow
    ndata = [[], [], []]
    npts = (len(points) - 2) // 3
    for j in range(npts):
        ndata[0].append(float(points[2 + 3 * j]))  # x
        ndata[1].append(float(points[3 + 3 * j]))  # y
        ndata[2].append(float(points[4 + 3 * j]))  # z
    data = np.asarray(ndata)
    clusterlist = hcluster.fclusterdata(np.transpose(data), thresh, criterion="distance")
    nclusters = findLargest(clusterlist)
    # initialize a list of lists of the right size
    # http://stackoverflow.com/questions/7745562/appending-to-2d-lists-in-python
    clusters = [[] for i in range(nclusters)]
    # assign each point to the correct cluster (labels are 1-based)
    for i in range(npts):
        clusters[clusterlist[i] - 1].append([ndata[0][i], ndata[1][i], ndata[2][i]])
    return [data, clusterlist, clusters]
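A hedged usage sketch of the flat input format the function expects, assuming numpy as np and scipy.cluster.hierarchy as hcluster are imported. findLargest is a helper not shown above; for this sketch we assume it returns the highest cluster label:

findLargest = max  # stand-in for the missing helper
points = ["hdr1", "hdr2", 0.0, 0.0, 0.0, 0.1, 0.0, 0.0, 5.0, 5.0, 5.0]
data, labels, groups = cluster(points, thresh=1.0)
print(labels)  # e.g. [1 1 2]: the first two points group, the third stands alone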
Example 6: cluster_qs
def cluster_qs(qs, k=None, threshold=1.5):
    """Cluster q vectors into discrete groups.
    Classifies each of the q vectors into a number of clusters. The number of
    clusters used is decided by the parameters passed:
        * If the k parameter is supplied, the q vectors are grouped into k clusters using kmeans.
        * If only the threshold parameter is supplied, the q vectors are split into groups based on cophenetic distance.
    :param qs: list of q vectors to cluster. Each element should be a numpy array of length three.
    :param k: number of clusters to use (optional).
    :param threshold: cophenetic distance cut-off point for new clusters (optional).
    :returns: tuple (clusters, k)
        Where:
            list -- clusters is a list of cluster indices which each q belongs to
            int -- k is the number of clusters used
    """
    if k is not None:
        centroids = kmeans_plus_plus(qs, k)
        _, clusters = kmeans2(qs, centroids, minit='matrix')
        if len(set(clusters)) != k:
            raise ValueError("Could not group the satellite reflections "
                             "into {} clusters. Please check that you have "
                             "at least {} satellites.".format(k, k))
    else:
        clusters = hcluster.fclusterdata(qs, threshold, criterion="distance")
    return clusters, len(set(clusters))
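A hedged usage sketch for the threshold branch only (the kmeans branch needs the kmeans_plus_plus helper, which is not shown); assumes numpy as np and scipy.cluster.hierarchy as hcluster are imported, with made-up q vectors:

import numpy as np
qs = np.array([[0.10, 0.0, 0.0], [0.11, 0.0, 0.0],
               [0.0, 0.25, 0.0], [0.0, 0.26, 0.0]])
clusters, k = cluster_qs(qs, threshold=0.05)
print(clusters, k)  # two groups of satellite q vectors, k == 2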
Example 7: searchForColorPoints
def searchForColorPoints(im, criteria):
    points = []
    pointColors = []
    hsvIm = cv2.cvtColor(im, cv2.COLOR_BGR2HSV_FULL)
    # sample 8x8 blocks on a 10-pixel grid, skipping an 11-pixel border
    for i in range(11, im.shape[1] - 11, 10):
        for j in range(11, im.shape[0] - 11, 10):
            b = block(hsvIm, (i, j), 8)
            if b[:, :, 0].std() > 25:  # skip blocks with noisy hue
                continue
            color = (b[:, :, 0].mean(), b[:, :, 1].mean(), b[:, :, 2].mean())
            matchedColor = matchColor(color, criteria)
            if matchedColor >= 0:
                points.append((i, j))
                pointColors.append(matchedColor)
    points = np.array(points, np.float16)
    cluster = fclusterdata(points, 10, "distance")
    centroids = [[] for _ in range(len(criteria))]
    for i in range(1, cluster.max() + 1):
        b = cluster == i  # boolean mask of the points in cluster i
        c = np.zeros((1, 2), np.int16)
        # b.argsort()[len(b) - sum(b):] are the indices where b is True,
        # so c accumulates the mean position of the cluster's points
        for p in points[b.argsort()[len(b) - sum(b):]]:
            c = c + p / sum(b)
        # file the centroid under the color of the cluster's first point
        centroids[pointColors[b.argsort()[len(b) - sum(b)]]].append(c[0])
    return centroids
Example 8: identify
def identify(image, colors):
    global pixelCounters
    num_colors = 1
    data = numpy.zeros((1000, 2))
    n = 0
    a = 0
    for x in range(image.shape[0]):
        for y in range(image.shape[1]):
            a += 1
            # only sample every 128th pixel; the parentheses matter because
            # '!=' binds tighter than '&' in Python
            if (a & 0b1111111) != 0:
                continue
            for i in range(num_colors):
                hue = image[x, y, 0]
                sat = image[x, y, 1]
                val = image[x, y, 2]
                if 0 <= hue < 10 and sat > 150 and val > 50:
                    data[n, 0] = x
                    data[n, 1] = y
                    n += 1
    if n < 2:
        return (None, None)
    t = 30
    data = data[0:n, :]
    clusters = hcluster.fclusterdata(data, t, criterion="distance")
    return (data, clusters)
Example 9: magic_fragmentation
def magic_fragmentation(self):
    """Takes the atom objects and tries to separate two fragments by a
    distance-based hierarchical clustering. Always check the result before
    relying on these fragmentations!"""
    # hardcoded number of fragments, for now always 2!
    nr_frags = 2
    coordinates = self.dimer.get_positions()
    # centroids, _ = kmeans(coordinates, nr_frags)
    # assign each atom to a cluster (labels are 1-based)
    cluster_indices = fclusterdata(coordinates, self.magic_cutoff, criterion="distance")
    self.frag1 = deepcopy(self.dimer)
    self.frag2 = deepcopy(self.dimer)
    # Now delete the atoms of the other fragment from each copy with mighty
    # pythonic list comprehensions!
    del self.frag1[[atom.index for pos, atom in enumerate(self.frag1) if cluster_indices[pos] != 1]]
    del self.frag2[[atom.index for pos, atom in enumerate(self.frag2) if cluster_indices[pos] != 2]]
    print("Finished automatic fragmentation, please remember to check the result!")
    self.__check_fragments__()
    self.__set_charges__()
    self.__get_frontiers__()
Example 10: _agglomerative_cluster_encounters
def _agglomerative_cluster_encounters(X_data, seconds_thresh):
    """ Agglomerative encounter clustering algorithm.
    Input:  length-N array of data to cluster
    Output: length-N array of cluster indexes
    """
    label_arr = hier.fclusterdata(X_data, seconds_thresh, criterion='distance')
    return label_arr
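A hedged usage sketch, assuming numpy as np and scipy.cluster.hierarchy as hier are imported. fclusterdata needs a 2-D array, so the 1-D timestamps are padded with a zero column (the same trick Example 13 uses):

import numpy as np
unixtimes = np.array([0, 5, 8, 500, 504])
X_data = np.vstack([unixtimes, np.zeros(len(unixtimes))]).T
labels = _agglomerative_cluster_encounters(X_data, seconds_thresh=15)
print(labels)  # two encounters: images 0-2 and images 3-4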
Example 11: calc_best_result
def calc_best_result(coords, threshold=0.01):
    """
    Calculates the most likely result based on clustering of the provided
    coordinates. We assume that the largest cluster represents the value most
    of the best agents have agreed on. Uses SciPy's
    scipy.cluster.hierarchy.fclusterdata function.
    Parameters
    ----------
    coords : list of two-element tuples
        coordinates to guess the result from
    threshold : float
        see the documentation for scipy.cluster.hierarchy.fclusterdata
    Returns
    -------
    x : float
        x coordinate of the result
    y : float
        y coordinate of the result
    """
    coords = np.array(coords)
    t = coords[:, 0].std()
    idx = hierarchy.fclusterdata(coords, threshold * t)
    # on SciPy >= 1.11, stats.mode returns scalars; use stats.mode(idx).mode there
    best = int(stats.mode(idx)[0][0])
    ans = np.array([coords[i] for i in range(len(coords)) if idx[i] == best])
    return namedtuple('Ans', 'x, y')(ans[:, 0].mean(), ans[:, 1].mean())
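A hedged usage sketch with synthetic agent guesses, assuming the imports the function relies on (numpy as np, scipy.cluster.hierarchy as hierarchy, scipy.stats as stats, collections.namedtuple) are in scope:

coords = [(1.0, 1.0), (1.01, 0.99), (0.99, 1.02), (5.0, 5.0)]
result = calc_best_result(coords, threshold=0.01)
print(result.x, result.y)  # mean of the dominant cluster, approximately (1.0, 1.0)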
Example 12: merge_paths
def merge_paths(rides):
    waypoints = list(itertools.chain(*[ride.route.waypoints for ride in rides]))
    waypoints = sorted(waypoints, key=lambda x: x.country)
    logger.info("Merging {} rides with {} total waypoints".format(len(rides), len(waypoints)))
    for country, group in itertools.groupby(waypoints, key=lambda x: x.country):
        waypoints = list(group)
        country_lat_lng_points = [(x.lat, x.lng) for x in waypoints]
        country_xyz_points = [latlng_to_xyz(lat, lng) for lat, lng in country_lat_lng_points]
        logger.debug("Processing {} with {} waypoints".format(country, len(country_xyz_points)))
        wh = whiten(country_xyz_points)
        # integer division, so kmeans gets an int cluster count
        k_guess = max(1, len(country_xyz_points) // BEARABLE_CLUSTER_SIZE)
        k_centroids = kmeans(wh, k_guess)[0]
        k_labels = vq(wh, k_centroids)[0]
        k_labeled = sorted(zip(country_xyz_points, country_lat_lng_points, waypoints, k_labels), key=lambda x: x[3])
        logger.debug("Got {} miniclusters".format(len(k_centroids)))
        for key, gr in itertools.groupby(k_labeled, key=lambda x: x[3]):
            gr = list(gr)
            k_waypoints = [x[2] for x in gr]
            k_lat_lng_points = [x[1] for x in gr]
            k_xyz_points = [x[0] for x in gr]
            logger.debug("Running minicluster {} with {} waypoints".format(key, len(k_waypoints)))
            cluster_labels = fclusterdata(np.array(k_xyz_points), 0.2, criterion="distance", metric="euclidean")
            centroids = cluster_centroids(zip(k_lat_lng_points, cluster_labels))
            logger.debug("Got {} hierarchical clusters".format(len(set(cluster_labels))))
            for i in range(len(k_waypoints)):
                new_lat, new_lng = centroids[cluster_labels[i] - 1]
                k_waypoints[i].lat = new_lat
                k_waypoints[i].lng = new_lng
Example 13: compute_encounters
def compute_encounters(hs, back, seconds_thresh=15):
    '''
    Clusters encounters together (by time, not space).
    An encounter is a meeting, localized in time and space, between a camera
    and a group of animals. Animals are identified within each encounter.
    '''
    gx_list = hs.get_valid_gxs()
    datetime_list = hs.gx2_exif(gx_list, tag='DateTime')
    unixtime_list = [io.exiftime_to_unixtime(datetime_str) for datetime_str in datetime_list]
    unixtime_list = np.array(unixtime_list)
    # fclusterdata needs 2-D input, so pad the timestamps with a zero column
    X = np.vstack([unixtime_list, np.zeros(len(unixtime_list))]).T
    print('[scripts] clustering')
    # Build a mapping from cluster indexes to member gxs
    gx2_clusterid = fclusterdata(X, seconds_thresh, criterion='distance')
    clusterx2_gxs = [[] for _ in range(gx2_clusterid.max())]
    for gx, clusterx in enumerate(gx2_clusterid):
        clusterx2_gxs[clusterx - 1].append(gx)  # IDs are 1-based
    clusterx2_nGxs = np.array([len(gxs) for gxs in clusterx2_gxs])
    print('cluster size stats: %s' % helpers.printable_mystats(clusterx2_nGxs))
    # Change IDs such that a higher number = more gxs
    gx2_ex = [None] * len(gx2_clusterid)
    gx2_eid = [None] * len(gx2_clusterid)
    ex2_clusterx = clusterx2_nGxs.argsort()
    ex2_gxs = [None] * len(ex2_clusterx)
    for ex in range(len(ex2_clusterx)):
        clusterx = ex2_clusterx[ex]
        gxs = clusterx2_gxs[clusterx]
        ex2_gxs[ex] = gxs
        nGx = len(gxs)
        for gx in gxs:
            USE_STRING_ID = True
            if USE_STRING_ID:
                # String ID
                eid = 'ex=%r_nGxs=%d' % (ex, nGx)
            else:
                # Float ID
                eid = ex + (nGx / 10 ** np.ceil(np.log(nGx) / np.log(10)))
            gx2_eid[gx] = eid
            gx2_ex[gx] = ex
    hs.tables.gx2_ex = np.array(gx2_ex)
    hs.tables.gx2_eid = np.array(gx2_eid)
    # Give the info to the GUI
    extra_cols = {'eid': lambda gx_list: [gx2_eid[gx] for gx in iter(gx_list)]}
    back.append_header('gxs', 'eid')
    back.populate_image_table(extra_cols=extra_cols)
    return locals()
Example 14: clust
def clust(fp_list):
    np_fps = []
    for fp in fp_list:
        # ConvertToNumpyArray fills (and resizes) arr in place
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fp, arr)
        np_fps.append(arr)
    thresh = 6.5
    clusters = hcluster.fclusterdata(np_fps, thresh, criterion="distance")
    return clusters
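A hedged usage sketch, assuming RDKit is installed and the imports the function relies on (numpy, rdkit.DataStructs, scipy.cluster.hierarchy as hcluster) are in scope; the SMILES strings are arbitrary:

from rdkit import Chem
from rdkit.Chem import AllChem
smiles = ["CCO", "CCN", "c1ccccc1", "c1ccccc1O"]
fps = [AllChem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles(s), 2, nBits=1024) for s in smiles]
print(clust(fps))  # 1-based cluster label per fingerprint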
Example 15: clusterization
def clusterization(data, clustersNum=2):
    # note: clustersNum is currently unused; the 'distance' criterion below
    # lets the threshold determine the number of clusters instead
    import scipy.cluster.hierarchy as hcluster
    data = np.array(data)
    # clusters = hcluster.fclusterdata(data, clustersNum, criterion='maxclust', metric='euclidean', depth=1)
    thresh = 1.5
    clusters = hcluster.fclusterdata(data, thresh, criterion="distance")
    return np.array(clusters)
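A hedged usage sketch on synthetic 2-D data, assuming numpy as np is imported:

import numpy as np
pts = np.vstack([np.random.normal(0, 0.3, (10, 2)), np.random.normal(5, 0.3, (10, 2))])
print(clusterization(pts))  # two labels expected with thresh=1.5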