This article collects and summarizes typical usage examples of the scipy.cluster.vq function in Python. If you are wondering how exactly to use the vq function, or what calling it looks like in real code, the hand-picked examples below may help.
Fifteen code examples of the vq function are shown below, sorted by popularity by default.
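Before the examples, here is a minimal, self-contained sketch of the kmeans + vq workflow that most of them follow: build a codebook with kmeans, then use vq to assign each observation to its nearest centroid. The data, the cluster count of 3, and the variable names are illustrative assumptions only, not taken from any of the examples below.

import numpy as np
from scipy.cluster.vq import kmeans, vq, whiten

# Illustrative data: 200 observations with 2 features each
obs = np.random.rand(200, 2)

# Normalize each feature to unit variance before clustering
obs = whiten(obs)

# Build a codebook of 3 centroids with k-means
centroids, distortion = kmeans(obs, 3)

# vq assigns every observation to its nearest centroid:
# `codes` holds the centroid index per observation, `dists` the distance to it
codes, dists = vq(obs, centroids)
print(codes[:10], dists[:10])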
Example 1: find_grid
def find_grid(self, inp):
    dx, dy = [], []
    outx, outy = [], []
    # Cluster the y-coordinates into Ny rows
    for i in inp:
        dy.append([i[2], 0])
    data = np.vstack(dy)
    centroids, _ = kmeans(data, self.Ny)
    idx, _ = vq(data, centroids)
    for ii in range(0, self.Ny):
        mini = np.min(data[idx == ii, 0])
        maxi = np.max(data[idx == ii, 0])
        outy.append([mini, maxi])
    outy = sorted(outy[:], key=lambda s: s[1])
    # Cluster the x-coordinates into Nx columns
    dx = []
    for i in inp:
        dx.append([i[1], 0])
    data = np.vstack(dx)
    centroids, _ = kmeans(data, self.Nx)
    idx, _ = vq(data, centroids)
    for ii in range(0, self.Nx):
        mini = np.min(data[idx == ii, 0])
        maxi = np.max(data[idx == ii, 0])
        outx.append([mini, maxi])
    outx = sorted(outx[:], key=lambda s: s[1])
    # Collect the input points that fall inside each grid cell
    out = []
    for y in range(0, len(outy)):
        for x in range(0, len(outx)):
            for k in range(0, len(inp)):
                if (inp[k][1] >= outx[x][0]) and (inp[k][1] <= outx[x][1]) and \
                   (inp[k][2] >= outy[y][0]) and (inp[k][2] <= outy[y][1]):
                    out.append(inp[k])
    return out
Example 2: compare
def compare(m, Nobs, Ncodes, Nfeatures):
    # vq, vq2 and vq3 are the inline/blitz and C-array implementations
    # defined elsewhere in this benchmark module
    obs = RandomArray.normal(0., 1., (Nobs, Nfeatures))
    codes = RandomArray.normal(0., 1., (Ncodes, Nfeatures))
    import scipy.cluster.vq
    print 'vq with %d observations, %d features and %d codes for %d iterations' % \
          (Nobs, Nfeatures, Ncodes, m)
    # Pure-Python reference implementation
    t1 = time.time()
    for i in range(m):
        code, dist = scipy.cluster.vq.py_vq(obs, codes)
    t2 = time.time()
    py = (t2 - t1)
    print ' speed in python:', (t2 - t1) / m
    print code[:2], dist[:2]
    # Standard C implementation shipped with scipy
    t1 = time.time()
    for i in range(m):
        code, dist = scipy.cluster.vq.vq(obs, codes)
    t2 = time.time()
    print ' speed in standard c:', (t2 - t1) / m
    print code[:2], dist[:2]
    print ' speed up: %3.2f' % (py / (t2 - t1))
    # load into cache
    b = vq(obs, codes)
    t1 = time.time()
    for i in range(m):
        code, dist = vq(obs, codes)
    t2 = time.time()
    print ' speed inline/blitz:', (t2 - t1) / m
    print code[:2], dist[:2]
    print ' speed up: %3.2f' % (py / (t2 - t1))
    # load into cache
    b = vq2(obs, codes)
    t1 = time.time()
    for i in range(m):
        code, dist = vq2(obs, codes)
    t2 = time.time()
    print ' speed inline/blitz2:', (t2 - t1) / m
    print code[:2], dist[:2]
    print ' speed up: %3.2f' % (py / (t2 - t1))
    # load into cache
    b = vq3(obs, codes)
    t1 = time.time()
    for i in range(m):
        code, dist = vq3(obs, codes)
    t2 = time.time()
    print ' speed using C arrays:', (t2 - t1) / m
    print code[:2], dist[:2]
    print ' speed up: %3.2f' % (py / (t2 - t1))
Example 3: getBOVW
def getBOVW(sift_key):
    global codebook
    new_line = ""
    sift_key_points = []
    lines = sift_key.readlines()
    lines = lines[1:]
    # Each SIFT keypoint spans 8 lines in the key file; merge them into one descriptor
    for i in range(len(lines)):
        if (i % 8) == 0:
            if new_line != "":
                new_line = new_line.strip()
                tokens = new_line.split()
                tokens = map(int, tokens)
                sift_key_points.append(tokens)
            new_line = ""
        else:
            new_line += (lines[i].strip() + ' ')
    sift_key_points = np.array(sift_key_points)
    codebook = np.array(codebook)
    # Assign each descriptor to its nearest visual word
    idx, _ = vq(sift_key_points, codebook)
    BOVW = []
    for i in range(k):
        # vq returns 0-based codebook indices, so count occurrences of i
        BOVW.append(list(idx).count(i) / float(len(sift_key_points)))
    return BOVW
Example 4: BOWMatch
def BOWMatch(self, indexPath):
    '''the query's score against an individual index'''
    # start = time.time()
    query_des_list = []
    im_features, image_paths, idf, numWords, voc = joblib.load(indexPath)
    numWords = self.numWords
    desc = cv2.xfeatures2d.SIFT_create()
    # Extract the descriptors from the query
    query = self.image
    kp, des = desc.detectAndCompute(query, None)
    query_des_list.append((query, des))
    # Stack query descriptors in a numpy array
    query_descriptors = query_des_list[0][1]
    # Calculate histogram of features for the query
    test_features = np.zeros((1, numWords), "float32")
    words, distance = vq(query_descriptors, voc)
    for w in words:
        test_features[0][w] += 1
    # Perform tf-idf vectorization for the query
    test_features = test_features * idf
    test_features = preprocessing.normalize(test_features, norm='l2')
    score = np.dot(test_features, im_features.T)
    return score
Example 5: detectPupilKMeans
def detectPupilKMeans(gray, K=2, distanceWeight=2, reSize=(40, 40)):
    ''' Detects the pupil in the image, gray, using k-means
        gray           : grayscale image
        K              : number of clusters
        distanceWeight : defines the weight of the position parameters
        reSize         : the size of the image to do k-means on
    '''
    # Resize for faster performance
    smallI = cv2.resize(gray, reSize)
    M, N = smallI.shape
    # Generate coordinates in a matrix
    X, Y = np.meshgrid(range(M), range(N))
    # Make coordinates and intensity into single vectors
    z = smallI.flatten()
    x = X.flatten()
    y = Y.flatten()
    O = len(x)
    # Make a feature vector containing (intensity, y, x)
    features = np.zeros((O, 3))
    features[:, 0] = z
    features[:, 1] = y / distanceWeight  # Divide so that position weighs less than intensity
    features[:, 2] = x / distanceWeight
    features = np.array(features, 'f')
    # Cluster data
    centroids, variance = kmeans(features, K)
    # Use the found clusters to map each pixel to a label
    label, distance = vq(features, centroids)
    # Re-create an image from the labels
    labelIm = np.array(np.reshape(label, (M, N)))
    f = figure(1)
    imshow(labelIm)
    f.canvas.draw()
    f.show()
Example 6: buildHistogramForVideo
def buildHistogramForVideo(pathToVideo, vocabulary):
    frames = os.listdir(pathToVideo)
    size = len(vocabulary)
    stackOfHistogram = np.zeros(size).reshape(1, size)
    for frame in frames:
        # build histogram for this frame
        completePath = pathToVideo + "/" + frame
        lines = open(completePath, "r").readlines()
        print completePath
        frameFeatures = np.zeros(128).reshape(1, 128)
        for line in lines[1:]:
            data = line.split(" ")
            feature = data[4:]
            for i in range(len(feature)):
                item = int(feature[i])
                feature[i] = item
            feature = normalizeSIFT(feature)
            frameFeatures = np.vstack((frameFeatures, feature))
        frameFeatures = frameFeatures[1:]
        codes, distance = vq(frameFeatures, vocabulary)
        histogram = np.zeros(size)
        for code in codes:
            histogram[code] += 1
        stackOfHistogram = np.vstack((stackOfHistogram, histogram.reshape(1, size)))
    return stackOfHistogram[1:]
Example 7: predictor
def predictor(im, w, queue):
    global fea_det
    global step_size
    global k
    global voc
    global clf
    global classes_names
    global stdSlr
    global image_paths
    best = 0
    for (x_pt, y_pt, window) in sliding_window(im, stepSize=16, windowSize=(w, w)):
        if window.shape[0] != w or window.shape[1] != w:
            continue
        kpts = [cv2.KeyPoint(x, y, step_size)
                for y in range(0, window.shape[0], step_size)
                for x in range(0, window.shape[1], step_size)]
        (kpts, des) = fea_det.compute(window, kpts)  # compute dense descriptors
        des = whiten(des)
        test_features = np.zeros((len(image_paths), k), "float32")
        words, L2distance = vq(des, voc)
        for wd in words:
            test_features[0][wd] += 1
        nbr_occurences = np.sum((test_features > 0) * 1, axis=0)
        idf = np.array(np.log((1.0 * len(image_paths) + 1) / (1.0 * nbr_occurences + 1)), 'float32')
        test_features = stdSlr.transform(test_features)
        probs = np.array(clf.predict_proba(test_features))
        ind = np.argmax(probs)
        max_prob = np.max(probs)
        if max_prob > best:
            predictions = (classes_names[ind], max_prob)
            best = max_prob
        #print(predictions)
    queue.put(predictions)
Example 8: classify
def classify(im):
    if im is None:
        print "No such file {}\nCheck if the file exists".format(image_path)
        return -1
    # Load the classifier, class names, scaler, number of clusters and vocabulary
    clf, classes_names, stdSlr, k, voc = joblib.load("bow.pkl")
    sift = cv2.xfeatures2d.SIFT_create()
    kpts, des = sift.detectAndCompute(im, None)
    test_features = np.zeros((1, k), "float32")
    # words, distance = vq(des_list[0][1], voc)
    words, distance = vq(des, voc)
    for w in words:
        test_features[0][w] += 1
    # Perform tf-idf vectorization
    nbr_occurences = np.sum((test_features > 0) * 1, axis=0)
    idf = np.array(np.log((1.0 * 1 + 1) / (1.0 * nbr_occurences + 1)), "float32")
    # Scale the features
    test_features = stdSlr.transform(test_features)
    # Perform the predictions
    predictions = [classes_names[i] for i in clf.predict(test_features)]
    return predictions
Example 9: k_mean_plot_AMN
def k_mean_plot_AMN(c, folder, list_vectors_ANM):
    """Creates a k-means clustering, mainly for clustering DCD trajectories"""
    # DEFINE ONE VAR (fixed number of variables = number of PDB-DCD pairs = 4 in our case)
    var1 = list_vectors_ANM[0]
    var2 = list_vectors_ANM[1]
    var3 = list_vectors_ANM[2]
    var4 = list_vectors_ANM[3]
    features = np.array([])
    features = np.append(features, var1)
    features = np.append(features, var2)
    features = np.append(features, var3)
    features = np.append(features, var4)
    centroids, variance = kmeans(features, c)
    code, distance = vq(features, centroids)
    for j in range(len(var1) - 1):
        pylab.plot([p[j] for p in var1], [p[j + 1] for p in var1], '*')
        pylab.plot([p[j] for p in var2], [p[j + 1] for p in var2], 'r*')
        pylab.plot([p[j] for p in var3], [p[j + 1] for p in var3], 'y*')
        pylab.plot([p[j] for p in var4], [p[j + 1] for p in var4], 'g*')
    #~ pylab.plot([p[0] for p in centroids], [p[1] for p in centroids], 'go')
    pylab.plot(centroids, centroids, 'go')
    pylab.savefig("./" + folder + "/kmeans_ANMnalysis.png")
Example 10: GetPupilKMeans
def GetPupilKMeans(gray, K=2, distanceWeight=2, reSize=(40, 40)):
    smallI = cv2.resize(gray, reSize)
    M, N = smallI.shape
    X, Y = np.meshgrid(range(M), range(N))
    z = smallI.flatten()
    x = X.flatten()
    y = Y.flatten()
    O = len(x)
    features = np.zeros((O, 3))
    features[:, 0] = z
    features[:, 1] = y / distanceWeight
    features[:, 2] = x / distanceWeight
    features = np.array(features, 'f')
    centroids, variance = kmeans(features, K)
    print(centroids)
    label, distance = vq(features, centroids)
    labelIm = np.array(np.reshape(label, (M, N)))
    f = figure(1)
    imshow(labelIm)
    f.canvas.draw()
    f.show()
Example 11: detectPupilKMeans
def detectPupilKMeans(gray, K=4, distanceWeight=1, reSize=(30, 30)):
    smallI = cv2.resize(gray, reSize)
    M, N = smallI.shape
    X, Y = np.meshgrid(range(M), range(N))
    z = smallI.flatten()
    x = X.flatten()
    y = Y.flatten()
    O = len(x)
    # Make a feature vector containing (intensity, y, x)
    features = np.zeros((O, 3))
    features[:, 0] = z
    features[:, 1] = y / distanceWeight  # Divide so that position weighs less than intensity
    features[:, 2] = x / distanceWeight
    features = np.array(features, 'f')
    # Cluster data
    centroids, variance = kmeans(features, K)
    # Use the found clusters to map each pixel to a label
    label, distance = vq(features, centroids)
    # Re-create an image from the labels
    labelIm = np.array(np.reshape(label, (M, N)))
    # Find the lowest-valued (darkest) cluster centre and use its intensity as threshold
    thr = 255
    for i in range(K):
        if centroids[i][0] < thr:
            thr = centroids[i][0]
    return thr
Example 12: cluster
def cluster(S, k, ndim):
    """ Spectral clustering from a similarity matrix. """
    # check for symmetry
    if sum(abs(S - S.T)) > 1e-10:
        print 'not symmetric'
    # create Laplacian matrix
    rowsum = sum(abs(S), axis=0)
    D = diag(1 / sqrt(rowsum + 1e-6))
    L = dot(D, dot(S, D))
    # compute eigenvectors of L
    U, sigma, V = linalg.svd(L, full_matrices=False)
    # create feature vector from ndim first eigenvectors
    # by stacking eigenvectors as columns
    features = array(V[:ndim]).T
    # k-means
    features = whiten(features)
    centroids, distortion = kmeans(features, k)
    code, distance = vq(features, centroids)
    return code, V
Example 13: buildVLADForEachImageAtDifferentLevels
def buildVLADForEachImageAtDifferentLevels(descriptorsOfImage, level):
    # Set width and height
    width = descriptorsOfImage.width
    height = descriptorsOfImage.height
    # Calculate width and height step
    widthStep = int(width / 2)
    heightStep = int(height / 2)
    descriptors = descriptorsOfImage.descriptors
    # Level 1: a list with size = 4 to store histograms at different locations
    VLADOfLevelOne = np.zeros((4, k, dim))
    for descriptor in descriptors:
        x = descriptor.x
        y = descriptor.y
        boundaryIndex = int(x / widthStep) + int(y / heightStep)
        feature = descriptor.descriptor
        shape = feature.shape[0]
        feature = feature.reshape(1, shape)
        codes, distance = vq(feature, k_means.cluster_centers_)
        VLADOfLevelOne[boundaryIndex][codes[0]] += np.array(feature).reshape(shape) - k_means.cluster_centers_[codes[0]]
    for i in xrange(4):
        # Square root norm
        VLADOfLevelOne[i] = np.sign(VLADOfLevelOne[i]) * np.sqrt(np.abs(VLADOfLevelOne[i]))
        # Local L2 norm
        vector_norm = np.linalg.norm(VLADOfLevelOne[i], axis=1)
        vector_norm[vector_norm < 1] = 1
        VLADOfLevelOne[i] /= vector_norm[:, None]
    # Level 0
    VLADOfLevelZero = VLADOfLevelOne[0] + VLADOfLevelOne[1] + VLADOfLevelOne[2] + VLADOfLevelOne[3]
    # Square root norm
    VLADOfLevelZero = np.sign(VLADOfLevelZero) * np.sqrt(np.abs(VLADOfLevelZero))
    # Local L2 norm
    vector_norm = np.linalg.norm(VLADOfLevelZero, axis=1)
    vector_norm[vector_norm < 1] = 1
    VLADOfLevelZero /= vector_norm[:, None]
    if level == 0:
        return VLADOfLevelZero
    elif level == 1:
        tempZero = VLADOfLevelZero.flatten() * 0.5
        tempOne = VLADOfLevelOne.flatten() * 0.5
        result = np.concatenate((tempZero, tempOne))
        # Global L2 norm
        norm = np.linalg.norm(result)
        if norm > 1.0:
            result /= norm
        return result
    else:
        return None
Example 14: project
def project(self, descriptors):
    """ Project descriptors onto the vocabulary and
        build a histogram of visual words. """
    #drawing = zeros((1000,1000))
    dic = {}
    # Histogram of visual words
    imhist = zeros((self.nbr_words))
    words, distance = vq(descriptors, self.voc)
    """
    tmp = list(set(words))  # get words with duplicates removed
    words = np.array(words)
    index = []
    for t in tmp:
        tmp_d = []
        index.append(np.where(words == t)[0])
        for i in index:
            tmp_d.append([pointors[i, :]])
        dic[t] = tmp_d
        tmp_d = np.array(dic[t])
        dic[t] = np.sort(tmp_d, axis=0)
        print dic[t]
        cv2.drawContours(drawing, dic[t], 0, (0, 255 - t, 0), 2)
    cv2.imshow("Result", drawing)
    cv2.waitKey()
    cv2.destroyAllWindows()
    """
    for w in words:
        imhist[w] += 1
    return imhist
Example 15: clusterDataSpec
def clusterDataSpec(data, k, algorithm):
    '''
    Cluster the given data into k clusters.
    @param data: 2D numpy array holding our data.
    @param k: the number of clusters.
    @param algorithm: either "k-means" or "GMM".
    @raise LogicalError if algorithm is other than "k-means" or "GMM".
    @return The predicted labels (clusters) for every example.
    '''
    if algorithm not in ["k-means", "GMM"]:
        raise LogicalError, "Method %s: Clustering is made only through K-means or GMM." % (stack()[0][3])
    print "Clustering for k=%d." % (k)
    if algorithm == "k-means":
        data = whiten(data)  # whiten returns the scaled data; it does not modify it in place
        codebook, _distortion = kmeans(data, k, 10)  # 10 iterations only, to make it faster
    else:
        g = GMM(n_components=k, thresh=1e-05, covariance_type='diag', n_iter=10)
        g.fit(data)
    #print "Optimal number of clusters according to BIC: %d." %(optimalK)
    # Return predicted labels
    if algorithm == "k-means":
        return vq(data, codebook)[0]  # predictions on the same data
    else:
        return g.predict(data)  # predictions on the same data