本文整理汇总了Python中sift.read_features_from_file函数的典型用法代码示例。如果您正苦于以下问题:Python read_features_from_file函数的具体用法?Python read_features_from_file怎么用?Python read_features_from_file使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了read_features_from_file函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: train
def train(self, featurefiles, k=100, subsampling=10):
    """Train a vocabulary from features in files listed in |featurefiles| using
    k-means with k words. Subsampling of training data can be used for speedup.

    Args:
        featurefiles: list of paths to SIFT feature files, one per image.
        k: number of visual words (k-means cluster centers).
        subsampling: keep every |subsampling|-th descriptor for clustering.

    Side effects: sets self.voc, self.word_count, self.idf, self.trainingdata.
    """
    image_count = len(featurefiles)
    # Read every image's descriptors first, then stack with a single vstack.
    # The original grew `descriptors` with vstack inside the loop, copying the
    # whole accumulated array each iteration (O(n^2) in total descriptor count).
    descr = [sift.read_features_from_file(f)[1] for f in featurefiles]
    descriptors = numpy.vstack(descr)
    # Run k-means (the trailing 1 is the number of restarts).
    self.voc, distortion = vq.kmeans(descriptors[::subsampling, :], k, 1)
    self.word_count = self.voc.shape[0]
    # Project training data on vocabulary (bag-of-words histogram per image).
    imwords = numpy.zeros((image_count, self.word_count))
    for i in range(image_count):
        imwords[i] = self.project(descr[i])
    occurence_count = numpy.sum((imwords > 0) * 1, axis=0)
    # Inverse document frequency; +1.0 guards against division by zero.
    self.idf = numpy.log(image_count / (occurence_count + 1.0))
    self.trainingdata = featurefiles
示例2: train
def train(self, featurefiles, k=100, subsampling=10):
    """Train a visual vocabulary with MiniBatchKMeans from SIFT feature files.

    featurefiles -- list of feature-file paths, one per image
    k            -- number of visual words (cluster centers)
    subsampling  -- keep every |subsampling|-th descriptor per image

    Side effects: sets self.voc, self.nbr_word, self.idf, self.traindata.
    """
    nbr_images = len(featurefiles)
    descr = []
    descr.append(sift.read_features_from_file(featurefiles[0])[1])
    # NOTE(review): the first image's descriptors are stacked in full, while
    # every later image is subsampled below — confirm this asymmetry is intended.
    descriptors = descr[0]
    print "begin loading image feature files..."
    for i in np.arange(1, nbr_images):
        descr.append(sift.read_features_from_file(featurefiles[i])[1])
        # descriptors = np.vstack((descriptors, descr[i]))
        # Subsample at load time so the stacked matrix stays small enough to cluster.
        descriptors = np.vstack((descriptors, descr[i][::subsampling,:]))
        if i%100 == 0:
            print i, "images have been loaded..."
    print "finish loading image feature files!"
    # self.voc, distortion = cluster.kmeans(descriptors[::subsampling,:], k, 1)
    print "begin MiniBatchKMeans cluster....patient"
    # MiniBatchKMeans scales to large descriptor sets far better than plain k-means.
    mbk = MiniBatchKMeans(k, init="k-means++", compute_labels=False, n_init=3, init_size=3*k)
    # mbk.fit(descriptors[::subsampling,:])
    mbk.fit(descriptors)
    self.voc = mbk.cluster_centers_  # vocabulary = the learned cluster centers
    print "cluster finish!"
    self.nbr_word = self.voc.shape[0]
    # Project each training image onto the vocabulary (bag-of-words histograms).
    imwords = np.zeros((nbr_images, self.nbr_word))
    for i in xrange(nbr_images):
        imwords[i] = self.project(descr[i])
    nbr_occurences = np.sum((imwords > 0)*1, axis=0)
    # Inverse document frequency; +1 avoids division by zero for unused words.
    self.idf = np.log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )
    self.traindata = featurefiles
示例3: train
def train(self,featurefiles,k=100,subsampling=10):
    """ Build a visual vocabulary from the feature files listed in
    featurefiles via k-means clustering with k visual words.
    The training descriptors can be subsampled for speedup. """
    n_images = len(featurefiles)
    # Load each image's descriptors, stacking them as we go.
    descr = [sift.read_features_from_file(featurefiles[0])[1]]
    descriptors = descr[0]
    for idx in arange(1, n_images):
        feats = sift.read_features_from_file(featurefiles[idx])[1]
        descr.append(feats)
        descriptors = vstack((descriptors, feats))
    # k-means; the trailing 1 is the number of restarts.
    self.voc, distortion = kmeans(descriptors[::subsampling, :], k, 1)
    self.nbr_words = self.voc.shape[0]
    # Bag-of-words histogram for every training image.
    imwords = zeros((n_images, self.nbr_words))
    for idx in range(n_images):
        imwords[idx] = self.project(descr[idx])
    occurrences = sum((imwords > 0) * 1, axis=0)
    # IDF weights; +1 guards against division by zero for unused words.
    self.idf = log((1.0 * n_images) / (1.0 * occurrences + 1))
    self.trainingdata = featurefiles
示例4: train
def train(self,featurefiles,k=100,subsampling=10):
    """ Read features from the files listed in featurefiles and train a
    vocabulary using k-means with k visual words. The training data can
    be thinned with subsampling for speedup. """
    nbr_images = len(featurefiles)
    # Read the features from file.
    descr = []
    descr.append(sift.read_features_from_file(featurefiles[0])[1])
    descriptors = descr[0] #stack all features for k-means
    for i in arange(1,nbr_images):
        descr.append(sift.read_features_from_file(featurefiles[i])[1])
        descriptors = vstack((descriptors,descr[i]))
    # k-means: the last number determines the number of runs.
    self.voc,distortion = kmeans(descriptors[::subsampling,:],k,1)
    self.nbr_words = self.voc.shape[0]
    # Project each training image onto the vocabulary in turn.
    imwords = zeros((nbr_images,self.nbr_words))
    for i in range( nbr_images ):
        imwords[i] = self.project(descr[i])
    nbr_occurences = sum( (imwords > 0)*1 ,axis=0)
    # IDF weights; +1 guards against division by zero for unused words.
    self.idf = log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )
    self.trainingdata = featurefiles
示例5: get_sift_match
def get_sift_match(f1, f2):
    """Return the SIFT match score between images f1 and f2.

    The precomputed '.key' feature files are expected in TMP_DIR, named
    after each image's basename without extension. Best-effort: returns
    0.0 when either feature file is missing or unreadable.
    """
    # splitext()[0] gives the basename without extension; the unused
    # extension locals from the original are dropped.
    fn1 = os.path.splitext(os.path.basename(f1))[0]
    fn2 = os.path.splitext(os.path.basename(f2))[0]
    try:
        l1, d1 = sift.read_features_from_file(TMP_DIR + fn1 + '.key')
        l2, d2 = sift.read_features_from_file(TMP_DIR + fn2 + '.key')
        return sift.score(d1, d2)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any read/score failure means "no match".
        return 0.0
示例6: get_krt
def get_krt(im1, im2):
    """Estimate the camera matrices relating im1 and im2 via a planar
    homography and pickle the calibration K and relative pose for im2.

    Side effects: writes '<im>.sift' feature files for both images, dumps
    K and inv(K)*P2 to '<im2 stem>.pkl', and prints debug output.
    NOTE(review): assumes both image paths end in a 3-character extension
    (the '[:-3]' / '[:-4]' slicing depends on it) — confirm with callers.
    """
    ims = [im1, im2]
    sifts = []
    for x in range(2):
        sifts.append(ims[x][:-3]+"sift")  # replace the 3-char extension with 'sift'
    # compute features
    #sift.process_image('../../data/book_frontal.JPG','../../data/im0.sift')
    sift.process_image(ims[0],sifts[0])
    l0,d0 = sift.read_features_from_file(sifts[0])
    #sift.process_image('../../data/book_perspective.JPG','../../data/im1.sift')
    sift.process_image(ims[1],sifts[1])
    l1,d1 = sift.read_features_from_file(sifts[1])
    # match features and estimate homography
    matches = sift.match_twosided(d0,d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx,:2].T)  # matched points in image 0 (homogeneous)
    ndx2 = [int(matches[i]) for i in ndx]
    print len(ndx2)
    tp = homography.make_homog(l1[ndx2,:2].T)  # corresponding points in image 1
    model = homography.RansacModel()
    H,ransac_data = homography.H_from_ransac(fp,tp,model)
    # camera calibration
    #K = camera.my_calibration((747,1000))
    K = camera.my_calibration((Image.open(im2).size))
    # 3D points at plane z=0 with sides of length 0.2
    box = cube.cube_points([0,0,0.1],0.1)
    # project bottom square in first image
    cam1 = camera.Camera( hstack((K,dot(K,array([[0],[0],[-1]])) )) )
    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:,:5]))
    # use H to transfer points to the second image
    print dot(H,box_cam1)
    box_trans = homography.normalize(dot(H,box_cam1))
    # compute second camera matrix from cam1 and H
    cam2 = camera.Camera(dot(H,cam1.P))
    A = dot(linalg.inv(K),cam2.P[:,:3])
    # Rebuild the third column as the cross product of the first two so the
    # rotation part of the pose is orthogonal.
    A = array([A[:,0],A[:,1],cross(A[:,0],A[:,1])]).T
    cam2.P[:,:3] = dot(K,A)
    # project with the second camera
    box_cam2 = cam2.project(homography.make_homog(box))
    # test: projecting point on z=0 should give the same
    point = array([1,1,0,1]).T
    print homography.normalize(dot(dot(H,cam1.P),point))
    print cam2.project(point)
    import pickle
    with open('%s.pkl' % ims[1][:-4],'w') as f:
        pickle.dump(K,f)
        pickle.dump(dot(linalg.inv(K),cam2.P),f)
    sys.stderr.write("K and Rt dumped to %s.pkl\n" % ims[1][:-4])
示例7: cbir_train
def cbir_train(train_path, voc_name, db_name, n_subsample=2000, n_cluster=2000, subfeatsampling=10):
    """Train a CBIR pipeline end to end.

    Computes SIFT features for up to n_subsample images under train_path,
    trains an n_cluster-word vocabulary, pickles it, then indexes every
    image into an SQLite database.

    The hyperparameters are appended to voc_name and db_name so runs with
    different settings do not overwrite each other's outputs.
    """
    voc_name = voc_name + '_' + str(n_subsample) + '_' + str(n_cluster) + '_' + str(subfeatsampling)
    # Insert the parameter suffix before the 3-char db extension (e.g. '.db').
    db_name = db_name[:-3] + '_' + str(n_subsample) + '_' + str(n_cluster) + '_' + str(subfeatsampling) + db_name[-3:]
    imlist, featlist = cbir_utils.create_imglist_featlist(train_path)
    imlist = imlist[:n_subsample]
    featlist = featlist[:n_subsample]
    ### generate sift feature
    nbr_images = len(imlist)
    ''''''
    for i in range(nbr_images):
        sift.process_image(imlist[i], featlist[i], mask = True)
    ### generate visual word
    voc = visual_word.Vocabulary(voc_name)
    voc.train(featlist, n_cluster, subfeatsampling)
    # Persist the trained vocabulary so indexing/search can reload it.
    with open(voc_name+'.pkl', 'wb') as f:
        cPickle.dump(voc, f)
    print 'vocabulary is', voc.name, voc.nbr_word
    ### generate image index
    with open(voc_name+'.pkl', 'rb') as f:
        voc = cPickle.load(f)
    indx = image_search.Indexer(db_name, voc)
    indx.create_tables()
    for i in range(nbr_images):
        locs, descr = sift.read_features_from_file(featlist[i])
        indx.add_to_index(imlist[i], descr)
    indx.db_commit()
    print 'generate index finish!'
    print 'training over'
示例8: get_descriptors
def get_descriptors(img):
    """Return the feature locations and descriptors stored for *img*.

    *img* is an image object that already carries a ``filename_keypoints``
    attribute pointing at its saved keypoint file.
    """
    locations, descriptors = sift.read_features_from_file(img.filename_keypoints)
    return locations, descriptors
示例9: runSurf
def runSurf(self):
    """Run the external SURF binary on a grayscale copy of self.image and
    load the detected features.

    Side effects: writes '<filename>_gray.pgm' and '<filename>_result.txt'
    next to the source image; sets self.locators and self.descriptors.
    """
    #save a grayscale image
    im = self.image.convert("L")
    im.save(self.filename + "_gray.pgm", "PPM")
    # Build the command line: <surf> -i <gray.pgm> -o <result.txt>
    surfexec = surfpath + " -i " + self.filename + "_gray.pgm" + " -o " + self.filename + "_result.txt"
    print surfexec
    os.system(surfexec)
    # The SURF output file is parsed with the same reader as SIFT features.
    self.locators, self.descriptors = sift.read_features_from_file(self.filename + "_result.txt")
示例10: len
def __main__:
nbr_images = len(imlist)
featlist = [ imlist[i][:-3] + 'sif' for i in range(nbr_images))
for i in range(nbr_images):
sift.process_image(imlist[i],featlist[i])
voc = vocabularly.Vocabulary('ukbenchtest')
voc.train(featlist,1000,10)
with open('vocabulary.pkl', 'wb') as f:
pickle.dump(voc,f)
print 'vocabulary is:', voc.name, voc.nbr_wods
nbr_images = len(imlist)
with open('vocabulary.pkl', 'rb') as f:
voc = pickle.load(f)
indx = imagesearch.Indexer('test.db',voc)
indx.create_tables()
for i in range(nbr_images)[:100]:
locs,descr = sift.read_features_from_file(featlist[i])
indx.add_to_index(imlist[i],descr)
indx.db_commit()
con = sqlite.connect('test.db')
print con.execute('select count (filename) from imlist').fetchone()
print con.execute('select * from imlist').fetchone()
src = imagesearch.Searcher('test.db')
locs,descr = sift.read_features_from_file(featlist[0])
iw = voc.project(descr)
print 'ask using a histogram...'
print src.candidates_from_histogram(iw)[:10]
print 'try a query...'
print src.query(imlist[0])[:10]
示例11: train
def train(self,featurefiles,k=100,subsampling=10):
    """ Read features from the files listed in featurefiles and train a
    vocabulary using k-means with k visual words. The training data can
    be thinned with subsampling for speedup. """
    nbr_images = len(featurefiles)
    # Read the features from file.
    #points = []
    descr = []
    descr.append(sift.read_features_from_file(featurefiles[0])[1])
    # optional.view feature points.
    #points.append( np.array(sift.read_features_from_file(featurefiles[0])[0][:,0:2]) ) # stock of x,y axis value
    descriptors = descr[0] #stack all features for k-means
    #pointors = points[0]
    for i in arange(1,nbr_images):
        descr.append(sift.read_features_from_file(featurefiles[i])[1])
        #points.append( np.array(sift.read_features_from_file(featurefiles[i])[0][:,0:2]) ) # stock of x,y axis value
        descriptors = vstack((descriptors,descr[i]))
    # k-means: the last number specifies the number of runs.
    self.voc,distortion = kmeans(descriptors[::subsampling,:],k,1)
    self.nbr_words = self.voc.shape[0]
    # Save the cluster centroids for later reuse.
    with open('voc_centroid.pkl','wb') as f:
        pickle.dump(self.voc,f)
    """
    # ワードとx,y座標の辞書作成
    dic = []
    for i in xrange(len(nbr_images)):
        dic[i] = {}
        dic[i][]
    """
    # Project the training images onto the vocabulary.
    imwords = zeros((nbr_images,self.nbr_words))
    # NOTE(review): only the FIRST image is projected here (xrange(1)); the
    # commented-out xrange(nbr_images) looks like the intended loop — confirm.
    for i in xrange(1): #xrange( nbr_images ):
        # imwords[i] = self.project(descr[i], points[i]) # use this variant when running PLSA
        imwords[i] = self.project(descr[i])
    nbr_occurences = sum( (imwords > 0)*1 ,axis=0)
    # IDF weights; +1 guards against division by zero for unused words.
    self.idf = log( (1.0*nbr_images) / (1.0*nbr_occurences+1) )
    self.trainingdata = featurefiles
示例12: plot_sift_feature
def plot_sift_feature(im):
    """Compute SIFT features of *im* via a temporary feature file and
    display them drawn over the image as circles."""
    temp_feature_file = 'tmp.sift'
    sift.process_image(im, temp_feature_file)
    locations, descriptors = sift.read_features_from_file(temp_feature_file)
    figure()
    gray()
    sift.plot_features(im, locations, circle=True)
    show()
示例13: read_feature_labels
def read_feature_labels(path):
    """Load every '.dsift' feature file directly under *path* and return
    the flattened descriptor vectors stacked into an array, one row per
    file (in os.listdir order)."""
    feature_files = [os.path.join(path, name)
                     for name in os.listdir(path)
                     if name.endswith('.dsift')]
    flattened = []
    for feature_file in feature_files:
        locations, descriptors = sift.read_features_from_file(feature_file)
        flattened.append(descriptors.flatten())
    return array(flattened)
示例14: runsift
def runsift(self):
    """Run the external SIFT binary on a downscaled grayscale copy of
    self.image and load the resulting keypoints and descriptors.

    Side effects: writes '<filename>_gray.pgm' and '<filename>_result.txt'
    next to the source image; sets self.locators and self.descriptors.
    """
    #save a grayscale image
    imsize = self.image.size
    # Downscale by 10x in each dimension to keep the external SIFT run fast.
    im = self.image.resize((imsize[0]/10, imsize[1]/10))
    im = im.convert("L")
    im.save(self.filename + "_gray.pgm", "PPM")
    # Build the shell command: <sift> <gray.pgm> > <result.txt>
    siftexec = siftpath + self.filename + "_gray.pgm >" + self.filename + "_result.txt"
    print siftexec
    os.system(siftexec)
    self.locators, self.descriptors = sift.read_features_from_file(self.filename + "_result.txt")
示例15: extractSift
def extractSift(input_files):
all_features_dict = {}
for i, fname in enumerate(input_files):
features_fname = fname + '.sift'
if exists(features_fname) == False:
print "calculating sift features for", fname
sift.process_image(fname, features_fname)
print "gathering sift features for", fname,
locs, descriptors = sift.read_features_from_file(features_fname)
print descriptors.shape
all_features_dict[fname] = descriptors
return all_features_dict