本文整理汇总了Python中sift.process_image函数的典型用法代码示例。如果您正苦于以下问题:Python process_image函数的具体用法?Python process_image怎么用?Python process_image使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了process_image函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: sift_pan_desc_generator
def sift_pan_desc_generator(path='/home/aurora/hdd/workspace/PycharmProjects/data/N20040103G/'):
    """Run SIFT on every image file under *path*.

    Descriptors for the i-th file are written to ``pan<i>.sift`` in the
    current directory; the list of generated descriptor file names is
    returned.
    """
    descriptor_files = []
    for i, image_path in enumerate(getFiles(path)):
        out_name = 'pan' + str(i) + '.sift'
        sift.process_image(image_path, out_name)
        descriptor_files.append(out_name)
    return descriptor_files
示例2: cbir_train
def cbir_train(train_path, voc_name, db_name, n_subsample=2000, n_cluster=2000, subfeatsampling=10):
voc_name = voc_name + '_' + str(n_subsample) + '_' + str(n_cluster) + '_' + str(subfeatsampling)
db_name = db_name[:-3] + '_' + str(n_subsample) + '_' + str(n_cluster) + '_' + str(subfeatsampling) + db_name[-3:]
imlist, featlist = cbir_utils.create_imglist_featlist(train_path)
imlist = imlist[:n_subsample]
featlist = featlist[:n_subsample]
### generate sift feature
nbr_images = len(imlist)
''''''
for i in range(nbr_images):
sift.process_image(imlist[i], featlist[i], mask = True)
### generate visual word
voc = visual_word.Vocabulary(voc_name)
voc.train(featlist, n_cluster, subfeatsampling)
with open(voc_name+'.pkl', 'wb') as f:
cPickle.dump(voc, f)
print 'vocabulary is', voc.name, voc.nbr_word
### generate image index
with open(voc_name+'.pkl', 'rb') as f:
voc = cPickle.load(f)
indx = image_search.Indexer(db_name, voc)
indx.create_tables()
for i in range(nbr_images):
locs, descr = sift.read_features_from_file(featlist[i])
indx.add_to_index(imlist[i], descr)
indx.db_commit()
print 'generate index finish!'
print 'training over'
示例3: sift_aurora_desc_generator
def sift_aurora_desc_generator(path, des):
    """Run SIFT on every file under *path*.

    The i-th file's descriptors go to ``<des><i>.sift``; the list of
    generated descriptor file names is returned.
    """
    out_files = []
    index = 0
    for image_path in getFiles(path):
        sift_name = des + str(index) + '.sift'
        sift.process_image(image_path, sift_name)
        out_files.append(sift_name)
        index += 1
    return out_files
示例4: extract_sift_feature
def extract_sift_feature(fname):
    """Extract SIFT keypoints for *fname* into ``TMP_DIR/<stem>.key``.

    The image is first converted to 8-bit greyscale and saved as a PGM,
    because Lowe's SIFT binary only reads PGM input.  Extraction is
    skipped entirely when the ``.key`` file already exists (results are
    cached on disk).

    Fix: the temporary ``.pgm`` was leaked if ``sift.process_image``
    raised; cleanup now runs in a ``finally`` block.
    """
    fn, fext = os.path.splitext(os.path.basename(fname))
    key_path = TMP_DIR + fn + '.key'
    if os.path.exists(key_path):
        return
    pgm_path = TMP_DIR + fn + '.pgm'
    im = Image.open(fname)
    im_l = im.convert('L')          # greyscale; 'PPM' driver writes PGM for mode 'L'
    im_l.save(pgm_path, 'PPM')
    try:
        sift.process_image(pgm_path, key_path)
    finally:
        # always remove the temporary PGM, even when SIFT extraction fails
        if os.path.exists(pgm_path):
            os.remove(pgm_path)
示例5: plot_sift_feature
def plot_sift_feature(im):
    """Compute SIFT features for *im* and display them over the image.

    Features are written to the scratch file 'tmp.sift', read back, and
    plotted as circles on a greyscale figure.
    """
    scratch = 'tmp.sift'
    sift.process_image(im, scratch)
    locs, descs = sift.read_features_from_file(scratch)
    figure()
    gray()
    sift.plot_features(im, locs, circle=True)
    show()
示例6: get_krt
def get_krt(im1, im2):
    """Recover camera matrices for two views of (mostly) planar scene points.

    SIFT features are matched between *im1* and *im2*, a homography H is
    estimated with RANSAC, and the second camera matrix is derived from the
    first through H.  K and the relative pose are pickled to
    '<im2 stem>.pkl'.

    NOTE(review): assumes im1/im2 are file paths whose extension is exactly
    three characters (the '[:-3]' / '[:-4]' slicing below) — confirm with
    callers.
    """
    ims = [im1, im2]
    sifts = []
    for x in range(2):
        # '<name>.xyz' -> '<name>.sift'
        sifts.append(ims[x][:-3]+"sift")
    # compute features
    #sift.process_image('../../data/book_frontal.JPG','../../data/im0.sift')
    sift.process_image(ims[0],sifts[0])
    l0,d0 = sift.read_features_from_file(sifts[0])
    #sift.process_image('../../data/book_perspective.JPG','../../data/im1.sift')
    sift.process_image(ims[1],sifts[1])
    l1,d1 = sift.read_features_from_file(sifts[1])
    # match features and estimate homography
    matches = sift.match_twosided(d0,d1)
    ndx = matches.nonzero()[0]
    fp = homography.make_homog(l0[ndx,:2].T)
    ndx2 = [int(matches[i]) for i in ndx]
    print len(ndx2)  # number of two-sided matches feeding RANSAC
    tp = homography.make_homog(l1[ndx2,:2].T)
    model = homography.RansacModel()
    H,ransac_data = homography.H_from_ransac(fp,tp,model)
    # camera calibration
    #K = camera.my_calibration((747,1000))
    K = camera.my_calibration((Image.open(im2).size))
    # 3D points at plane z=0 with sides of length 0.2
    box = cube.cube_points([0,0,0.1],0.1)
    # project bottom square in first image
    cam1 = camera.Camera( hstack((K,dot(K,array([[0],[0],[-1]])) )) )
    # first points are the bottom square
    box_cam1 = cam1.project(homography.make_homog(box[:,:5]))
    # use H to transfer points to the second image
    print dot(H,box_cam1)
    box_trans = homography.normalize(dot(H,box_cam1))
    # compute second camera matrix from cam1 and H
    cam2 = camera.Camera(dot(H,cam1.P))
    # enforce a proper rotation: rebuild the third column as the cross
    # product of the first two before re-applying K
    A = dot(linalg.inv(K),cam2.P[:,:3])
    A = array([A[:,0],A[:,1],cross(A[:,0],A[:,1])]).T
    cam2.P[:,:3] = dot(K,A)
    # project with the second camera
    box_cam2 = cam2.project(homography.make_homog(box))
    # test: projecting point on z=0 should give the same
    point = array([1,1,0,1]).T
    print homography.normalize(dot(dot(H,cam1.P),point))
    print cam2.project(point)
    # persist K and the relative pose [R|t] for later reuse
    import pickle
    with open('%s.pkl' % ims[1][:-4],'w') as f:
        pickle.dump(K,f)
        pickle.dump(dot(linalg.inv(K),cam2.P),f)
    sys.stderr.write("K and Rt dumped to %s.pkl\n" % ims[1][:-4])
示例7: get_sift_lowe
def get_sift_lowe(img):
    """Return SIFT descriptors for *img*, caching them in '<img>.sift'.

    Lowe's binary occasionally produces an empty output file; in that
    case the stale file is deleted and extraction is retried once.

    Fix: the original duplicated the zero-size retry twice (once via the
    ``process_image`` return value, once via ``getsize``); the two paths
    are consolidated into a single explicit check.
    """
    features_fname = img + '.sift'
    if not os.path.isfile(features_fname):
        sift.process_image(img, features_fname)
    # retry once if extraction left an empty (zero-byte) descriptor file
    if os.path.isfile(features_fname) and os.path.getsize(features_fname) == 0:
        os.remove(features_fname)
        sift.process_image(img, features_fname)
    locs, desc = sift.read_features_from_file(features_fname)
    return desc
示例8: extractSift
def extractSift(input_files):
all_features_dict = {}
for i, fname in enumerate(input_files):
features_fname = fname + '.sift'
if exists(features_fname) == False:
print "calculating sift features for", fname
sift.process_image(fname, features_fname)
print "gathering sift features for", fname,
locs, descriptors = sift.read_features_from_file(features_fname)
print descriptors.shape
all_features_dict[fname] = descriptors
return all_features_dict
示例9: extractSift
def extractSift(input_files,target_folder):
    """Compute (or load cached) SIFT features for each image.

    Descriptor files are written into *target_folder* under the image's
    base name with a ``.sift`` extension.  Returns a dict mapping the
    original file name to its ``(locations, descriptors)`` pair.

    NOTE(review): assumes every fname has at least three '/'-separated
    components and contains a dot (the split chain below) — confirm with
    callers.
    """
    all_features_dict = {}
    count=0
    for i,fname in enumerate(input_files):
        # 'a/b/<img>.<ext>' -> '<target_folder>/<img>.sift'
        features_fname = target_folder+'/'+fname.split('/')[2].split('.')[0]+'.sift'
        if exists(features_fname) == False:
            print("Calculating sift features for ",fname)
            # count is forwarded to process_image; its role there is not
            # visible from this file
            sift.process_image(fname, features_fname,count)
            count+=1
        locs, descriptors = sift.read_features_from_file(features_fname)
        all_features_dict[fname] = (locs,descriptors)
    # NOTE(review): no matching chdir *into* a subdirectory is visible in
    # this function — presumably sift.process_image changes the working
    # directory; confirm, otherwise this walks up one level unexpectedly
    os.chdir('..')
    return all_features_dict
示例10: find_matches
def find_matches(image_names, root):
    """Match SIFT features between consecutive images.

    For every image ``<name>.png`` under *root*, a cached ``<name>.sift``
    descriptor file is created if missing.  Returns ``(matches, l, d)``
    where ``matches[i]`` maps features of image i+1 onto image i, and
    ``l``/``d`` hold per-image locations and descriptors keyed by index.
    """
    locations = {}
    descriptors = {}
    total = len(image_names)
    for idx, name in enumerate(image_names):
        sift_path = os.path.join(root, '{}.sift'.format(name))
        if not os.path.isfile(sift_path):
            image_path = os.path.join(root, '{}.png'.format(name))
            sift.process_image(image_path, sift_path)
        locations[idx], descriptors[idx] = sift.read_features_from_file(sift_path)
    matches = {}
    for idx in range(total - 1):
        matches[idx] = sift.match(descriptors[idx + 1], descriptors[idx])
    return matches, locations, descriptors
示例11: extractSift
def extractSift(input_files):
    """Compute SIFT descriptors for each image via the siftDemoV4 binary.

    Results are written under 'sift_output/<img>.sift' (relative to the
    siftDemoV4 directory entered below).  Returns a dict mapping file
    name -> descriptor array.

    NOTE(review): the working directory is changed into 'siftDemoV4' on
    the first iteration and restored with chdir('..') afterwards — if
    *input_files* is empty the final chdir('..') still runs without a
    matching chdir-in; confirm callers never pass an empty list.
    """
    all_features_dict = {}
    count = 0
    for i,fname in enumerate(input_files):
        # path to store resulting sift files
        features_fname = 'sift_output/'+fname.split('/')[2].split('.')[0]+'.sift'
        if count == 0:
            # enter the directory holding the SIFT demo binary once
            os.chdir('siftDemoV4')
        print("Calculating sift features for ",fname)
        sift.process_image(fname,features_fname,count)
        count+=1
        locs, descriptors = sift.read_features_from_file(features_fname)
        all_features_dict[fname] = descriptors
    os.chdir('..')
    return all_features_dict
示例12: extractSift
def extractSift(input_files):
print "extracting Sift features"
all_features_dict = {}
for i, fname in enumerate(input_files):
rest_of_path = fname[:-(len(os.path.basename(fname)))]
rest_of_path = os.path.join(rest_of_path, "sift")
rest_of_path = os.path.join(rest_of_path, os.path.basename(fname))
features_fname = rest_of_path + '.sift'
if os.path.exists(features_fname) == False:
# print "calculating sift features for", fname
sift.process_image(fname, features_fname)
# print "gathering sift features for", fname,
locs, descriptors = sift.read_features_from_file(features_fname)
# print descriptors.shape
all_features_dict[fname] = descriptors
return all_features_dict
示例13: extractMF
def extractMF(filename):
features_fname = filename + '.sift'
sift.process_image(filename, features_fname)
locs, descriptors = sift.read_features_from_file(features_fname)
sh = min(locs.shape[0], 1000)
res = np.zeros((sh,SIZE_LOCAL_FEATURE)).astype(np.float32)
extra = [20,False,True,False,0,0,0]
WIN = 5
for i in range(sh):
x = np.int32(round(locs[i][0]))
y = np.int32(round(locs[i][1]))
I = Image.open(filename)
Nx,Ny = I.size
a = sg.spec(I.crop((max(x-WIN,0),max(y-WIN,0),min(x+WIN,Nx-1),min(y+WIN,Ny-1))),extra)
res[i] = a
print res.shape
return res
示例14: main
def __main__:
nbr_images = len(imlist)
featlist = [ imlist[i][:-3] + 'sif' for i in range(nbr_images))
for i in range(nbr_images):
sift.process_image(imlist[i],featlist[i])
voc = vocabularly.Vocabulary('ukbenchtest')
voc.train(featlist,1000,10)
with open('vocabulary.pkl', 'wb') as f:
pickle.dump(voc,f)
print 'vocabulary is:', voc.name, voc.nbr_wods
nbr_images = len(imlist)
with open('vocabulary.pkl', 'rb') as f:
voc = pickle.load(f)
indx = imagesearch.Indexer('test.db',voc)
indx.create_tables()
for i in range(nbr_images)[:100]:
locs,descr = sift.read_features_from_file(featlist[i])
indx.add_to_index(imlist[i],descr)
indx.db_commit()
con = sqlite.connect('test.db')
print con.execute('select count (filename) from imlist').fetchone()
print con.execute('select * from imlist').fetchone()
src = imagesearch.Searcher('test.db')
locs,descr = sift.read_features_from_file(featlist[0])
iw = voc.project(descr)
print 'ask using a histogram...'
print src.candidates_from_histogram(iw)[:10]
print 'try a query...'
print src.query(imlist[0])[:10]
示例15: extractSift
def extractSift(input_files):
print "extracting Sift features"
all_features_dict = {}
#all_features = zeros([1,128])
for i, fname in enumerate(input_files):
features_fname = fname + '.sift'
if exists(features_fname) == False:
print "calculating sift features for", fname
sift.process_image(fname, features_fname)
locs, descriptors = sift.read_features_from_file(features_fname)
# print descriptors.shape
all_features_dict[fname] = descriptors
# if all_features.shape[0] == 1:
# all_features = descriptors
# else:
# all_features = concatenate((all_features, descriptors), axis = 0)
return all_features_dict