This article collects typical usage examples of the Python function menpo.io.import_images. If you are unsure what import_images does, how to call it, or how it is used in practice, the curated code examples below should help.
The following presents 15 code examples of the import_images function, ordered by popularity by default.
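As a quick orientation before the examples, here is a minimal usage sketch. The image directory is a placeholder, and the exact keyword arguments (for instance normalise versus normalize) and the lazy return type vary with the installed menpo version, as the examples below suggest.

import menpo.io as mio

# '/path/to/images/' is a hypothetical directory used only for illustration.
# import_images attaches landmark files that share a filename stem with the
# image; verbose prints progress and max_images limits how many files are read.
images = mio.import_images('/path/to/images/*.jpg', max_images=50, verbose=True)

# In recent menpo versions the result is a LazyList, so each image is only
# decoded when it is accessed.
first = images[0]
print(first.n_channels, first.has_landmarks)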
Example 1: generate_frames_max_bbox
def generate_frames_max_bbox(frames_path, frames_format, pts_paths, pts_formats, pts_names, save_path,
                             proportion, figure_size, overwrite, save_original,
                             render_options, only_ln=False, verbose=True):
    # find crop offset
    print('Computing max bounding box:')
    bounds_x = []
    bounds_y = []
    try:
        if len(os.listdir(pts_paths[0])) == 0:
            raise IndexError()
    except IndexError:
        if len(pts_paths) > 0:
            print('The directory of landmarks (%s) is empty, returning' % pts_paths[0])
        return
    for s in mio.import_landmark_files(pts_paths[0] + '*.pts', verbose=verbose):
        min_b, max_b = s.lms.bounds()
        bounds_x.append(max_b[0] - min_b[0])
        bounds_y.append(max_b[1] - min_b[1])
    off1 = round(max(bounds_x) * (1. + proportion) / 2)
    off2 = round(max(bounds_y) * (1. + proportion) / 2)
    print('\nLoad images, crop and save:')
    try:
        from joblib import Parallel, delayed
        Parallel(n_jobs=-1, verbose=4)(
            delayed(_aux)(im, pts_paths, pts_names, pts_formats, save_path, save_original,
                          off1, off2, figure_size, overwrite, render_options, only_ln=only_ln)
            for im in mio.import_images(frames_path + '*' + frames_format, verbose=False))
    except:
        print('Sequential execution')
        for im in mio.import_images(frames_path + '*' + frames_format, verbose=verbose):
            _aux(im, pts_paths, pts_names, pts_formats, save_path, save_original,
                 off1, off2, figure_size, overwrite, render_options, only_ln=only_ln)
Example 2: trainAAMObject
def trainAAMObject(self):
    try:
        from menpo.feature import fast_dsift
    except:
        pass
    # detector = load_dlib_frontal_face_detector()

    def load_image(i):
        i = i.crop_to_landmarks_proportion(0.5)
        if i.n_channels == 3:
            i = i.as_greyscale()
        # This step is actually quite important! If we are using
        # an AAM and a PiecewiseAffine transform then we need
        # to ensure that our triangulation is sensible so that
        # we don't end up with ugly skinny triangles. Luckily,
        # we provide a decent triangulation in the landmarks
        # package.
        labeller(i, 'PTS', ibug_face_68_trimesh)
        return i

    training_images_path = Path(pathToTrainset)
    training_images = [load_image(i) for i in mio.import_images(training_images_path, verbose=True)]

    aam = HolisticAAM(
        training_images,
        group='ibug_face_68_trimesh',
        scales=(0.5, 1.0),
        diagonal=150,
        max_appearance_components=200,
        max_shape_components=20,
        verbose=True
    )
    pickle.dump(aam, open(AAMFile, "wb"))
Example 3: train_aic_rlms
def train_aic_rlms(trainset, output, n_train_imgs=None):
    training_images = []
    # load landmarked images
    for i in mio.import_images(Path(trainset) / '*', verbose=True, max_images=n_train_imgs):
        # crop image
        i = i.crop_to_landmarks_proportion(0.5)
        labeller(i, 'PTS', face_ibug_68_to_face_ibug_66_trimesh)
        # convert it to greyscale if needed
        if i.n_channels == 3:
            i = i.as_greyscale(mode='average')
        # append it to the list
        training_images.append(i)

    offsets = np.meshgrid(range(-0, 1, 1), range(-0, 1, 1))
    offsets = np.asarray([offsets[0].flatten(), offsets[1].flatten()]).T

    np.seterr(divide='ignore')
    np.seterr(invalid='ignore')

    unified = UnifiedAAMCLM(training_images,
                            parts_shape=(17, 17),
                            offsets=offsets,
                            group=test_group,
                            holistic_features=fast_dsift,
                            diagonal=100,
                            scales=(1, .5),
                            max_appearance_components=min(50, int(n_train_imgs / 2)),
                            verbose=True)

    n_appearance = [min(25, int(n_train_imgs / 2)), min(50, int(n_train_imgs / 2))]
    fitter = UnifiedAAMCLMFitter(unified, algorithm_cls=AICRLMS, n_shape=[3, 12], n_appearance=n_appearance)
    return fitter
Example 4: load_frgc
def load_frgc(session_id, recreate_meshes=False,
              output_base_path=Path('/vol/atlas/homes/pts08/'),
              input_base_path=Path('/vol/atlas/databases/frgc'),
              max_images=None):
    previously_pickled_path = output_base_path / 'frgc_{0}_68_cleaned.pkl'.format(session_id)
    abs_files_path = input_base_path / session_id / '*.abs'

    if not recreate_meshes and previously_pickled_path.exists():
        with open(str(previously_pickled_path)) as f:
            images = cPickle.load(f)
    else:
        # Add the custom ABS importer
        from menpo.io.input.extensions import image_types
        image_types['.abs'] = ABSImporter

        images = []
        for i, im in enumerate(mio.import_images(abs_files_path,
                                                 max_images=max_images,
                                                 verbose=True)):
            if im.n_landmark_groups > 0:
                preprocess_image(im)
                images.append(im)

        # Only dump the saved images if we loaded all of them!
        if max_images is None:
            with open(str(previously_pickled_path), 'wb') as f:
                cPickle.dump(images, f, protocol=2)

    return images
Example 5: load_frgc
def load_frgc(session_id, recreate_meshes=False,
              output_base_path='/vol/atlas/homes/pts08/',
              input_base_path='/vol/atlas/databases/frgc',
              max_images=None):
    previously_pickled_path = os.path.join(
        output_base_path, 'frgc_{0}_68_cleaned.pkl'.format(session_id))
    abs_files_path = os.path.join(input_base_path, session_id, '*.abs')

    if not recreate_meshes and os.path.exists(previously_pickled_path):
        with open(previously_pickled_path) as f:
            images = cPickle.load(f)
    else:
        all_images = list(mio.import_images(abs_files_path,
                                            max_images=max_images))
        images = [im for im in all_images if im.n_landmark_groups == 1]
        print '{0}% of the images had landmarks'.format(
            len(images) / float(len(all_images)) * 100)
        for i, im in enumerate(images):
            preprocess_image(im)
            print_replace_line(
                'Image {0} of {1} cleaned'.format(i + 1, len(images)))

        # Only dump the saved images if we loaded all of them!
        if max_images is None:
            cPickle.dump(images, open(previously_pickled_path, 'wb'),
                         protocol=2)

    return images
Example 6: load_n_create_generator
def load_n_create_generator(pattern, detector_name,
                            group=None, overwrite=False):
    import menpo.io as mio
    from menpo.landmark import LandmarkGroup
    from menpo.model import PCAModel

    try:
        detector = _DETECTORS[detector_name]()
    except KeyError:
        detector_list = ', '.join(list(_DETECTORS.keys()))
        raise ValueError('Valid detector types are: {}'.format(detector_list))

    print('Running {} detector on {}'.format(detector_name, pattern))
    bboxes = [(img, detect_and_check(img, detector, group=group))
              for img in mio.import_images(pattern, normalise=False,
                                           verbose=True)]

    # find all the detections that did not fail
    detections = filter(lambda x: x[1] is not None, bboxes)
    print('Creating a model out of {} detections.'.format(len(detections)))

    # normalize these to size [1, 1], centred on origin
    normed_detections = [
        normalize(im.landmarks[group].lms.bounding_box()).apply(det)
        for im, det in detections
    ]

    # build a PCA model from good detections
    pca = PCAModel(normed_detections)

    mio.export_pickle(pca, '{}_gen.pkl'.format(detector_name), overwrite=overwrite)
Example 7: test_import_images_are_ordered_and_unduplicated
def test_import_images_are_ordered_and_unduplicated():
    # we know that import_images returns images in path order
    imgs = list(mio.import_images(mio.data_dir_path()))
    imgs_filenames = [i.path.stem for i in imgs]
    print(imgs_filenames)
    exp_imgs_filenames = ['breakingbad', 'einstein', 'lenna', 'menpo_thumbnail', 'takeo', 'tongue']
    assert exp_imgs_filenames == imgs_filenames
Example 8: generate_dataset
def generate_dataset():
    with managed_dataset('lfpw-test') as p:
        for img in mio.import_images(p / '*.png', max_images=20,
                                     normalise=False, shuffle=True,
                                     landmark_resolver=_resolver):
            img.landmarks['gt'] = ibug_face_68(img.landmarks['gt'])[1]
            yield img.path.stem, img
Example 9: test_import_lazy_list
def test_import_lazy_list():
    from menpo.base import LazyList
    data_path = mio.data_dir_path()
    ll = mio.import_images(data_path)
    assert isinstance(ll, LazyList)
    ll = mio.import_landmark_files(data_path)
    assert isinstance(ll, LazyList)
Example 10: load_test_data
def load_test_data(testset, n_test_imgs=None):
    test_images = []
    for i in mio.import_images(Path(testset), verbose=True, max_images=n_test_imgs):
        i = i.crop_to_landmarks_proportion(0.5)
        labeller(i, 'PTS', face_ibug_68_to_face_ibug_66_trimesh)
        if i.n_channels == 3:
            i = i.as_greyscale(mode='average')
        test_images.append(i)
    return test_images
Example 11: read_images
def read_images(img_glob, normalise):
    # Read the training set into memory.
    images = []
    for img_orig in mio.import_images(img_glob, verbose=True, normalise=normalise):
        if not img_orig.has_landmarks:
            continue
        # Convert to greyscale and crop to landmarks.
        img = img_orig.as_greyscale(mode='average').crop_to_landmarks_proportion_inplace(0.5)
        # img = img.resize((MAX_FACE_WIDTH, img.shape[1]*(MAX_FACE_WIDTH/img.shape[0])))
        images.append(img)
    return np.array(images)
Example 12: test_trained_aam_default_dataset
def test_trained_aam_default_dataset(self, i_image_count=800):
    dataset = os.path.join(GetDirectory(__file__), 'lfpw')
    Model = AAM(dataset, i_debug=True)
    testset = os.path.join(dataset, 'testset', '*')
    forward_backward_errors = [Model.FitAnnotatedImage(Model.LoadImage(img))
                               for img in menpoio.import_images(testset, max_images=800, verbose=True)]
    # Ensure mean error < 0.1 - Experimentally derived
    err = 0
    for error in forward_backward_errors:
        err = err + error.final_error()
    self.assertTrue(err / len(forward_backward_errors) < 0.1)
Example 13: save_bounding_boxes
def save_bounding_boxes(pattern, detector_type, group=None,
                        sythesize_problematic=False, overwrite=False):
    import menpo.io as mio
    from menpo.landmark import LandmarkGroup
    from menpo.model import PCAModel

    try:
        detector = _DETECTORS[detector_type]()
    except KeyError:
        detector_list = ', '.join(list(_DETECTORS.keys()))
        raise ValueError('Valid detector types are: {}'.format(detector_list))

    print('Running {} detector on {}'.format(detector_type, pattern))
    bboxes = {img.path: detect_and_check(img, detector, group=group)
              for img in mio.import_images(pattern, normalise=False,
                                           verbose=True)}

    # find all the detections that failed
    problematic = filter(lambda x: x[1]['d'] is None, bboxes.items())
    print('Failed to detect {} objects'.format(len(problematic)))

    if len(problematic) > 0 and sythesize_problematic:
        print('Learning detector traits and sythesizing fits for {} '
              'images'.format(len(problematic)))
        # get the good detections
        detections = filter(lambda x: x['d'] is not None, bboxes.values())
        # normalize these to size [1, 1], centred on origin
        normed_detections = [normalize(r['gt']).apply(r['d'])
                             for r in detections]
        # build a PCA model from good detections
        pca = PCAModel(normed_detections)

        for p, r in problematic:
            # generate a new bbox offset in the normalized space by using
            # our learnt PCA basis
            d = random_instance(pca)
            # apply an inverse transform to place it on the image
            bboxes[p]['d'] = normalize(r['gt']).pseudoinverse().apply(d)

    to_save = len(bboxes)
    if not sythesize_problematic:
        to_save = to_save - len(problematic)
    print('Saving out {} {} detections'.format(to_save, detector_type))

    # All done, save out results
    for p, r in bboxes.items():
        if r['d'] is not None:
            lg = LandmarkGroup.init_with_all_label(r['d'])
            mio.export_landmark_file(lg, p.parent /
                                     (p.stem + '_{}.ljson'.format(detector_type)),
                                     overwrite=overwrite)
Example 14: main_for_ps_detector
def main_for_ps_detector(path_clips, in_bb_fol, out_bb_fol, out_model_fol, out_landmarks_fol, overwrite=False):
    # define a dictionary for the paths
    paths = {}
    paths['clips'] = path_clips
    paths['in_bb'] = path_clips + in_bb_fol    # existing bbox of detection
    paths['out_bb'] = path_clips + out_bb_fol  # save bbox of detection
    paths['out_lns'] = path_clips + out_landmarks_fol
    paths['out_model'] = mkdir_p(path_clips + out_model_fol)  # path that trained models will be saved.

    # Log file output.
    log = mkdir_p(path_clips + 'logs' + sep) + datetime.now().strftime("%Y.%m.%d.%H.%M.%S") + '_2_ffld.log'
    sys.stdout = Logger(log)

    print_fancy('Training person specific model with FFLD')
    list_clips = sorted(os.listdir(path_clips + frames))
    img_type = check_img_type(list_clips, path_clips + frames)
    negative_images = [i.as_greyscale(mode='channel', channel=1)
                       for i in mio.import_images('/vol/atlas/homes/pts08/non_person_images',
                                                  normalise=False, max_images=300)]
    [process_clip(clip_name, paths, img_type, negative_images, overwrite=overwrite) for clip_name in list_clips]
Example 15: load_database
def load_database(path_to_images, save_path, db_name, crop_percentage,
                  fast, group, verbose=False):
    # create filename
    if group is not None:
        filename = (db_name + '_' + group.__name__ + '_crop' +
                    str(int(crop_percentage * 100)))
    else:
        filename = db_name + 'PTS' + '_crop' + str(int(crop_percentage * 100))
    if fast:
        filename += '_menpofast.pickle'
    else:
        filename += '_menpo.pickle'
    save_path = os.path.join(save_path, filename)

    # check if file exists
    if file_exists(save_path):
        if verbose:
            print_dynamic('Loading images...')
        images = pickle_load(save_path)
        if verbose:
            print_dynamic('Images Loaded.')
    else:
        # load images
        images = []
        for i in mio.import_images(path_to_images, verbose=verbose):
            if fast:
                i = convert_from_menpo(i)
            i.crop_to_landmarks_proportion_inplace(crop_percentage, group='PTS')
            if group is not None:
                labeller(i, 'PTS', group)
            if i.n_channels == 3:
                i = i.as_greyscale(mode='average')
            images.append(i)
        # save images
        pickle_dump(images, save_path)

    # return images
    return images