This article collects typical usage examples of the Python function menpo.io.import_landmark_file. If you are wondering what import_landmark_file does, how to call it, or what real-world uses of it look like, the hand-picked code examples below should help.
A total of 15 code examples of import_landmark_file are shown below, sorted by popularity by default.
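Before the examples, here is a minimal usage sketch. The file paths are placeholders, and the exact return type depends on the menpo version (older releases return a LandmarkGroup, newer ones a plain shape such as a PointCloud), as the examples below also reflect.

import menpo.io as mio

# Minimal sketch: import a landmark file and attach it to an image.
# '/data/face_0001.pts' and '/data/face_0001.png' are placeholder paths.
lms = mio.import_landmark_file('/data/face_0001.pts')  # .pts and .ljson are both supported
im = mio.import_image('/data/face_0001.png')
im.landmarks['PTS'] = lms                               # store under a named group
print(im.landmarks['PTS'])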
Example 1: process_frame
def process_frame(frame_name, clip, img_type, svm_p, loop=False):
"""
    Applies the AAM fitter (global variable) to a frame. Optionally, it also applies an
    SVM classifier to verify that the fitted region is indeed a face.
:param frame_name: str: Name of the frame along with extension, e.g. '000001.png'.
:param clip: str: Name of the clip.
:param img_type: str: Suffix (extension) of the frames, e.g. '.png'.
:param svm_p: dict: Required params for SVM classification.
:param loop: bool: (optional) Declares whether this is a 2nd fit for AAM (loop).
:return:
"""
global fitter
name = frame_name[:frame_name.rfind('.')]
p0 = clip.path_read_ln[0] + name + '_0.pts'
    # Determine whether this is the 1st or the 2nd fit.
    if loop:  # 2nd fit: if an 'approved' landmark file already exists, return; otherwise proceed.
try:
ln = import_landmark_file(p0)
copy2(p0, clip.path_write_ln[0] + name + '_0.pts')
return # if the landmark already exists, return (for performance improvement)
except ValueError:
pass
try:
ln = import_landmark_file(clip.path_read_ln[1] + name + '_0.pts')
except ValueError: # either not found or no suitable importer
return
else:
try:
ln = import_landmark_file(p0)
except ValueError: # either not found or no suitable importer
return
im = im_read_greyscale(frame_name, clip.path_frames, img_type)
if not im:
return
im.landmarks['PTS2'] = ln
fr = fitter.fit_from_shape(im, im.landmarks['PTS2'].lms, crop_image=0.3)
p_wr = clip.path_write_ln[0] + im.path.stem + '_0.pts'
export_landmark_file(fr.fitted_image.landmarks['final'], p_wr, overwrite=True)
# apply SVM classifier by extracting patches (is face or not).
if not svm_p['apply']:
return
im.landmarks.clear() # temp solution
im.landmarks['ps_pbaam'] = fr.fitted_image.landmarks['final']
im_cp = im.crop_to_landmarks_proportion(0.2, group='ps_pbaam')
im_cp = svm_p['feat'](im_cp)
im2 = warp_image_to_reference_shape(im_cp, svm_p['refFrame'], 'ps_pbaam')
_p_nd = im2.extract_patches_around_landmarks(group='source', as_single_array=True,
patch_shape=svm_p['patch_s']).flatten()
if svm_p['clf'].decision_function(_p_nd) > 0:
copy2(p_wr, clip.path_write_ln[1] + im.path.stem + '_0.pts')
Example 2: test_json_landmarks_bunny_direct
def test_json_landmarks_bunny_direct():
lms = mio.import_landmark_file(mio.data_path_to('bunny.ljson'))
labels = {'reye', 'mouth', 'nose', 'leye'}
assert(len(labels - set(lms.labels)) == 0)
assert_allclose(lms['leye'].points, bunny_leye, atol=1e-7)
assert_allclose(lms['reye'].points, bunny_reye, atol=1e-7)
assert_allclose(lms['nose'].points, bunny_nose, atol=1e-7)
assert_allclose(lms['mouth'].points, bunny_mouth, atol=1e-7)
Example 3: test_json_landmarks_bunny_direct
def test_json_landmarks_bunny_direct():
lms = pio.import_landmark_file(pio.data_path_to('bunny.json'))
assert(lms.group_label == 'JSON')
labels = {'r_eye', 'mouth', 'nose', 'l_eye'}
assert(len(labels - set(lms.labels)) == 0)
assert_allclose(lms['l_eye'].lms.points, bunny_l_eye, atol=1e-7)
assert_allclose(lms['r_eye'].lms.points, bunny_r_eye, atol=1e-7)
assert_allclose(lms['nose'].lms.points, bunny_nose, atol=1e-7)
assert_allclose(lms['mouth'].lms.points, bunny_mouth, atol=1e-7)
Example 4: test_register_landmark_importer
def test_register_landmark_importer(is_file):
from menpo.shape import PointCloud
lmark = PointCloud.init_2d_grid((1, 1))
def foo_importer(filepath, **kwargs):
return lmark
is_file.return_value = True
with patch.dict(mio.input.extensions.image_landmark_types, {}, clear=True):
mio.register_landmark_importer('.foo', foo_importer)
new_lmark = mio.import_landmark_file('fake.foo')
assert lmark is new_lmark
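Example 4 exercises register_landmark_importer with mocks; as a follow-up, the sketch below shows how the same hook could be used outside of a test. The '.csv' format (one 'y,x' pair per line) and the importer itself are hypothetical.

import numpy as np
import menpo.io as mio
from menpo.shape import PointCloud

def csv_landmark_importer(filepath, **kwargs):
    # Hypothetical format: one "y,x" pair per line.
    points = np.loadtxt(str(filepath), delimiter=',', ndmin=2)
    return PointCloud(points)

mio.register_landmark_importer('.csv', csv_landmark_importer)
# lms = mio.import_landmark_file('annotations/face_0001.csv')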
Example 5: load_images
def load_images(list_frames, frames_path, path_land, clip_name, max_images=None,
training_images=None, crop_reading=0.3, pix_thres=330, feat=None):
"""
    Read images from the clips being processed. The landmarks may live in a different
    folder and are searched for by their .pts extension.
:param list_frames: List of images that will be read and loaded.
:param frames_path: Path to the folder of images.
:param path_land: Path of the respective landmarks.
:param clip_name: The name of the clip being processed.
:param max_images: (optional) Max images that will be loaded from this clip.
:param training_images: (optional) List of images to append the new ones.
:param crop_reading: (optional) Amount of cropping the image around the landmarks.
    :param pix_thres: (optional) If the cropped image has a dimension bigger than this, it is rescaled down to this diagonal dimension.
:param feat: (optional) Features to be applied to the images before inserting them to the list.
:return: List of menpo images.
"""
from random import shuffle
if not check_path_and_landmarks(frames_path, clip_name, path_land):
return []
if feat is None:
feat = no_op
if training_images is None:
training_images = []
shuffle(list_frames) # shuffle the list to ensure random ones are chosen
if max_images is None:
max_images = len(list_frames)
elif max_images < 0:
        print('Warning: max_images cannot be negative, loading the whole list instead.')
max_images = len(list_frames)
cnt = 0 # counter for images appended to the list
for frame_name in list_frames:
try:
im = mio.import_image(frames_path + frame_name, normalise=True)
except ValueError: # in case the extension is unknown (by menpo)
print('Ignoring the \'image\' {}.'.format(frame_name))
continue
res = glob.glob(path_land + clip_name + sep + im.path.stem + '*.pts')
if len(res) == 0: # if the image does not have any existing landmarks, ignore it
continue
elif len(res) > 1:
#_r = randint(0,len(res)-1); #just for debugging reasons in different variable
#ln = mio.import_landmark_file(res[_r]) # in case there are plenty of landmarks for the image, load random ones
            print('The image {} has more than one landmark file for this person; loading only the first one.'.format(frame_name))
ln = mio.import_landmark_file(res[0])
im.landmarks['PTS'] = ln
im = crop_rescale_img(im, crop_reading=crop_reading, pix_thres=pix_thres)
training_images.append(feat(im))
cnt += 1
if cnt >= max_images:
break # the limit of images (appended to the list) is reached
return training_images
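A possible invocation of the loader above is sketched here; the paths, frame names and clip name are placeholders, the helper functions (check_path_and_landmarks, crop_rescale_img, no_op) come from the surrounding project, and igo is just one example of a menpo feature callable.

from menpo.feature import igo

# Hypothetical call: load up to 50 frames of 'clip01', cropped around their
# landmarks and converted to IGO features before training.
frames = ['000001.png', '000002.png', '000003.png']
training_images = load_images(frames,
                              '/data/frames/clip01/',  # frames_path
                              '/data/landmarks/',      # path_land (expects a 'clip01' subfolder)
                              'clip01',
                              max_images=50,
                              crop_reading=0.3,
                              pix_thres=330,
                              feat=igo)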
Example 6: load_image
def load_image(path, reference_shape, is_training=False, group='PTS',
mirror_image=False):
"""Load an annotated image.
In the directory of the provided image file, there
should exist a landmark file (.pts) with the same
basename as the image file.
Args:
path: a path containing an image file.
reference_shape: a numpy array [num_landmarks, 2]
is_training: whether in training mode or not.
        group: landmark group containing the ground truth landmarks.
        mirror_image: whether to flip the image's pixels and landmarks horizontally.
Returns:
        pixels: a numpy array [height, width, 3].
        estimate: an initial shape estimate, a numpy array [68, 2].
gt_truth: the ground truth landmarks, a numpy array [68, 2].
"""
im = mio.import_image(path)
bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
if 'set' not in str(bb_root):
bb_root = im.path.parent.relative_to(im.path.parent.parent)
im.landmarks['bb'] = mio.import_landmark_file(str(Path('bbs') / bb_root / (
im.path.stem + '.pts')))
im = im.crop_to_landmarks_proportion(0.3, group='bb')
reference_shape = PointCloud(reference_shape)
bb = im.landmarks['bb'].lms.bounding_box()
im.landmarks['__initial'] = align_shape_with_bounding_box(reference_shape,
bb)
im = im.rescale_to_pointcloud(reference_shape, group='__initial')
if mirror_image:
im = utils.mirror_image(im)
lms = im.landmarks[group].lms
initial = im.landmarks['__initial'].lms
# if the image is greyscale then convert to rgb.
pixels = grey_to_rgb(im).pixels.transpose(1, 2, 0)
gt_truth = lms.points.astype(np.float32)
estimate = initial.points.astype(np.float32)
return pixels.astype(np.float32).copy(), gt_truth, estimate
Example 7: _aux
def _aux(im, pts_paths, pts_names, pts_formats, save_path, save_original, off1, off2, figure_size, overwrite, render_options, only_ln=False):
if only_ln: # case of visualising only landmarks (black background)
path_tmp = im.path
im = Image.init_blank([im.shape[0], im.shape[1]], im.n_channels)
im.path = path_tmp
# attach landmarks
for k, pts_path in enumerate(pts_paths):
if os.path.isfile(pts_path + im.path.stem + pts_formats[k]):
pts = mio.import_landmark_file(pts_path + im.path.stem + pts_formats[k])
im.landmarks[pts_names[k]] = pts
# copy original if asked
if save_original:
im_orig = im.copy()
# crop
if pts_names[0] in im.landmarks.group_labels:
centre = im.landmarks[pts_names[0]].lms.centre()
min_indices = np.array([round(centre[0])-off1, round(centre[1])-off2])
max_indices = np.array([round(centre[0])+off1, round(centre[1])+off2])
# im.crop_inplace(min_indices, max_indices)
im = im.crop(min_indices, max_indices, constrain_to_boundary=True)
else:
path_tmp = im.path
im = Image.init_blank([off1*2 + 1, off2*2 + 1], im.n_channels)
im.path = path_tmp
# render
rand = randint(1, 10000)
fig = plt.figure(rand)
if save_original:
gs = gridspec.GridSpec(1, 2, width_ratios=[im_orig.height, im.height])
plt.subplot(gs[0])
renderer = _render(im_orig, pts_names, fig, render_options['colours'][0],
render_options['sizes'][0], render_options['edgesizes'][0], figure_size)
plt.subplot(gs[1])
renderer = _render(im, pts_names, fig, render_options['colours'][1],
render_options['sizes'][1], render_options['edgesizes'][1], figure_size)
else:
renderer = _render(im, pts_names, fig, render_options['colours'][1],
render_options['sizes'][1], render_options['edgesizes'][1], figure_size)
renderer.save_figure(save_path + im.path.stem + '.png', format='png', pad_inches=0.0, overwrite=overwrite)
plt.close(rand)
Example 8: test_importing_v2_ljson_null_values
def test_importing_v2_ljson_null_values(is_file, mock_open, mock_dict):
v2_ljson = { "labels": [
{ "label": "left_eye", "mask": [0, 1, 2] },
{ "label": "right_eye", "mask": [3, 4, 5] }
],
"landmarks": {
"connectivity": [ [0, 1], [1, 2], [2, 0], [3, 4],
[4, 5], [5, 3] ],
"points": [ [None, 200.5], [None, None],
[316.8, 199.15], [339.48, 205.0],
[358.54, 217.82], [375.0, 233.4]]
},
"version": 2 }
mock_dict.return_value = v2_ljson
is_file.return_value = True
lmark = mio.import_landmark_file('fake_lmark_being_mocked.ljson')
nan_points = np.isnan(lmark.lms.points)
assert nan_points[0, 0] # y-coord None point is nan
assert not nan_points[0, 1] # x-coord point is not nan
    assert np.all(nan_points[1, :])  # both coordinates of the [None, None] point are nan
Example 9: test_importing_v1_ljson_null_values
def test_importing_v1_ljson_null_values(is_file, mock_open, mock_dict):
v1_ljson = { "groups": [
{ "connectivity": [ [ 0, 1 ], [ 1, 2 ], [ 2, 3 ] ],
"label": "chin", "landmarks": [
{ "point": [ 987.9, 1294.1 ] }, { "point": [ 96.78, 1246.8 ] },
{ "point": [ None, 0.1 ] }, { "point": [303.22, 167.2 ] } ] },
{ "connectivity": [ [ 0, 1 ] ],
"label": "leye", "landmarks": [
{ "point": [ None, None ] },
{ "point": [ None, None ] }] }
], "version": 1 }
mock_dict.return_value = v1_ljson
is_file.return_value = True
with warnings.catch_warnings(record=True) as w:
lmark = mio.import_landmark_file('fake_lmark_being_mocked.ljson')
nan_points = np.isnan(lmark.lms.points)
# Should raise deprecation warning
assert len(w) == 1
assert nan_points[2, 0] # y-coord None point is nan
assert not nan_points[2, 1] # x-coord point is not nan
assert np.all(nan_points[4:, :]) # all of leye label is nan
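Both LJSON examples above confirm that null coordinates are imported as NaN; the standalone sketch below (synthetic points, not taken from the original tests) shows one way to mask such points out before further processing.

import numpy as np

# Synthetic points mimicking an LJSON import with null entries.
points = np.array([[np.nan, 200.5],
                   [np.nan, np.nan],
                   [316.8, 199.15]])
valid = ~np.isnan(points).any(axis=1)  # keep rows where both coordinates are present
print(points[valid])                   # -> [[316.8, 199.15]]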
Example 10: test_import_landmark_file
def test_import_landmark_file():
lm_path = mio.data_dir_path() / 'einstein.pts'
mio.import_landmark_file(lm_path)
Example 11: test_export_filepath_overwrite_exists
import numpy as np
from mock import patch, PropertyMock
from nose.tools import raises
import sys
import menpo.io as mio
from menpo.image import Image
builtins_str = '__builtin__' if sys.version_info[0] == 2 else 'builtins'
test_lg = mio.import_landmark_file(mio.data_path_to('breakingbad.pts'))
nan_lg = test_lg.copy()
nan_lg.lms.points[0, :] = np.nan
test_img = Image(np.random.random([100, 100]))
fake_path = '/tmp/test.fake'
@patch('menpo.io.output.base.landmark_types')
@patch('menpo.io.output.base.Path.exists')
@patch('menpo.io.output.base.Path.open')
def test_export_filepath_overwrite_exists(mock_open, exists, landmark_types):
exists.return_value = True
mio.export_landmark_file(test_lg, fake_path, overwrite=True)
mock_open.assert_called_once_with('wb')
landmark_types.__getitem__.assert_called_once_with('.fake')
export_function = landmark_types.__getitem__.return_value
export_function.assert_called_once()
Example 12: test_export_filepath_overwrite_exists
import sys
import numpy as np
from numpy.testing import assert_allclose
import os
from pathlib import PosixPath, WindowsPath, Path
from mock import patch, PropertyMock, MagicMock
from nose.tools import raises
import menpo.io as mio
from menpo.io.utils import _norm_path
from menpo.image import Image
from menpo.io.output.pickle import pickle_paths_as_pure
builtins_str = '__builtin__' if sys.version_info[0] == 2 else 'builtins'
test_lg = mio.import_landmark_file(mio.data_path_to('lenna.ljson'))
nan_lg = test_lg.copy()
nan_lg.points[0, :] = np.nan
test_img = Image(np.random.random([100, 100]))
colour_test_img = Image(np.random.random([3, 100, 100]))
fake_path = '/tmp/test.fake'
@patch('menpo.io.output.base.landmark_types')
@patch('menpo.io.output.base.Path.exists')
@patch('menpo.io.output.base.Path.open')
def test_export_filepath_overwrite_exists(mock_open, exists, landmark_types):
exists.return_value = True
landmark_types.__contains__.return_value = True
mio.export_landmark_file(test_lg, fake_path, overwrite=True)
mock_open.assert_called_with('wb')
Example 13: test_import_landmark_file
def test_import_landmark_file():
lm_path = os.path.join(mio.data_dir_path(), 'einstein.pts')
mio.import_landmark_file(lm_path)
Example 14: load_images
def load_images(paths, group=None, verbose=True):
"""Loads and rescales input images to the diagonal of the reference shape.
Args:
paths: a list of strings containing the data directories.
        group: landmark group containing the ground truth landmarks.
verbose: boolean, print debugging info.
Returns:
images: a list of numpy arrays containing images.
shapes: a list of the ground truth landmarks.
reference_shape: a numpy array [num_landmarks, 2].
shape_gen: PCAModel, a shape generator.
"""
images = []
shapes = []
bbs = []
reference_shape = PointCloud(build_reference_shape(paths))
for path in paths:
if verbose:
print('Importing data from {}'.format(path))
for im in mio.import_images(path, verbose=verbose, as_generator=True):
group = group or im.landmarks[group]._group_label
bb_root = im.path.parent.relative_to(im.path.parent.parent.parent)
if 'set' not in str(bb_root):
bb_root = im.path.parent.relative_to(im.path.parent.parent)
im.landmarks['bb'] = mio.import_landmark_file(str(Path(
'bbs') / bb_root / (im.path.stem + '.pts')))
im = im.crop_to_landmarks_proportion(0.3, group='bb')
im = im.rescale_to_pointcloud(reference_shape, group=group)
im = grey_to_rgb(im)
images.append(im.pixels.transpose(1, 2, 0))
shapes.append(im.landmarks[group].lms)
bbs.append(im.landmarks['bb'].lms)
train_dir = Path(FLAGS.train_dir)
mio.export_pickle(reference_shape.points, train_dir / 'reference_shape.pkl', overwrite=True)
print('created reference_shape.pkl using the {} group'.format(group))
pca_model = detect.create_generator(shapes, bbs)
# Pad images to max length
max_shape = np.max([im.shape for im in images], axis=0)
max_shape = [len(images)] + list(max_shape)
padded_images = np.random.rand(*max_shape).astype(np.float32)
print(padded_images.shape)
for i, im in enumerate(images):
height, width = im.shape[:2]
dy = max(int((max_shape[1] - height - 1) / 2), 0)
dx = max(int((max_shape[2] - width - 1) / 2), 0)
lms = shapes[i]
pts = lms.points
pts[:, 0] += dy
pts[:, 1] += dx
lms = lms.from_vector(pts)
padded_images[i, dy:(height+dy), dx:(width+dx)] = im
return padded_images, shapes, reference_shape.points, pca_model
Example 15: test_export_filepath_overwrite_exists
import sys
import numpy as np
from numpy.testing import assert_allclose
import os
from pathlib import PosixPath, WindowsPath, Path
from mock import patch, PropertyMock, MagicMock
from pytest import raises
import menpo.io as mio
from menpo.io.utils import _norm_path
from menpo.image import Image
from menpo.io.output.pickle import pickle_paths_as_pure
builtins_str = '__builtin__' if sys.version_info[0] == 2 else 'builtins'
test_lg = mio.import_landmark_file(mio.data_path_to('lenna.ljson'),
group='LJSON')
nan_lg = test_lg.copy()
nan_lg.points[0, :] = np.nan
test_img = Image(np.random.random([100, 100]))
colour_test_img = Image(np.random.random([3, 100, 100]))
fake_path = '/tmp/test.fake'
@patch('menpo.io.output.base.landmark_types')
@patch('menpo.io.output.base.Path.exists')
@patch('menpo.io.output.base.Path.open')
def test_export_filepath_overwrite_exists(mock_open, exists, landmark_types):
exists.return_value = True
landmark_types.__contains__.return_value = True
mio.export_landmark_file(test_lg, fake_path, overwrite=True)
mock_open.assert_called_with('wb')