This article collects typical usage examples of the Python method menpo.transform.Scale.apply_inplace. If you have been wondering what exactly Scale.apply_inplace does and how to use it, the hand-picked code examples below should help; you can also read further about the class it belongs to, menpo.transform.Scale.
Three code examples of Scale.apply_inplace are shown below, ordered by popularity. Note that the examples are methods excerpted from larger classes: they reference self and rely on helper functions imported elsewhere.
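As a quick primer before the examples, here is a minimal usage sketch (not taken from the examples below; the point cloud and scale factor are made up) showing the difference between apply, which returns a transformed copy, and apply_inplace, which mutates the given shape directly.

import numpy as np
from menpo.shape import PointCloud
from menpo.transform import Scale

# A small 2D point cloud.
pc = PointCloud(np.array([[1.0, 2.0],
                          [3.0, 4.0]]))

# A uniform 2D scaling by a factor of 2.
transform = Scale(2, n_dims=2)

# apply() returns a new, scaled shape and leaves pc untouched ...
scaled_copy = transform.apply(pc)

# ... while apply_inplace() overwrites the points of pc itself.
transform.apply_inplace(pc)
print(pc.points)  # [[2. 4.]
                  #  [6. 8.]]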
Example 1: shapes
# Required import: from menpo.transform import Scale
# Method demonstrated: Scale.apply_inplace
def shapes(self, as_points=False):
r"""
    Generates a list containing the shapes obtained at each fitting
    iteration.

    Parameters
    ----------
    as_points : boolean, optional
        Whether the result is returned as a list of PointClouds or
        ndarrays.

        Default: False

    Returns
    -------
    shapes : list of :class:`menpo.shape.PointCloud` or ndarray
        A list containing the shapes obtained at each fitting iteration.
    """
n = self.n_levels - 1
shapes = []
for j, f in enumerate(self.fittings):
if self.scaled_levels:
transform = Scale(self.downscale ** (n - j), 2)
for t in f.shapes(as_points=as_points):
transform.apply_inplace(t)
shapes.append(self._affine_correction.apply(t))
else:
for t in f.shapes(as_points=as_points):
shapes.append(self._affine_correction.apply(t))
return shapes
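The pattern in Example 1 deserves a note: a shape fitted at pyramid level j lives in an image downscaled by downscale ** (n - j), so a uniform Scale by that factor maps it back to the original resolution before the affine correction is applied. A small standalone sketch, with hypothetical pyramid settings (the numbers are illustrative, not from the example):

import numpy as np
from menpo.shape import PointCloud
from menpo.transform import Scale

# Hypothetical pyramid: 3 levels, each a factor of 2 smaller.
n_levels, downscale = 3, 2
n = n_levels - 1

# A shape fitted at the lowest level (j = 0) is 2 ** 2 = 4x too small.
j = 0
shape = PointCloud(np.array([[10.0, 10.0],
                             [20.0, 30.0]]))
Scale(downscale ** (n - j), n_dims=2).apply_inplace(shape)
print(shape.points)  # [[ 40.  40.]
                     #  [ 80. 120.]]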
Example 2: _train
# Required import: from menpo.transform import Scale
# Method demonstrated: Scale.apply_inplace
def _train(self, original_images, group=None, bounding_box_group_glob=None,
verbose=False):
    r"""
    Trains the model, scale by scale, from the given list of images.
    """
# Dlib does not support incremental builds, so we must be passed a list
if not isinstance(original_images, list):
original_images = list(original_images)
# We use temporary landmark groups - so we need the group key to not be
# None
if group is None:
group = original_images[0].landmarks.group_labels[0]
# Temporarily store all the bounding boxes for rescaling
for i in original_images:
i.landmarks['__gt_bb'] = i.landmarks[group].lms.bounding_box()
if self.reference_shape is None:
# If no reference shape was given, use the mean of the first batch
self.reference_shape = compute_reference_shape(
[i.landmarks['__gt_bb'].lms for i in original_images],
self.diagonal, verbose=verbose)
# Rescale to existing reference shape
images = rescale_images_to_reference_shape(
original_images, '__gt_bb', self.reference_shape,
verbose=verbose)
# Scaling is done - remove temporary gt bounding boxes
for i, i2 in zip(original_images, images):
del i.landmarks['__gt_bb']
del i2.landmarks['__gt_bb']
generated_bb_func = generate_perturbations_from_gt(
images, self.n_perturbations, self._perturb_from_gt_bounding_box,
gt_group=group, bb_group_glob=bounding_box_group_glob,
verbose=verbose)
# for each scale (low --> high)
current_bounding_boxes = []
for j in range(self.n_scales):
if verbose:
if len(self.scales) > 1:
scale_prefix = ' - Scale {}: '.format(j)
else:
scale_prefix = ' - '
else:
scale_prefix = None
# handle scales
if self.scales[j] != 1:
            # Rescale the images only if the scale factor is not 1
scaled_images = scale_images(images, self.scales[j],
prefix=scale_prefix,
verbose=verbose)
else:
scaled_images = images
if j == 0:
current_bounding_boxes = [generated_bb_func(im)
for im in scaled_images]
# Extract scaled ground truth shapes for current scale
scaled_gt_shapes = [i.landmarks[group].lms for i in scaled_images]
# Train the Dlib model
current_bounding_boxes = self.algorithms[j].train(
scaled_images, scaled_gt_shapes, current_bounding_boxes,
prefix=scale_prefix, verbose=verbose)
        # Scale the current bounding boxes to the next resolution; the
        # final scale needs no rescaling
if j != (self.n_scales - 1):
transform = Scale(self.scales[j + 1] / self.scales[j],
n_dims=2)
for bboxes in current_bounding_boxes:
for bb in bboxes:
transform.apply_inplace(bb)
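Example 2's '__gt_bb' entry is just a scratch landmark group that carries the ground-truth bounding box through rescaling and is deleted afterwards. A minimal sketch of that pattern, assuming the same menpo era as the example (where a landmark group exposes its underlying shape via .lms):

import numpy as np
from menpo.image import Image
from menpo.shape import PointCloud

# A blank image standing in for a real training image, with a
# hypothetical ground-truth landmark group named 'PTS'.
image = Image.init_blank((100, 100))
image.landmarks['PTS'] = PointCloud(np.array([[10.0, 10.0],
                                              [40.0, 60.0],
                                              [80.0, 20.0]]))

# Stash the tight bounding box of the landmarks under a temporary key,
# exactly as Example 2 does with '__gt_bb'.
image.landmarks['__gt_bb'] = image.landmarks['PTS'].lms.bounding_box()

# ... rescale / perturb using the temporary group here ...

# Delete it again so it does not leak into later processing.
del image.landmarks['__gt_bb']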
Example 3: _train_batch
# Required import: from menpo.transform import Scale
# Method demonstrated: Scale.apply_inplace
def _train_batch(self, image_batch, increment=False, group=None,
bounding_box_group_glob=None, verbose=False):
# Rescale to existing reference shape
image_batch = rescale_images_to_reference_shape(
image_batch, group, self.reference_shape,
verbose=verbose)
generated_bb_func = generate_perturbations_from_gt(
image_batch, self.n_perturbations,
self._perturb_from_gt_bounding_box, gt_group=group,
bb_group_glob=bounding_box_group_glob, verbose=verbose)
# for each scale (low --> high)
current_shapes = []
for j in range(self.n_scales):
if verbose:
if len(self.scales) > 1:
scale_prefix = ' - Scale {}: '.format(j)
else:
scale_prefix = ' - '
else:
scale_prefix = None
# Handle holistic features
if j == 0 and self.holistic_features[j] == no_op:
# Saves a lot of memory
feature_images = image_batch
elif j == 0 or self.holistic_features[j] is not self.holistic_features[j - 1]:
# Compute features only if this is the first pass through
# the loop or the features at this scale are different from
# the features at the previous scale
feature_images = compute_features(image_batch,
self.holistic_features[j],
prefix=scale_prefix,
verbose=verbose)
# handle scales
if self.scales[j] != 1:
            # Rescale the feature images only if the scale factor is not 1
scaled_images = scale_images(feature_images, self.scales[j],
prefix=scale_prefix,
verbose=verbose)
else:
scaled_images = feature_images
# Extract scaled ground truth shapes for current scale
scaled_shapes = [i.landmarks[group].lms for i in scaled_images]
if j == 0:
msg = '{}Aligning reference shape with bounding boxes.'.format(
scale_prefix)
wrap = partial(print_progress, prefix=msg,
end_with_newline=False, verbose=verbose)
# Extract perturbations at the very bottom level
for ii in wrap(scaled_images):
c_shapes = []
for bbox in generated_bb_func(ii):
c_s = align_shape_with_bounding_box(
self.reference_shape, bbox)
c_shapes.append(c_s)
current_shapes.append(c_shapes)
# train supervised descent algorithm
if not increment:
current_shapes = self.algorithms[j].train(
scaled_images, scaled_shapes, current_shapes,
prefix=scale_prefix, verbose=verbose)
else:
current_shapes = self.algorithms[j].increment(
scaled_images, scaled_shapes, current_shapes,
prefix=scale_prefix, verbose=verbose)
        # Scale the current shapes to the next resolution; the final
        # scale needs no rescaling
if j != (self.n_scales - 1):
transform = Scale(self.scales[j + 1] / self.scales[j],
n_dims=2)
for image_shapes in current_shapes:
for shape in image_shapes:
transform.apply_inplace(shape)
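Example 3 leans on an align_shape_with_bounding_box helper to place the reference shape inside each perturbed box. As a rough, hypothetical re-implementation (not the library's actual code), such a helper can be built from menpo's AlignmentSimilarity by aligning the shape's own bounding box to the target box:

import numpy as np
from menpo.shape import PointCloud
from menpo.transform import AlignmentSimilarity

def align_shape_with_bounding_box(shape, bounding_box):
    # Hypothetical helper: find the similarity transform that maps the
    # shape's own bounding box onto the target box, then apply it to
    # the whole shape.
    similarity = AlignmentSimilarity(shape.bounding_box(), bounding_box,
                                     rotation=False)
    return similarity.apply(shape)

# A square reference shape and a target box twice its size.
reference = PointCloud(np.array([[0.0, 0.0], [0.0, 10.0],
                                 [10.0, 0.0], [10.0, 10.0]]))
target_bb = PointCloud(np.array([[20.0, 20.0],
                                 [40.0, 40.0]])).bounding_box()

# The returned shape is scaled by 2 and translated onto the target box.
print(align_shape_with_bounding_box(reference, target_bb).points)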