本文整理汇总了Python中menpo.visualize.progress_bar_str函数的典型用法代码示例。如果您正苦于以下问题:Python progress_bar_str函数的具体用法?Python progress_bar_str怎么用?Python progress_bar_str使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了progress_bar_str函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _import_glob_generator
def _import_glob_generator(
        pattern,
        extension_map,
        max_assets=None,
        has_landmarks=False,
        landmark_resolver=None,
        importer_kwargs=None,
        verbose=False,
):
    """Lazily yield every asset matched by a glob pattern.

    Raises ``ValueError`` if the pattern matches nothing. When ``verbose``
    is True, a dynamic progress line is printed per loaded asset.
    """
    paths = list(glob_with_suffix(pattern, extension_map))
    if max_assets:
        paths = paths[:max_assets]
    total = len(paths)
    if total == 0:
        raise ValueError("The glob {} yields no assets".format(pattern))
    asset_gen = _multi_import_generator(
        paths,
        extension_map,
        has_landmarks=has_landmarks,
        landmark_resolver=landmark_resolver,
        importer_kwargs=importer_kwargs,
    )
    for count, asset in enumerate(asset_gen, start=1):
        if verbose:
            bar = progress_bar_str(float(count) / total, show_bar=True)
            print_dynamic("- Loading {} assets: {}".format(total, bar))
        yield asset
示例2: _get_relative_locations
def _get_relative_locations(shapes, graph, level_str, verbose):
    r"""
    returns numpy.array of size 2 x n_images x n_edges
    """
    # Wrap each shape's points in the graph structure so that
    # relative_locations() becomes available.
    if isinstance(graph, Tree):
        point_graphs = [
            PointTree(s.points, graph.adjacency_array, graph.root_vertex)
            for s in shapes
        ]
    else:
        point_graphs = [
            PointDirectedGraph(s.points, graph.adjacency_array)
            for s in shapes
        ]
    n_shapes = len(point_graphs)
    # one (2, n_edges) slice per shape
    rel_loc_array = np.empty((2, graph.n_edges, n_shapes))
    for idx, pg in enumerate(point_graphs):
        if verbose:
            print_dynamic('{}Computing relative locations from '
                          'shapes - {}'.format(
                              level_str,
                              progress_bar_str(float(idx + 1) / n_shapes,
                                               show_bar=False)))
        rel_loc_array[..., idx] = pg.relative_locations().T
    # reorder axes: (2, n_edges, n_images) -> (2, n_images, n_edges)
    return np.rollaxis(rel_loc_array, 2, 1)
示例3: _regression_data
def _regression_data(self, images, gt_shapes, perturbed_shapes,
                     verbose=False):
    r"""
    Generate the regression training data: features and delta_ps.

    Parameters
    ----------
    images : list of :map:`MaskedImage`
        The set of landmarked images.
    gt_shapes : :map:`PointCloud` list
        Ground truth shapes, one per image.
    perturbed_shapes : :map:`PointCloud` list
        Groups of perturbed shapes to regress from, one group per image.
    verbose : `boolean`, optional
        If ``True``, the progress is printed.
    """
    if verbose:
        print_dynamic('- Generating regression data')
    n_images = len(images)
    features = []
    delta_ps = []
    triples = zip(images, gt_shapes, perturbed_shapes)
    for j, (image, gt_shape, shape_group) in enumerate(triples):
        if verbose:
            print_dynamic('- Generating regression data - {}'.format(
                progress_bar_str((j + 1.) / n_images, show_bar=False)))
        for perturbed in shape_group:
            features.append(self.features(image, perturbed))
            delta_ps.append(self.delta_ps(gt_shape, perturbed))
    return np.asarray(features), np.asarray(delta_ps)
示例4: apply_pyramid_on_images
def apply_pyramid_on_images(generators, n_levels, verbose=False):
    r"""
    Exhaust the pyramid generators level by level.

    Returns a list of ``n_levels`` lists, where entry ``j`` holds the
    ``j``-th value drawn from every generator, in generator order.
    """
    n_generators = len(generators)
    all_images = []
    for level in range(n_levels):
        if verbose:
            if n_levels > 1:
                level_str = '- Apply pyramid: [Level {} - '.format(level + 1)
            else:
                level_str = '- Apply pyramid: '
        level_images = []
        for idx, generator in enumerate(generators):
            if verbose:
                print_dynamic(
                    '{}Computing feature space/rescaling - {}'.format(
                        level_str,
                        progress_bar_str((idx + 1.) / n_generators,
                                         show_bar=False)))
            level_images.append(next(generator))
        all_images.append(level_images)
    if verbose:
        print_dynamic('- Apply pyramid: Done\n')
    return all_images
示例5: _build_appearance_model_sparse
def _build_appearance_model_sparse(all_patches_array, graph, patch_shape,
                                   n_channels, n_appearance_parameters,
                                   level_str, verbose):
    """Build a Gaussian appearance model with a block-sparse precision matrix.

    Parameters
    ----------
    all_patches_array : ndarray
        ``(n_vertices * patch_len) x n_samples`` matrix of vectorised
        patches, rows grouped per graph vertex.
        # assumes this row layout -- TODO confirm against the caller
    graph : graph exposing ``n_vertices``, ``n_edges``, ``adjacency_array``
    patch_shape : tuple
        Spatial shape of each patch.
    n_channels : int
        Number of channels per patch pixel.
    n_appearance_parameters : int
        Forwarded to ``_covariance_matrix_inverse`` when inverting each
        per-edge covariance.
    level_str : str
        Prefix for progress messages.
    verbose : bool
        If True, prints progress via ``print_dynamic``.

    Returns
    -------
    app_mean : ndarray
        Mean appearance vector (mean over the samples axis).
    all_cov : scipy.sparse.csr_matrix
        Accumulated block-sparse inverse-covariance (precision) matrix.
    """
    # build appearance model
    if verbose:
        print_dynamic('{}Training appearance distribution per '
                      'edge'.format(level_str))
    # compute mean appearance vector (average over samples, axis=1)
    app_mean = np.mean(all_patches_array, axis=1)
    # appearance vector and patch vector lengths
    patch_len = np.prod(patch_shape) * n_channels
    # initialize block sparse covariance matrix; lil_matrix supports cheap
    # incremental block writes and is converted to csr once at the end
    all_cov = lil_matrix((graph.n_vertices * patch_len,
                          graph.n_vertices * patch_len))
    # compute covariance matrix for each edge
    for e in range(graph.n_edges):
        # print progress
        if verbose:
            print_dynamic('{}Training appearance distribution '
                          'per edge - {}'.format(
                              level_str,
                              progress_bar_str(float(e + 1) / graph.n_edges,
                                               show_bar=False)))
        # edge vertices, ordered so that v1 < v2
        v1 = np.min(graph.adjacency_array[e, :])
        v2 = np.max(graph.adjacency_array[e, :])
        # row/column ranges of the two vertices in the global matrix
        v1_from = v1 * patch_len
        v1_to = (v1 + 1) * patch_len
        v2_from = v2 * patch_len
        v2_to = (v2 + 1) * patch_len
        # stack the two vertices' patch rows to form this edge's data
        edge_data = np.concatenate((all_patches_array[v1_from:v1_to, :],
                                    all_patches_array[v2_from:v2_to, :]))
        # compute (regularised) covariance inverse for the edge
        icov = _covariance_matrix_inverse(np.cov(edge_data),
                                          n_appearance_parameters)
        # scatter icov's 4 blocks into the global precision; diagonal
        # blocks accumulate across every edge that touches the vertex
        # v1, v2
        all_cov[v1_from:v1_to, v2_from:v2_to] += icov[:patch_len, patch_len::]
        # v2, v1
        all_cov[v2_from:v2_to, v1_from:v1_to] += icov[patch_len::, :patch_len]
        # v1, v1
        all_cov[v1_from:v1_to, v1_from:v1_to] += icov[:patch_len, :patch_len]
        # v2, v2
        all_cov[v2_from:v2_to, v2_from:v2_to] += icov[patch_len::, patch_len::]
    return app_mean, all_cov.tocsr()
示例6: _build_deformation_model
def _build_deformation_model(graph, relative_locations, level_str, verbose):
    """Build the ``2*n_vertices x 2*n_vertices`` deformation precision matrix.

    For every directed edge ``(parent, child)`` the 2x2 inverse covariance
    of the child's location relative to its parent is computed across all
    shapes, and its terms are scattered into the global matrix,
    accumulating on per-vertex diagonal blocks.

    Parameters
    ----------
    graph : graph exposing ``n_vertices``, ``n_edges``, ``adjacency_array``
        ``adjacency_array[e]`` holds ``(parent, child)`` vertex indices.
    relative_locations : ndarray
        2 x n_images x n_edges relative-location array.
        # assumed layout; matches _get_relative_locations' documented
        # return -- TODO confirm
    level_str : str
        Prefix for progress messages.
    verbose : bool
        If True, prints progress via ``print_dynamic``.

    Returns
    -------
    def_cov : ndarray
        The accumulated deformation precision matrix.
    """
    # build deformation model
    if verbose:
        print_dynamic('{}Training deformation distribution per '
                      'graph edge'.format(level_str))
    def_len = 2 * graph.n_vertices
    def_cov = np.zeros((def_len, def_len))
    for e in range(graph.n_edges):
        # print progress
        if verbose:
            print_dynamic('{}Training deformation distribution '
                          'per edge - {}'.format(
                              level_str,
                              progress_bar_str(float(e + 1) / graph.n_edges,
                                               show_bar=False)))
        # get vertices adjacent to edge
        parent = graph.adjacency_array[e, 0]
        child = graph.adjacency_array[e, 1]
        # 2x2 precision of this edge's relative locations over all shapes
        edge_cov = np.linalg.inv(np.cov(relative_locations[..., e]))
        # store its values
        s1 = edge_cov[0, 0]
        s2 = edge_cov[1, 1]
        s3 = 2 * edge_cov[0, 1]
        # Fill the covariance matrix
        # get indices: x and y coordinate rows of parent and child
        p1 = 2 * parent
        p2 = 2 * parent + 1
        c1 = 2 * child
        c2 = 2 * child + 1
        # up-left block: += accumulates over all edges touching the parent
        def_cov[p1, p1] += s1
        def_cov[p2, p2] += s2
        def_cov[p2, p1] += s3
        # up-right block: plain assignment -- each (parent, child) pair
        # corresponds to a unique edge, so no accumulation is needed
        def_cov[p1, c1] = - s1
        def_cov[p2, c2] = - s2
        def_cov[p1, c2] = - s3 / 2
        def_cov[p2, c1] = - s3 / 2
        # down-left block (mirror of the up-right block)
        def_cov[c1, p1] = - s1
        def_cov[c2, p2] = - s2
        def_cov[c1, p2] = - s3 / 2
        def_cov[c2, p1] = - s3 / 2
        # down-right block: += accumulates over all edges touching the child
        def_cov[c1, c1] += s1
        def_cov[c2, c2] += s2
        def_cov[c1, c2] += s3
    return def_cov
示例7: _create_pyramid
def _create_pyramid(cls, images, n_levels, downscale, pyramid_on_features,
                    feature_type, verbose=False):
    r"""
    Create the Gaussian pyramid generators for a set of landmarked images.

    The pyramid is built either on the feature space (features extracted
    once at the highest resolution) or on the original intensity images
    (features computed later, per level).

    Parameters
    ----------
    images : list of :class:`menpo.image.Image`
        The set of landmarked images from which to build the AAM.
    n_levels : int
        The number of multi-resolution pyramidal levels to be used.
    downscale : float
        The downscale factor between consecutive pyramidal levels.
    pyramid_on_features : boolean
        If True, features are extracted at the highest level and the
        pyramid is created on the feature images; otherwise the pyramid
        is created on the original (intensities) space.
    feature_type : list of size 1 with str or function/closure or None
        Feature type used when ``pyramid_on_features`` is enabled.
    verbose : bool, Optional
        Flag that controls information and progress printing.
        Default: False

    Returns
    -------
    generator : list
        The Gaussian pyramid generators, one per image.
    """
    if not pyramid_on_features:
        # pyramid on intensities; features will be computed per level
        return [image.gaussian_pyramid(n_levels=n_levels,
                                       downscale=downscale)
                for image in images]
    # compute features at the highest level first
    n_images = len(images)
    feature_images = []
    for idx, image in enumerate(images):
        if verbose:
            print_dynamic('- Computing feature space: {}'.format(
                progress_bar_str((idx + 1.) / n_images,
                                 show_bar=False)))
        feature_images.append(compute_features(image, feature_type[0]))
    if verbose:
        print_dynamic('- Computing feature space: Done\n')
    # create the pyramid on the feature images
    return [image.gaussian_pyramid(n_levels=n_levels,
                                   downscale=downscale)
            for image in feature_images]
示例8: _scale_images
def _scale_images(cls, images, s, level_str, verbose):
    """Rescale every image by factor ``s``, optionally printing progress."""
    n_images = len(images)
    scaled = []
    for idx, image in enumerate(images):
        if verbose:
            print_dynamic(
                '{}Scaling features: {}'.format(
                    level_str, progress_bar_str((idx + 1.) / n_images,
                                                show_bar=False)))
        scaled.append(image.rescale(s))
    return scaled
示例9: _normalize_images
def _normalize_images(self, images, group, label, ref_shape, verbose):
    """Rescale each image to the reference shape's scale and, when
    ``self.sigma`` is set, smooth its pixels."""
    n_images = len(images)
    norm_images = []
    for idx, image in enumerate(images):
        if verbose:
            print_dynamic('- Normalizing images size: {}'.format(
                progress_bar_str((idx + 1.) / n_images, show_bar=False)))
        image = rescale_to_reference_shape(image, ref_shape, group=group,
                                           label=label)
        if self.sigma:
            image.pixels = fsmooth(image.pixels, self.sigma)
        norm_images.append(image)
    return norm_images
示例10: _compute_features
def _compute_features(self, images, level_str, verbose):
    """Pass each image through ``self.features`` (when set), printing
    progress if requested."""
    n_images = len(images)
    feature_images = []
    for idx, image in enumerate(images):
        if verbose:
            print_dynamic(
                '{}Computing feature space: {}'.format(
                    level_str, progress_bar_str((idx + 1.) / n_images,
                                                show_bar=False)))
        if self.features:
            image = self.features(image)
        feature_images.append(image)
    return feature_images
示例11: _compute_minimum_spanning_tree
def _compute_minimum_spanning_tree(shapes, root_vertex, level_str, verbose):
    """Compute the minimum spanning tree over the landmarks' complete graph.

    Every landmark pair becomes an edge whose weight is the sum over all
    shapes of the negative Gaussian log-density of the pairwise point
    difference, under a Gaussian fitted to those differences.

    Parameters
    ----------
    shapes : list of shapes exposing ``n_points`` and ``points``
        The training shapes; all must have the same number of points.
    root_vertex : int
        Vertex at which the returned spanning tree is rooted.
    level_str : str
        Prefix for progress messages.
    verbose : bool
        If True, prints progress via ``print_dynamic``.

    Returns
    -------
    The minimum spanning tree, as produced by
    ``UndirectedGraph.minimum_spanning_tree``.
    """
    # initialize edges and weights matrix
    n_vertices = shapes[0].n_points
    n_edges = nchoosek(n_vertices, 2)
    weights = np.zeros((n_vertices, n_vertices))
    edges = np.empty((n_edges, 2), dtype=np.int32)
    # fill edges and weights
    e = -1
    for i in range(n_vertices-1):
        for j in range(i+1, n_vertices, 1):
            # edge counter
            e += 1
            # print progress
            if verbose:
                print_dynamic('{}Computing complete graph`s weights - {}'.format(
                    level_str,
                    progress_bar_str(float(e + 1) / n_edges,
                                     show_bar=False)))
            # fill in edges
            edges[e, 0] = i
            edges[e, 1] = j
            # create data matrix of edge: per-shape (x, y) differences
            diffs_x = [s.points[i, 0] - s.points[j, 0] for s in shapes]
            diffs_y = [s.points[i, 1] - s.points[j, 1] for s in shapes]
            coords = np.array([diffs_x, diffs_y])
            # compute mean
            m = np.mean(coords, axis=1)
            # compute covariance
            c = np.cov(coords)
            # get weight: accumulate negative log-likelihood over shapes
            for im in range(len(shapes)):
                weights[i, j] += -np.log(multivariate_normal.pdf(coords[:, im],
                                                                 mean=m, cov=c))
            # keep the weight matrix symmetric
            weights[j, i] = weights[i, j]
    # create undirected graph
    complete_graph = UndirectedGraph(edges)
    if verbose:
        print_dynamic('{}Minimum spanning graph computed.\n'.format(level_str))
    # compute minimum spanning graph
    return complete_graph.minimum_spanning_tree(weights, root_vertex)
示例12: _warp_images
def _warp_images(self, images, shapes, _, level_str, verbose):
    """Extract a parts image for every (image, shape) pair."""
    n_images = len(images)
    parts_images = []
    for idx, (image, shape) in enumerate(zip(images, shapes)):
        if verbose:
            print_dynamic('{}Warping images - {}'.format(
                level_str,
                progress_bar_str(float(idx + 1) / n_images,
                                 show_bar=False)))
        parts_images.append(build_parts_image(
            image, shape, self.parts_shape,
            normalize_parts=self.normalize_parts))
    return parts_images
示例13: compute_sparse_covariance
def compute_sparse_covariance(X, adjacency_array, patch_len, level_str,
                              verbose):
    """Accumulate per-edge inverse covariances into a global block matrix
    and return the inverse of that matrix.

    ``X`` is an ``n_features x n_samples`` data matrix whose rows are
    grouped in runs of ``patch_len`` per vertex; ``adjacency_array`` lists
    the graph's edges as vertex-index pairs.
    """
    n_features, n_samples = X.shape
    n_edges = adjacency_array.shape[0]
    # global accumulated (dense) block precision matrix
    all_cov = np.zeros((n_features, n_features))
    for e in range(n_edges):
        if verbose:
            print_dynamic('{}Distribution per edge - {}'.format(
                level_str,
                progress_bar_str(float(e + 1) / n_edges,
                                 show_bar=False)))
        # order the edge's vertices so that v1 < v2
        v1 = np.min(adjacency_array[e, :])
        v2 = np.max(adjacency_array[e, :])
        # row/column ranges of the two vertices
        v1_from, v1_to = v1 * patch_len, (v1 + 1) * patch_len
        v2_from, v2_to = v2 * patch_len, (v2 + 1) * patch_len
        # stack both vertices' rows and invert their joint covariance
        edge_data = np.concatenate((X[v1_from:v1_to, :], X[v2_from:v2_to, :]))
        icov = np.linalg.inv(np.cov(edge_data))
        # scatter the four blocks of icov into the global matrix,
        # accumulating on shared vertices
        all_cov[v1_from:v1_to, v2_from:v2_to] += icov[:patch_len, patch_len:]
        all_cov[v2_from:v2_to, v1_from:v1_to] += icov[patch_len:, :patch_len]
        all_cov[v1_from:v1_to, v1_from:v1_to] += icov[:patch_len, :patch_len]
        all_cov[v2_from:v2_to, v2_from:v2_to] += icov[patch_len:, patch_len:]
    return np.linalg.inv(all_cov)
示例14: _normalization_wrt_reference_shape
def _normalization_wrt_reference_shape(cls, images, group, label,
                                       reference_shape, verbose=False):
    r"""
    Rescale every training image so its landmarks match the scale of the
    reference (mean) shape. This step is essential before building a
    deformable model.

    Parameters
    ----------
    images : list of :map:`MaskedImage`
        The set of landmarked images from which to build the model.
    group : `string`
        The key of the landmark set that should be used. If ``None``,
        and if there is only one set of landmarks, this set will be used.
    label : `string`
        The label of the landmark manager that you wish to use. If no
        label is passed, the convex hull of all landmarks is used.
    reference_shape : :map:`PointCloud`
        The reference shape used to resize all training images to a
        consistent object size.
    verbose : bool, optional
        Flag that controls information and progress printing.

    Returns
    -------
    normalized_images : :map:`MaskedImage` list
        A list with the normalized images.
    """
    n_images = len(images)
    normalized_images = []
    for idx, image in enumerate(images):
        if verbose:
            print_dynamic('- Normalizing images size: {}'.format(
                progress_bar_str((idx + 1.) / n_images,
                                 show_bar=False)))
        normalized_images.append(image.rescale_to_reference_shape(
            reference_shape, group=group, label=label))
    if verbose:
        print_dynamic('- Normalizing images size: Done\n')
    return normalized_images
示例15: __init__
def __init__(self, samples, centre=True, bias=False, verbose=False,
             n_samples=None):
    """Build a PCA model from a list or iterator of vectorizable samples.

    Parameters
    ----------
    samples : list or iterator of objects with ``n_parameters`` and
        ``as_vector()``. If ``n_samples`` is None, ``samples`` must be a
        list (it is indexed and sliced); otherwise it is consumed as an
        iterator and at most ``n_samples`` items are used.
    centre : bool, optional
        If True, the data is centred before decomposition.
    bias : bool, optional
        Covariance normalisation flag forwarded to the decomposition.
    verbose : bool, optional
        If True, prints the allocation size and a progress bar.
    n_samples : int or None, optional
        Number of samples to draw when ``samples`` is an iterator.
    """
    # get the first element as the template and use it to configure the
    # data matrix
    if n_samples is None:
        # samples is a list
        n_samples = len(samples)
        template = samples[0]
        samples = samples[1:]
    else:
        # samples is an iterator
        template = next(samples)
    n_features = template.n_parameters
    template_vector = template.as_vector()
    # one row per sample; dtype taken from the template's vector
    data = np.zeros((n_samples, n_features), dtype=template_vector.dtype)
    # now we can fill in the first element from the template
    data[0] = template_vector
    # free the standalone copy early; data[0] already holds the values
    del template_vector
    if verbose:
        # NOTE(review): the implicit string concatenation leaves no space
        # before 'GB' in the printed message -- confirm this is intended.
        print('Allocated data matrix {:.2f}'
              'GB'.format(data.nbytes / 2 ** 30))
    # 1-based as we have the template vector set already
    for i, sample in enumerate(samples, 1):
        if i >= n_samples:
            # the iterator may yield more items than n_samples; stop once
            # the matrix is full
            break
        if verbose:
            print_dynamic(
                'Building data matrix from {} samples - {}'.format(
                    n_samples,
                    progress_bar_str(float(i + 1) / n_samples, show_bar=True)))
        data[i] = sample.as_vector()
    # compute pca
    e_vectors, e_values, mean = principal_component_decomposition(
        data, whiten=False, centre=centre, bias=bias, inplace=True)
    super(PCAModel, self).__init__(e_vectors, mean, template)
    self.centred = centre
    self.biased = bias
    self._eigenvalues = e_values
    # start the active components as all the components
    self._n_active_components = int(self.n_components)
    self._trimmed_eigenvalues = None