This article collects typical usage examples of the Python method joblib.Parallel.flatten. If you are unsure how Parallel.flatten is used in practice, the curated code example below may help. You can also explore further usage examples of the containing class, joblib.Parallel.
One code example of Parallel.flatten is shown below. Examples are sorted by popularity by default; upvoting the examples you find useful helps the site recommend better Python code samples.
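Before the full example, here is a minimal, self-contained sketch of the pattern it relies on: joblib's Parallel/delayed idiom computes one row of a pairwise distance matrix per task, and numpy's flatten then turns the matrix into a 1-D array whose entries can be sorted. This is only an illustrative sketch; the pairwise_row helper and the random data are assumptions for demonstration, not part of joblib's API or of the example below.

from joblib import Parallel, delayed
import numpy

def pairwise_row(i, points):
    # distances from point i to every point (illustrative helper)
    return numpy.linalg.norm(points - points[i], axis=1)

points = numpy.random.rand(100, 3)
rows = Parallel(n_jobs=2, verbose=1)(
    delayed(pairwise_row)(i, points) for i in range(len(points)))
distances = numpy.array(rows)          # (100, 100) distance matrix
pair_order = numpy.argsort(distances.flatten())  # indices of closest pairs first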
Example 1: anisotropic_smooth
# Required import: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import flatten [as alias]
def anisotropic_smooth(inpd, fiber_distance_threshold, points_per_fiber=30, n_jobs=2, cluster_max=10):
    """Average nearby fibers.

    The pairwise fiber distance matrix is computed, then fibers are averaged
    with their neighbors until an edge (distance >= fiber_distance_threshold)
    is encountered.
    """
    # polydata to array conversion, fixed-length fiber representation
    current_fiber_array = fibers.FiberArray()
    current_fiber_array.points_per_fiber = points_per_fiber
    current_fiber_array.convert_from_polydata(inpd)
    original_number_of_fibers = current_fiber_array.number_of_fibers

    # fiber list data structure initialization for easy fiber averaging
    curr_count = list()
    curr_fibers = list()
    curr_indices = list()
    for lidx in range(0, current_fiber_array.number_of_fibers):
        curr_fibers.append(current_fiber_array.get_fiber(lidx))
        curr_count.append(1)
        curr_indices.append(list([lidx]))

    converged = False
    iteration_count = 0
    while not converged:
        print("<filter.py> ITERATION:", iteration_count,
              "SUM FIBER COUNTS:", numpy.sum(numpy.array(curr_count)))
        print("<filter.py> number indices", len(curr_indices))

        # fiber data structures for output of this iteration
        next_fibers = list()
        next_count = list()
        next_indices = list()

        # information for this iteration
        done = numpy.zeros(current_fiber_array.number_of_fibers)
        fiber_indices = range(0, current_fiber_array.number_of_fibers)

        # if the maximum number of fibers has been combined, stop averaging this fiber
        done[numpy.nonzero(numpy.array(curr_count) >= cluster_max)] = 1
        # pairwise distance matrix
        if USE_PARALLEL:
            distances = Parallel(n_jobs=n_jobs, verbose=1)(
                delayed(similarity.fiber_distance)(
                    current_fiber_array.get_fiber(lidx),
                    current_fiber_array,
                    0, 'Hausdorff')
                for lidx in fiber_indices)
            distances = numpy.array(distances)
        else:
            distances = numpy.zeros(
                (current_fiber_array.number_of_fibers,
                 current_fiber_array.number_of_fibers))
            for lidx in fiber_indices:
                distances[lidx, :] = similarity.fiber_distance(
                    current_fiber_array.get_fiber(lidx),
                    current_fiber_array, 0, 'Hausdorff')
        # distances to self are not of interest
        for lidx in fiber_indices:
            distances[lidx, lidx] = numpy.inf

        # sort the pairwise distances
        distances_flat = distances.flatten()
        pair_order = numpy.argsort(distances_flat)

        print("<filter.py> DISTANCE MIN:", distances_flat[pair_order[0]],
              "DISTANCE COUNT:", distances.shape)

        # if the smallest distance is greater than or equal to the
        # threshold, we have converged
        if distances_flat[pair_order[0]] >= fiber_distance_threshold:
            converged = True
            print("<filter.py> CONVERGED")
            break
        else:
            print("<filter.py> NOT CONVERGED")
        # loop variables
        idx = 0
        pair_idx = pair_order[idx]
        number_of_fibers = distances.shape[0]
        number_averages = 0

        # combine nearest neighbors unless done, until hitting the threshold
        while distances_flat[pair_idx] < fiber_distance_threshold:
            # recover the fiber indices of this pairwise distance in the
            # flattened matrix using floor division and modulo
            f_row = pair_idx // number_of_fibers
            f_col = pair_idx % number_of_fibers

            # check if this neighbor pair can be combined
            combine = (not done[f_row]) and (not done[f_col])
            if combine:
                done[f_row] += 1
                done[f_col] += 1
# ......... the rest of the code is omitted here .........
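For context, a call to this function might look like the sketch below. The <filter.py> messages and the fibers/similarity helpers suggest the example comes from the whitematteranalysis package; the module paths, the reader helper, the input file name, and the threshold value here are assumptions for illustration, not taken from the example itself.

import whitematteranalysis as wma

# read a VTK tractography file into vtkPolyData (assumed helper and file name)
inpd = wma.io.read_polydata('tracts.vtp')
# average nearby fibers until no remaining pair is closer than the chosen threshold
result = wma.filter.anisotropic_smooth(
    inpd, fiber_distance_threshold=5.0,
    points_per_fiber=30, n_jobs=2, cluster_max=10)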