This article collects typical usage examples of the Python method joblib.Parallel.flatten. If you are wondering how to use Parallel.flatten in Python, how it works, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples of the containing class, joblib.Parallel.
A total of 1 code example of the Parallel.flatten method is shown below; by default, examples are ordered by popularity. You can upvote the examples you like or find useful, and your ratings help the system recommend better Python code examples.
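Before the full example, note that in the code below flatten is actually invoked on the NumPy array built from the list of results that Parallel returns. A minimal, hypothetical sketch of that pattern (the row_of_squares helper and the sample data are illustrative assumptions, not part of joblib):

import numpy
from joblib import Parallel, delayed

def row_of_squares(x):
    # each call returns a small array, standing in for one row of a result matrix
    return numpy.array([x, x * x])

# Parallel(...)(...) returns an ordinary Python list of the per-call results
results = Parallel(n_jobs=2)(delayed(row_of_squares)(x) for x in range(4))

# converting the list to a NumPy array is what makes .flatten() available
flat = numpy.array(results).flatten()
print(flat)  # [0 0 1 1 2 4 3 9]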
Example 1: anisotropic_smooth
# Required import: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import flatten [as alias]
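# (The snippet also uses module-level names not shown here: numpy, delayed from joblib,
#  the fibers and similarity modules, and a USE_PARALLEL flag from its source file.)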
def anisotropic_smooth(inpd, fiber_distance_threshold, points_per_fiber=30, n_jobs=2, cluster_max=10):
    """ Average nearby fibers.
    The pairwise fiber distance matrix is computed, then fibers
    are averaged with their neighbors until an edge (> fiber_distance_threshold) is encountered.
    """
    # polydata to array conversion, fixed-length fiber representation
    current_fiber_array = fibers.FiberArray()
    current_fiber_array.points_per_fiber = points_per_fiber
    current_fiber_array.convert_from_polydata(inpd)
    original_number_of_fibers = current_fiber_array.number_of_fibers
    # fiber list data structure initialization for easy fiber averaging
    curr_count = list()
    curr_fibers = list()
    curr_indices = list()
    for lidx in range(0, current_fiber_array.number_of_fibers):
        curr_fibers.append(current_fiber_array.get_fiber(lidx))
        curr_count.append(1)
        curr_indices.append(list([lidx]))
    converged = False
    iteration_count = 0
    while not converged:
print "<filter.py> ITERATION:", iteration_count, "SUM FIBER COUNTS:", numpy.sum(numpy.array(curr_count))
print "<filter.py> number indices", len(curr_indices)
        # fiber data structures for output of this iteration
        next_fibers = list()
        next_count = list()
        next_indices = list()
        # information for this iteration
        done = numpy.zeros(current_fiber_array.number_of_fibers)
        fiber_indices = range(0, current_fiber_array.number_of_fibers)
        # if the maximum number of fibers have been combined, stop averaging this fiber
        done[numpy.nonzero(numpy.array(curr_count) >= cluster_max)] = 1
        # pairwise distance matrix
        if USE_PARALLEL:
            distances = Parallel(n_jobs=n_jobs, verbose=1)(
                delayed(similarity.fiber_distance)(
                    current_fiber_array.get_fiber(lidx),
                    current_fiber_array,
                    0, 'Hausdorff')
                for lidx in fiber_indices)
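            # each delayed call returns the distances from one fiber to all fibers;
            # numpy.array below stacks these rows into the square (N, N) distance matrix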
            distances = numpy.array(distances)
        else:
            distances = \
                numpy.zeros(
                    (current_fiber_array.number_of_fibers,
                     current_fiber_array.number_of_fibers))
            for lidx in fiber_indices:
                distances[lidx, :] = \
                    similarity.fiber_distance(
                        current_fiber_array.get_fiber(lidx),
                        current_fiber_array, 0, 'Hausdorff')
        # distances to self are not of interest
        for lidx in fiber_indices:
            distances[lidx, lidx] = numpy.inf
        # sort the pairwise distances.
        distances_flat = distances.flatten()
        pair_order = numpy.argsort(distances_flat)
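        # argsort over the flattened (N*N,) array gives flat indices of fiber pairs
        # ordered from closest to farthest; pair_order[0] is the closest remaining pair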
print "<filter.py> DISTANCE MIN:", distances_flat[pair_order[0]], \
"DISTANCE COUNT:", distances.shape
        # if the smallest distance is greater or equal to the
        # threshold, we have converged
        if distances_flat[pair_order[0]] >= fiber_distance_threshold:
            converged = True
            print("<filter.py> CONVERGED")
            break
        else:
            print("<filter.py> NOT CONVERGED")
        # loop variables
        idx = 0
        pair_idx = pair_order[idx]
        number_of_fibers = distances.shape[0]
        number_averages = 0
        # combine nearest neighbors unless done, until hit threshold
        while distances_flat[pair_idx] < fiber_distance_threshold:
            # find the fiber indices corresponding to this pairwise distance
            # using integer division and modulo on the flat index
            f_row = pair_idx // number_of_fibers
            f_col = pair_idx % number_of_fibers
            # check if this neighbor pair can be combined
            combine = (not done[f_row]) and (not done[f_col])
            if combine:
                done[f_row] += 1
                done[f_col] += 1
#.........part of the code omitted here.........
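The distinctive step in this example is finding the globally closest pair through the flattened distance matrix. Below is a minimal, self-contained sketch of that technique under simplified assumptions: random 3-D points and a hypothetical point_distance helper stand in for the fibers and for similarity.fiber_distance.

import numpy
from joblib import Parallel, delayed

def point_distance(i, points):
    # distances from point i to every point (stand-in for similarity.fiber_distance)
    return numpy.linalg.norm(points - points[i], axis=1)

points = numpy.random.rand(50, 3)
n = len(points)

# one delayed call per row, mirroring the Parallel(...)(delayed(...) ...) call above
rows = Parallel(n_jobs=2)(delayed(point_distance)(i, points) for i in range(n))
distances = numpy.array(rows)

# self-distances are not of interest; then locate the closest pair via the flat index
numpy.fill_diagonal(distances, numpy.inf)
distances_flat = distances.flatten()
pair_idx = numpy.argsort(distances_flat)[0]
row, col = pair_idx // n, pair_idx % n  # same div/mod recovery as in the example
print("closest pair:", row, col, "distance:", distances_flat[pair_idx])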