This article collects typical usage examples of the Python class dipy.segment.clustering.QuickBundles. If you have been wondering what exactly the QuickBundles class does, how to use it, or where to find examples of it in action, the hand-picked class code examples below may help.
A total of 15 code examples of the QuickBundles class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
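Before the collected examples, here is a minimal sketch of the basic API as a quick orientation. The toy streamlines and the 10 mm threshold are illustrative assumptions, not taken from the examples below:

import numpy as np
from dipy.segment.clustering import QuickBundles

# Toy data: a few streamlines, each an (N, 3) array of 3D points.
rng = np.random.RandomState(0)
streamlines = [rng.rand(20, 3).astype(np.float32) * 100 for _ in range(5)]

qb = QuickBundles(threshold=10.)    # merge streamlines closer than 10 mm (MDF)
clusters = qb.cluster(streamlines)  # returns a ClusterMap
print("Nb. clusters:", len(clusters))
print("First centroid shape:", clusters.centroids[0].shape)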
Example 1: test_quickbundles_with_python_metric
def test_quickbundles_with_python_metric():
    # `data`, `threshold` and `clusters_truth` are module-level fixtures
    # defined elsewhere in DIPY's test suite.

    class MDFpy(dipymetric.Metric):
        def are_compatible(self, shape1, shape2):
            return shape1 == shape2

        def dist(self, features1, features2):
            dist = np.sqrt(np.sum((features1 - features2)**2, axis=1))
            dist = np.sum(dist / len(features1))
            return dist

    rdata = streamline_utils.set_number_of_points(data, 10)
    qb = QuickBundles(threshold=2 * threshold, metric=MDFpy())
    clusters = qb.cluster(rdata)

    # By default `refdata` refers to the data being clustered.
    assert_equal(clusters.refdata, rdata)

    # Set `refdata` to None to get indices instead of actual data points.
    clusters.refdata = None
    assert_array_equal(list(itertools.chain(*clusters)),
                       list(itertools.chain(*clusters_truth)))

    # Cluster read-only data.
    for datum in rdata:
        datum.setflags(write=False)

    # Cluster data with a different dtype (should be converted to float32).
    for datatype in [np.float64, np.int32, np.int64]:
        newdata = [datum.astype(datatype) for datum in rdata]
        clusters = qb.cluster(newdata)
        assert_equal(clusters.centroids[0].dtype, np.float32)
Example 2: test_quickbundles_memory_leaks
def test_quickbundles_memory_leaks():
    qb = QuickBundles(threshold=2 * threshold)

    type_name_pattern = "memoryview"
    initial_types_refcount = get_type_refcount(type_name_pattern)

    qb.cluster(data)
    # At this point, all memoryviews created during clustering should be freed.
    assert_equal(get_type_refcount(type_name_pattern), initial_types_refcount)
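Here `get_type_refcount` comes from DIPY's test utilities. As context only, a minimal stand-in (an assumption-labeled sketch, not DIPY's actual implementation) can do the same kind of bookkeeping by counting live objects per type name via the garbage collector:

import gc
from collections import defaultdict

def count_live_objects(type_name_pattern):
    # Hypothetical stand-in for get_type_refcount: count currently-alive
    # objects whose type name contains the given pattern.
    counts = defaultdict(int)
    for obj in gc.get_objects():
        name = type(obj).__name__
        if type_name_pattern in name:
            counts[name] += 1
    return dict(counts)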
Example 3: main
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    full_tfile = nib.streamlines.load(args.full_tfile)
    model_tfile = nib.streamlines.load(args.model_tfile)
    model_mask = nib.load(args.model_mask)

    # Bring streamlines to voxel space, where coordinate (0,0,0) is the
    # corner of a voxel.
    model_tfile.tractogram.apply_affine(np.linalg.inv(model_mask.affine))
    model_tfile.streamlines._data += 0.5  # Shift of half a voxel
    full_tfile.tractogram.apply_affine(np.linalg.inv(model_mask.affine))
    full_tfile.streamlines._data += 0.5  # Shift of half a voxel
    assert model_mask.get_data().sum() == create_binary_map(model_tfile.streamlines, model_mask).sum()

    # Resample streamlines.
    full_streamlines = set_number_of_points(full_tfile.streamlines, args.nb_points_resampling)
    model_streamlines = set_number_of_points(model_tfile.streamlines, args.nb_points_resampling)

    # Segment the model.
    rng = np.random.RandomState(42)
    indices = np.arange(len(model_streamlines))
    rng.shuffle(indices)
    qb = QuickBundles(args.qb_threshold)
    clusters = qb.cluster(model_streamlines, ordering=indices)

    # Try to find the optimal assignment threshold.
    best_threshold = None
    best_f1_score = -np.inf
    thresholds = np.arange(-2, 10, 0.2) + args.qb_threshold
    for threshold in thresholds:
        indices = qb.find_closest(clusters, full_streamlines, threshold=threshold)
        nb_assignments = np.sum(indices != -1)

        mask = create_binary_map(full_tfile.streamlines[indices != -1], model_mask)

        overlap_per_bundle = _compute_overlap(model_mask.get_data(), mask)
        overreach_per_bundle = _compute_overreach(model_mask.get_data(), mask)
        # overreach_norm_gt_per_bundle = _compute_overreach_normalize_gt(model_mask.get_data(), mask)
        f1_score = _compute_f1_score(overlap_per_bundle, overreach_per_bundle)
        if best_f1_score < f1_score:
            best_threshold = threshold
            best_f1_score = f1_score

        print("{}:\t {}/{} ({:.1%}) {:.1%}/{:.1%} ({:.1%}) {}/{}".format(
            threshold,
            nb_assignments, len(model_streamlines), nb_assignments / len(model_streamlines),
            overlap_per_bundle, overreach_per_bundle, f1_score,
            mask.sum(), model_mask.get_data().sum()))

        if overlap_per_bundle >= 1:
            break

    print("Best threshold: {} with F1-Score of {}".format(best_threshold, best_f1_score))
Example 4: test_quickbundles_with_not_order_invariant_metric
def test_quickbundles_with_not_order_invariant_metric():
    metric = dipymetric.AveragePointwiseEuclideanMetric()
    qb = QuickBundles(threshold=np.inf, metric=metric)

    streamline = np.arange(10 * 3, dtype=dtype).reshape((-1, 3))
    streamlines = [streamline, streamline[::-1]]

    clusters = qb.cluster(streamlines)
    assert_equal(len(clusters), 1)
    assert_array_equal(clusters[0].centroid, streamline)
Example 5: bench_quickbundles
def bench_quickbundles():
    dtype = "float32"
    repeat = 10
    nb_points = 12

    streams, hdr = nib.trackvis.read(get_fnames('fornix'))
    fornix = [s[0].astype(dtype) for s in streams]
    fornix = streamline_utils.set_number_of_points(fornix, nb_points)

    # Create eight copies of the fornix to be clustered (one in each octant).
    streamlines = []
    streamlines += [s + np.array([100, 100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([100, -100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([100, 100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([100, -100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, 100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, -100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, 100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, -100, -100], dtype) for s in fornix]

    # The expected number of clusters of the fornix using threshold=10 is 4.
    threshold = 10.
    expected_nb_clusters = 4 * 8

    print("Timing QuickBundles 1.0 vs. 2.0")

    qb = QB_Old(streamlines, threshold, pts=None)
    qb1_time = measure("QB_Old(streamlines, threshold, nb_points)", repeat)
    print("QuickBundles time: {0:.4}sec".format(qb1_time))
    assert_equal(qb.total_clusters, expected_nb_clusters)
    sizes1 = [qb.partitions()[i]['N'] for i in range(qb.total_clusters)]
    indices1 = [qb.partitions()[i]['indices']
                for i in range(qb.total_clusters)]

    qb2 = QB_New(threshold)
    qb2_time = measure("clusters = qb2.cluster(streamlines)", repeat)
    print("QuickBundles2 time: {0:.4}sec".format(qb2_time))
    print("Speed up of {0}x".format(qb1_time / qb2_time))
    clusters = qb2.cluster(streamlines)
    sizes2 = map(len, clusters)
    indices2 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes2), sizes1)
    assert_arrays_equal(indices2, indices1)

    qb = QB_New(threshold, metric=MDFpy())
    qb3_time = measure("clusters = qb.cluster(streamlines)", repeat)
    print("QuickBundles2_python time: {0:.4}sec".format(qb3_time))
    print("Speed up of {0}x".format(qb1_time / qb3_time))
    clusters = qb.cluster(streamlines)
    sizes3 = map(len, clusters)
    indices3 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes3), sizes1)
    assert_arrays_equal(indices3, indices1)
Example 6: auto_extract
def auto_extract(model_cluster_map, rstreamlines,
                 number_pts_per_str=NB_POINTS_RESAMPLE,
                 close_centroids_thr=20,
                 clean_thr=7.,
                 disp=False, verbose=False,
                 ordering=None):
    if ordering is None:
        ordering = np.arange(len(rstreamlines))

    qb = QuickBundles(threshold=REF_BUNDLES_THRESHOLD,
                      metric=AveragePointwiseEuclideanMetric())
    closest_bundles = qb.find_closest(model_cluster_map, rstreamlines,
                                      clean_thr, ordering=ordering)
    return ordering[np.where(closest_bundles >= 0)[0]]
Example 7: test_quickbundles_shape_uncompatibility
def test_quickbundles_shape_uncompatibility():
    # QuickBundles' old default metric (AveragePointwiseEuclideanMetric,
    # aka MDF) requires that all streamlines have the same number of points.
    metric = dipymetric.AveragePointwiseEuclideanMetric()
    qb = QuickBundles(threshold=20., metric=metric)
    assert_raises(ValueError, qb.cluster, data)

    # QuickBundles' new default metric (AveragePointwiseEuclideanMetric,
    # aka MDF, combined with ResampleFeature) automatically resamples
    # streamlines so they all have 18 points.
    qb = QuickBundles(threshold=20.)
    clusters1 = qb.cluster(data)

    feature = dipymetric.ResampleFeature(nb_points=18)
    metric = dipymetric.AveragePointwiseEuclideanMetric(feature)
    qb = QuickBundles(threshold=20., metric=metric)
    clusters2 = qb.cluster(data)

    assert_array_equal(list(itertools.chain(*clusters1)),
                       list(itertools.chain(*clusters2)))
Example 8: test_quickbundles_streamlines
def test_quickbundles_streamlines():
    rdata = streamline_utils.set_number_of_points(data, 10)
    qb = QuickBundles(threshold=2 * threshold)

    clusters = qb.cluster(rdata)
    # By default `refdata` refers to the data being clustered.
    assert_equal(clusters.refdata, rdata)

    # Set `refdata` to None to get indices instead of actual data points.
    clusters.refdata = None
    assert_array_equal(list(itertools.chain(*clusters)),
                       list(itertools.chain(*clusters_truth)))

    # Cluster read-only data.
    for datum in rdata:
        datum.setflags(write=False)

    # Cluster data with a different dtype (should be converted to float32).
    for datatype in [np.float64, np.int32, np.int64]:
        newdata = [datum.astype(datatype) for datum in rdata]
        clusters = qb.cluster(newdata)
        assert_equal(clusters.centroids[0].dtype, np.float32)
Example 9: _prepare_gt_bundles_info
def _prepare_gt_bundles_info(bundles_dir, bundles_masks_dir,
                             gt_bundles_attribs, ref_anat_fname):
    # Ref bundles will contain: {'name': 'name_of_the_bundle',
    #                            'threshold': thres_value,
    #                            'streamlines': list_of_streamlines}
    dummy_attribs = {'orientation': 'LPS'}
    qb = QuickBundles(20, metric=AveragePointwiseEuclideanMetric())

    ref_bundles = []
    for bundle_idx, bundle_f in enumerate(sorted(os.listdir(bundles_dir))):
        bundle_name = os.path.splitext(os.path.basename(bundle_f))[0]
        bundle_attribs = gt_bundles_attribs.get(os.path.basename(bundle_f))
        if bundle_attribs is None:
            raise ValueError(
                "Missing basic bundle attribs for {0}".format(bundle_f))

        # Resample now to avoid doing it for each iteration of chunking.
        orig_strl = [s for s in get_tracts_voxel_space_for_dipy(
            os.path.join(bundles_dir, bundle_f),
            ref_anat_fname, dummy_attribs)]
        resamp_bundle = set_number_of_points(orig_strl, NB_POINTS_RESAMPLE)
        resamp_bundle = [s.astype('f4') for s in resamp_bundle]

        bundle_cluster_map = qb.cluster(resamp_bundle)
        bundle_cluster_map.refdata = resamp_bundle

        bundle_mask = nib.load(os.path.join(bundles_masks_dir,
                                            bundle_name + '.nii.gz'))

        ref_bundles.append({'name': bundle_name,
                            'threshold': bundle_attribs['cluster_threshold'],
                            'cluster_map': bundle_cluster_map,
                            'mask': bundle_mask})

    return ref_bundles
Example 10: score_from_files
def score_from_files(filename, masks_dir, bundles_dir,
                     tracts_attribs, basic_bundles_attribs,
                     save_segmented=False, save_IBs=False,
                     save_VBs=False, save_VCWPs=False,
                     segmented_out_dir='', segmented_base_name='',
                     verbose=False):
    """
    Computes all metrics needed to score a tractogram.

    Given a ``tck`` file of streamlines and a folder containing masks,
    computes the percentage of: Valid Connections (VC), Invalid Connections
    (IC), Valid Connections but Wrong Path (VCWP), No Connections (NC),
    Average Bundle Coverage (ABC), Average ROIs Coverage (ARC), coverage
    per bundle and coverage per ROI. It also provides the number of Valid
    Bundles (VB) and Invalid Bundles (IB), and the number of streamlines
    per bundle.

    Parameters
    ----------
    filename : str
        name of a tracts file
    masks_dir : str
        name of the directory containing the masks
    save_segmented : bool
        if true, saves the segmented VC, IC, VCWP and NC

    Returns
    -------
    scores : dict
        dictionary containing a score for each metric
    indices : dict
        dictionary containing the indices of streamlines composing VC, IC,
        VCWP and NC
    """
    if verbose:
        logging.basicConfig(level=logging.DEBUG)

    rois_dir = masks_dir + "rois/"
    bundles_masks_dir = masks_dir + "bundles/"
    wm_file = masks_dir + "wm.nii.gz"
    wm = nib.load(wm_file)

    streamlines = load_streamlines(filename, wm_file, tracts_attribs)

    ROIs = [nib.load(rois_dir + f) for f in sorted(os.listdir(rois_dir))]
    bundles_masks = [nib.load(bundles_masks_dir + f)
                     for f in sorted(os.listdir(bundles_masks_dir))]
    ref_bundles = []

    # Ref bundles will contain: {'name': 'name_of_the_bundle',
    #                            'threshold': thres_value,
    #                            'streamlines': list_of_streamlines}
    dummy_attribs = {'orientation': 'LPS'}
    qb = QuickBundles(threshold=REF_BUNDLES_THRESHOLD,
                      metric=AveragePointwiseEuclideanMetric())

    out_centroids_dir = os.path.join(segmented_out_dir, os.path.pardir,
                                     "centroids")
    if not os.path.isdir(out_centroids_dir):
        os.mkdir(out_centroids_dir)

    rng = np.random.RandomState(42)

    for bundle_idx, bundle_f in enumerate(sorted(os.listdir(bundles_dir))):
        bundle_attribs = basic_bundles_attribs.get(os.path.basename(bundle_f))
        if bundle_attribs is None:
            raise ValueError(
                "Missing basic bundle attribs for {0}".format(bundle_f))

        # # Resample now to avoid doing it for each iteration of chunking.
        # orig_strl = [s for s in get_tracts_voxel_space_for_dipy(
        #     os.path.join(bundles_dir, bundle_f),
        #     wm_file, dummy_attribs)]
        orig_strl = load_streamlines(os.path.join(bundles_dir, bundle_f),
                                     wm_file, dummy_attribs)
        resamp_bundle = set_number_of_points(orig_strl, NB_POINTS_RESAMPLE)
        # resamp_bundle = [s.astype('f4') for s in resamp_bundle]

        indices = np.arange(len(resamp_bundle))
        rng.shuffle(indices)
        bundle_cluster_map = qb.cluster(resamp_bundle, ordering=indices)
        # bundle_cluster_map.refdata = resamp_bundle

        bundle_mask_inv = nib.Nifti1Image(
            (1 - bundles_masks[bundle_idx].get_data()) * wm.get_data(),
            bundles_masks[bundle_idx].get_affine())

        ref_bundles.append({'name': os.path.basename(bundle_f).replace(
                                '.fib', '').replace('.tck', ''),
                            'threshold': bundle_attribs['cluster_threshold'],
                            'cluster_map': bundle_cluster_map,
                            'mask': bundles_masks[bundle_idx],
                            'mask_inv': bundle_mask_inv})

        logging.debug("{}: {} centroids".format(ref_bundles[-1]['name'],
                                                len(bundle_cluster_map)))
        nib.streamlines.save(
            nib.streamlines.Tractogram(bundle_cluster_map.centroids,
                                       affine_to_rasmm=np.eye(4)),
            os.path.join(out_centroids_dir, ref_bundles[-1]['name'] + ".tck"))

    score_func = score_auto_extract_auto_IBs

    return score_func(streamlines, bundles_masks, ref_bundles, ROIs, wm,
                      save_segmented=save_segmented, save_IBs=save_IBs,
                      save_VBs=save_VBs, save_VCWPs=save_VCWPs,
                      out_segmented_strl_dir=segmented_out_dir,
                      base_out_segmented_strl=segmented_base_name,
                      ref_anat_fname=wm_file)
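A hedged usage sketch of the function above. The paths and attribute dicts are placeholders, and the keys of `indices` are assumed to be 'VC', 'IC', 'VCWP' and 'NC', per the docstring:

scores, indices = score_from_files("submission.tck", "masks/", "bundles/",
                                   tracts_attribs, basic_bundles_attribs,
                                   verbose=True)
print(scores)
print({name: len(idx) for name, idx in indices.items()})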
Example 11: _auto_extract_VCs
def _auto_extract_VCs(streamlines, ref_bundles):
    # `streamlines` = list of all streamlines

    # TODO check what is needed
    # VC = 0
    VC_idx = set()

    found_vbs_info = {}
    for bundle in ref_bundles:
        found_vbs_info[bundle['name']] = {'nb_streamlines': 0,
                                          'streamlines_indices': set()}

    # TODO probably not needed
    # already_assigned_streamlines_idx = set()

    # Need to bookkeep because we chunk for big datasets
    processed_strl_count = 0
    chunk_size = len(streamlines)
    chunk_it = 0

    # nb_bundles = len(ref_bundles)
    # bundles_found = [False] * nb_bundles
    # bundles_potential_VCWP = [set()] * nb_bundles

    logging.debug("Starting scoring VCs")

    # Start loop here for big datasets
    while processed_strl_count < len(streamlines):
        if processed_strl_count > 0:
            raise NotImplementedError("Not supposed to have more than one chunk!")

        logging.debug("Starting chunk: {0}".format(chunk_it))

        strl_chunk = streamlines[chunk_it * chunk_size:(chunk_it + 1) * chunk_size]

        processed_strl_count += len(strl_chunk)

        # Resample and run QuickBundles on the submission chunk now,
        # to avoid doing it at every call of auto_extract.
        rstreamlines = set_number_of_points(
            nib.streamlines.ArraySequence(strl_chunk), NB_POINTS_RESAMPLE)

        # qb.cluster had a problem with f8
        # rstreamlines = [s.astype('f4') for s in rstreamlines]

        # chunk_cluster_map = qb.cluster(rstreamlines)
        # chunk_cluster_map.refdata = strl_chunk

        # # Merge clusters
        # all_bundles = ClusterMapCentroid()
        # cluster_id_to_bundle_id = []
        # for bundle_idx, ref_bundle in enumerate(ref_bundles):
        #     clusters = ref_bundle["cluster_map"]
        #     cluster_id_to_bundle_id.extend([bundle_idx] * len(clusters))
        #     all_bundles.add_cluster(*clusters)

        # logging.debug("Starting VC identification through auto_extract")
        # qb = QuickBundles(threshold=10, metric=AveragePointwiseEuclideanMetric())
        # closest_bundles = qb.find_closest(all_bundles, rstreamlines, threshold=7)
        # print("Unassigned streamlines: {}".format(np.sum(closest_bundles == -1)))

        # for cluster_id, bundle_id in enumerate(cluster_id_to_bundle_id):
        #     indices = np.where(closest_bundles == cluster_id)[0]
        #     print("{}/{} ({}) Found {}".format(cluster_id, len(cluster_id_to_bundle_id), ref_bundles[bundle_id]['name'], len(indices)))
        #     if len(indices) == 0:
        #         continue

        #     vb_info = found_vbs_info.get(ref_bundles[bundle_id]['name'])
        #     indices = set(indices)
        #     vb_info['nb_streamlines'] += len(indices)
        #     vb_info['streamlines_indices'] |= indices
        #     VC_idx |= indices

        qb = QuickBundles(threshold=10, metric=AveragePointwiseEuclideanMetric())
        ordering = np.arange(len(rstreamlines))

        logging.debug("Starting VC identification through auto_extract")
        for bundle_idx, ref_bundle in enumerate(ref_bundles):
            print(ref_bundle['name'], ref_bundle['threshold'],
                  len(ref_bundle['cluster_map']))
            # The selected indices are from [0, len(strl_chunk)]
            # selected_streamlines_indices = auto_extract(ref_bundle['cluster_map'],
            #                                             rstreamlines,
            #                                             clean_thr=ref_bundle['threshold'],
            #                                             ordering=ordering)
            closest_bundles = qb.find_closest(ref_bundle['cluster_map'],
                                              rstreamlines[ordering],
                                              ref_bundle['threshold'])
            selected_streamlines_indices = ordering[closest_bundles >= 0]
            ordering = ordering[closest_bundles == -1]

            # Remove duplicates, when streamlines are assigned to multiple VBs.
            # TODO better handling of this case
            # selected_streamlines_indices = set(selected_streamlines_indices) - cur_chunk_VC_idx
            # cur_chunk_VC_idx |= selected_streamlines_indices

            nb_selected_streamlines = len(selected_streamlines_indices)
            print("{} assigned".format(nb_selected_streamlines))

            if nb_selected_streamlines:
                # bundles_found[bundle_idx] = True
                # VC += nb_selected_streamlines
#.........(part of the code omitted here).........
Example 12: import
from dipy.segment.metric import (AveragePointwiseEuclideanMetric,
                                 ResampleFeature)
from dipy.segment.clustering import QuickBundles

feature = ResampleFeature(nb_points=100)
metric = AveragePointwiseEuclideanMetric(feature)

"""
Since we want to include all of the streamlines in a single cluster, we set
the QuickBundles threshold to `np.inf`. We pull out the centroid as the
standard streamline.
"""

qb = QuickBundles(np.inf, metric=metric)

cluster_cst_l = qb.cluster(model_cst_l)
standard_cst_l = cluster_cst_l.centroids[0]

cluster_af_l = qb.cluster(model_af_l)
standard_af_l = cluster_af_l.centroids[0]

"""
We use the centroid streamline for each atlas bundle as the standard against
which to orient all of the streamlines in each bundle from the individual
subject. Here, the affine used is the one from the transform between the
atlas and the individual tractogram, so that the orienting is done relative
to the space of the individual, and not relative to the atlas space.
"""
Example 13: auto_extract_VCs
def auto_extract_VCs(streamlines, ref_bundles):
    # `streamlines` = list of all streamlines
    VC = 0
    VC_idx = set()

    found_vbs_info = {}
    for bundle in ref_bundles:
        found_vbs_info[bundle['name']] = {'nb_streamlines': 0,
                                          'streamlines_indices': set()}

    # Need to bookkeep because we chunk for big datasets
    processed_strl_count = 0
    chunk_size = 5000
    chunk_it = 0

    nb_bundles = len(ref_bundles)
    bundles_found = [False] * nb_bundles

    logging.debug("Starting scoring VCs")

    qb = QuickBundles(threshold=20, metric=AveragePointwiseEuclideanMetric())

    # Start loop here for big datasets
    while processed_strl_count < len(streamlines):
        logging.debug("Starting chunk: {0}".format(chunk_it))

        strl_chunk = streamlines[chunk_it * chunk_size:
                                 (chunk_it + 1) * chunk_size]

        processed_strl_count += len(strl_chunk)
        cur_chunk_VC_idx, cur_chunk_IC_idx, cur_chunk_VCWP_idx = set(), set(), set()

        # Resample and run QuickBundles on the submission chunk now,
        # to avoid doing it at every call of auto_extract.
        rstreamlines = set_number_of_points(strl_chunk, NB_POINTS_RESAMPLE)

        # qb.cluster had a problem with f8
        rstreamlines = [s.astype('f4') for s in rstreamlines]

        chunk_cluster_map = qb.cluster(rstreamlines)
        chunk_cluster_map.refdata = strl_chunk

        logging.debug("Starting VC identification through auto_extract")

        for bundle_idx, ref_bundle in enumerate(ref_bundles):
            # The selected indices are from [0, len(strl_chunk)]
            selected_streamlines_indices = auto_extract(
                ref_bundle['cluster_map'], chunk_cluster_map,
                clean_thr=ref_bundle['threshold'])

            # Remove duplicates, when streamlines are assigned to multiple VBs.
            selected_streamlines_indices = set(selected_streamlines_indices) - \
                cur_chunk_VC_idx
            cur_chunk_VC_idx |= selected_streamlines_indices

            nb_selected_streamlines = len(selected_streamlines_indices)

            if nb_selected_streamlines:
                bundles_found[bundle_idx] = True
                VC += nb_selected_streamlines

                # Shift indices to match the real number of streamlines
                global_select_strl_indices = set([v + chunk_it * chunk_size
                                                  for v in selected_streamlines_indices])
                vb_info = found_vbs_info.get(ref_bundle['name'])
                vb_info['nb_streamlines'] += nb_selected_streamlines
                vb_info['streamlines_indices'] |= global_select_strl_indices

                VC_idx |= global_select_strl_indices
            else:
                global_select_strl_indices = set()

        chunk_it += 1

    # Compute bundle overlap, overreach and f1_scores, and update found_vbs_info
    for bundle_idx, ref_bundle in enumerate(ref_bundles):
        bundle_name = ref_bundle["name"]
        bundle_mask = ref_bundle["mask"]

        vb_info = found_vbs_info[bundle_name]

        # Streamlines are in voxel space since that's how they were
        # loaded in the scoring function.
        tractogram = Tractogram(
            streamlines=(streamlines[i] for i in vb_info['streamlines_indices']),
            affine_to_rasmm=bundle_mask.affine)

        scores = {}
        if len(tractogram) > 0:
            scores = compute_bundle_coverage_scores(tractogram, bundle_mask)

        vb_info['overlap'] = scores.get("OL", 0)
        vb_info['overreach'] = scores.get("OR", 0)
        vb_info['overreach_norm'] = scores.get("ORn", 0)
        vb_info['f1_score'] = scores.get("F1", 0)

    return VC_idx, found_vbs_info
Example 14: QuickBundles
"""
Fiber clustering
----------------
Based on an agglomerative clustering, and a geometric distance.
"""
clustering_outdir = os.path.join(outdir, "clustering")
cluster_file = os.path.join(clustering_outdir, "clusters.json")
if not os.path.isdir(clustering_outdir):
os.mkdir(clustering_outdir)
if not os.path.isfile(cluster_file):
fibers_18 = [resample(track, nb_pol=18) for track in fibers]
qb = QuickBundles(threshold=10.)
clusters_ = qb.cluster(fibers_18)
clusters = {}
for cnt, cluster in enumerate(clusters_):
clusters[str(cnt)] = {"indices": cluster.indices}
with open(cluster_file, "w") as open_file:
json.dump(clusters, open_file, indent=4)
else:
with open(cluster_file) as open_file:
clusters = json.load(open_file)
if 1: #use_vtk:
ren = pvtk.ren()
colors = numpy.ones((len(fibers),))
nb_clusters = len(clusters)
for clusterid, item in clusters.items():
Example 15: points
"""
streams, hdr = tv.read(fname)
streamlines = [i[0] for i in streams]
"""
Perform QuickBundles clustering using the MDF metric and a 10mm distance
threshold. Keep in mind that since the MDF metric requires streamlines to have
the same number of points, the clustering algorithm will internally use a
representation of streamlines that have been automatically downsampled/upsampled
so they have only 12 points (To set manually the number of points,
see :ref:`clustering-examples-ResampleFeature`).
"""
qb = QuickBundles(threshold=10.)
clusters = qb.cluster(streamlines)
"""
`clusters` is a `ClusterMap` object which contains attributes that
provide information about the clustering result.
"""
print("Nb. clusters:", len(clusters))
print("Cluster sizes:", map(len, clusters))
print("Small clusters:", clusters < 10)
print("Streamlines indices of the first cluster:\n", clusters[0].indices)
print("Centroid of the last cluster:\n", clusters[-1].centroid)
"""