This article collects and summarizes typical usage examples of the Python method nilearn.input_data.NiftiMasker.fit. If you are wondering what NiftiMasker.fit does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of the class this method belongs to, nilearn.input_data.NiftiMasker.
Below are 15 code examples of NiftiMasker.fit, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: preprocess
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
def preprocess(num, subj, subj_dir, subj_warp_dir, force_warp=False):
    bold_path = 'BOLD/task001_run00%i/bold_dico_bold7Tp1_to_subjbold7Tp1.nii.gz' % (num+1)
    bold_path = os.path.join(DATA_DIR, subj, bold_path)
    template_path = os.path.join(DATA_DIR, 'templates', 'grpbold7Tp1', 'brain.nii.gz')
    warp_path = os.path.join(DATA_DIR, subj, 'templates', 'bold7Tp1', 'in_grpbold7Tp1', 'subj2tmpl_warp.nii.gz')

    output_path = os.path.join(subj_warp_dir, 'run00%i.nii.gz' % num)

    if force_warp or not os.path.exists(output_path):
        print 'Warping image #%i...' % num
        subprocess.call(['fsl5.0-applywarp', '-i', bold_path, '-o', output_path, '-r', template_path, '-w', warp_path, '-d', 'float'])
    else:
        print 'Reusing cached warp image #%i' % num

    print 'Loading image #%i...' % num
    bold = load(output_path)

    masker = NiftiMasker(load(MASK_FILE))
    # masker = NiftiMasker(load(MASK_FILE), detrend=True, smoothing_fwhm=4.0,
    #                      high_pass=0.01, t_r=2.0, standardize=True)
    masker.fit()
    print 'Removing confounds from image #%i...' % num
    data = masker.transform(bold, confounds(num, subj))
    print 'Detrending image #%i...' % num
    filtered = np.float32(savgol_filter(data, 61, 5, axis=0))
    img = masker.inverse_transform(data - filtered)
    print 'Smoothing image #%i...' % num
    img = image.smooth_img(img, 4.0)
    print 'Saving image #%i...' % num
    save(img, os.path.join(subj_dir, 'run00%i.nii.gz' % num))
    print 'Finished with image #%i' % num
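Because a mask image is passed to the NiftiMasker constructor here, fit() needs no arguments: it only prepares the mask so that transform/inverse_transform can map between 4D images and 2D (time points x voxels) arrays. A minimal, self-contained sketch of that round trip on synthetic data (not the study-specific paths used above):

import numpy as np
import nibabel as nib
from nilearn.input_data import NiftiMasker

rng = np.random.RandomState(0)
affine = np.eye(4)
func_img = nib.Nifti1Image(rng.rand(8, 8, 8, 20), affine)              # fake 4D run
mask_img = nib.Nifti1Image(np.ones((8, 8, 8), dtype=np.int8), affine)  # whole-box mask

masker = NiftiMasker(mask_img=mask_img)
masker.fit()                                # no data needed: the mask is already given
data = masker.transform(func_img)           # (20, 512) array: time points x voxels
img_back = masker.inverse_transform(data)   # back to a 4D Nifti image
print(data.shape, img_back.shape)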
Example 2: create_rois_from_clusters
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
def create_rois_from_clusters(contrast_tmap, mask, threshold=3.09,
                              height_control='brute', cluster_threshold=10,
                              save_path=None):
    if save_path is not None:
        if not os.path.exists(save_path):
            os.makedirs(save_path)
    thresholded = map_threshold(contrast_tmap, mask, threshold,
                                height_control, cluster_threshold)
    cluster_map, n_cluster = label(thresholded.get_data() > 0)
    clusters = []
    masker = NiftiMasker(mask_img=mask)
    masker.fit()
    mask_affine = nib.load(mask).get_affine()
    for label_ in range(1, n_cluster + 1):
        cluster = cluster_map.copy()
        cluster[cluster_map != label_] = 0
        cluster[cluster_map == label_] = 1
        cluster = nib.Nifti1Image(cluster, mask_affine)
        clusters.append(cluster)
        if save_path is not None:
            nib.save(cluster, os.path.join(save_path,
                                           'cluster_{0}.nii'.format(label_)))
    return clusters
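A hypothetical call of the function above (file names are placeholders; map_threshold and scipy.ndimage.label are assumed imported as in the original module):

rois = create_rois_from_clusters('group_tmap.nii.gz', 'brain_mask.nii.gz',
                                 threshold=3.09, cluster_threshold=10,
                                 save_path='cluster_rois')
print('%d clusters turned into ROI images' % len(rois))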
Example 3: make_ttest
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
def make_ttest(reg1, reg2):
    masker = NiftiMasker(nib.load(MASK_FILE), standardize=False)
    masker.fit()
    subjects = [1, 2, 3, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
    a = np.arctanh(join_all_subjects(reg1, subjects, masker))
    b = np.arctanh(join_all_subjects(reg2, subjects, masker))
    t, prob = ttest_rel(a, b)
    tt = masker.inverse_transform(t)
    pp = masker.inverse_transform(prob)
    return tt, pp
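A hypothetical invocation (regressor names and output paths are placeholders; join_all_subjects is assumed to return one masked map per subject): the Fisher z-transformed maps are compared with a paired t-test, and the t- and p-values are projected back to brain space with inverse_transform:

t_img, p_img = make_ttest('audio', 'visual')   # placeholder regressor names
nib.save(t_img, 't_audio_vs_visual.nii.gz')
nib.save(p_img, 'p_audio_vs_visual.nii.gz')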
Example 4: load_data
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
def load_data():
    with open(expanduser('~/data/HCP_unmasked/data.json'), 'r') as f:
        data = json.load(f)
    for this_data in data:
        this_data['array'] += '.npy'
    mask_img = expanduser('~/data/HCP_mask/mask_img.nii.gz')
    masker = NiftiMasker(mask_img=mask_img, smoothing_fwhm=4,
                         standardize=True)
    masker.fit()
    smith2009 = fetch_atlas_smith_2009()
    init = smith2009.rsn70
    dict_init = masker.transform(init)
    return masker, dict_init, sorted(data, key=lambda t: t['filename'])
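Here fit() only loads the precomputed HCP mask, and transform() then projects the Smith 2009 RSN70 atlas into masked voxel space as a (components x voxels) array used as dictionary initialization. A minimal sketch of the same projection, computing its mask from the atlas itself instead of the HCP mask referenced above:

from nilearn.datasets import fetch_atlas_smith_2009
from nilearn.input_data import NiftiMasker

rsn70 = fetch_atlas_smith_2009().rsn70               # 4D image with 70 maps
masker = NiftiMasker(mask_strategy='background').fit(rsn70)
components = masker.transform(rsn70)                 # shape (70, n_voxels)
print(components.shape)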
Example 5: SmoothResampleMasker
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
class SmoothResampleMasker(BaseMasker):

    def __init__(self, mask_img=None, smoothing_fwhm=None, resampling=None, searchlight=False):
        self.mask_img = mask_img
        self.smoothing_fwhm = smoothing_fwhm
        self.resampling = resampling
        self.searchlight = searchlight

        self.masker = None

    def fit(self):
        if self.resampling is not None:
            self.mask_img = resample_img(self.mask_img, target_affine=np.diag(self.resampling * np.ones(3)))
        self.masker = NiftiMasker(mask_img=self.mask_img)
        self.masker.fit()

        return self

    def transform(self, imgs, confounds=None):
        smooth_prefix = '' if self.smoothing_fwhm is None else 's%g' % self.smoothing_fwhm
        resample_prefix = '' if self.resampling is None else 'r%g' % self.resampling

        if not isinstance(imgs, list):
            imgs = [imgs]

        path_first = imgs[0] if isinstance(imgs[0], str) else imgs[0].get_filename()

        path_first_resampled = os.path.join(os.path.dirname(path_first), resample_prefix + os.path.basename(path_first))
        path_first_smoothed = os.path.join(os.path.dirname(path_first), smooth_prefix + resample_prefix + os.path.basename(path_first))

        if self.resampling is not None and self.smoothing_fwhm is not None:
            if self.resampling is not None:
                if not os.path.exists(path_first_resampled) and not os.path.exists(path_first_smoothed):
                    imgs = resample_img(imgs, target_affine=np.diag(self.resampling * np.ones(3)))
                else:
                    imgs = []
            if self.smoothing_fwhm is not None:
                if not os.path.exists(path_first_smoothed):
                    imgs = smooth_img(imgs, self.smoothing_fwhm)
                else:
                    imgs = []
        else:
            imgs = [check_niimg_3d(img) for img in imgs]

        return self.masker.transform(imgs)
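A hypothetical usage sketch of the class above (placeholder paths; resample_img, smooth_img, check_niimg_3d and BaseMasker are assumed imported as in the original module). fit() resamples the mask to an isotropic grid, and transform() resamples and smooths the input images before masking them:

masker = SmoothResampleMasker(mask_img='mask.nii.gz',
                              smoothing_fwhm=6.0,    # mm
                              resampling=3.0).fit()  # 3 mm isotropic grid
data = masker.transform(['beta_0001.nii.gz', 'beta_0002.nii.gz'])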
Example 6: main
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
def main(output_dir, n_jobs):
    dir_list = [join(output_dir, f) for f in os.listdir(output_dir) if
                os.path.isdir(join(output_dir, f))]

    mask, func_filenames = get_hcp_data(raw=True)

    masker = NiftiMasker(mask_img=mask, smoothing_fwhm=None,
                         standardize=False)
    masker.fit()

    test_data = func_filenames[(-n_test_records * 2)::2]

    n_samples, n_voxels = np.load(test_data[-1], mmap_mode='r').shape
    X = np.empty((n_test_records * n_samples, n_voxels))

    for i, this_data in enumerate(test_data):
        X[i * n_samples:(i + 1) * n_samples] = np.load(this_data,
                                                       mmap_mode='r')

    Parallel(n_jobs=n_jobs, verbose=1, temp_folder='/dev/shm')(
        delayed(analyse_dir)(dir_name, X, masker) for dir_name in dir_list)
Example 7: get_init_objective
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
def get_init_objective(output_dir):
    mask, func_filenames = get_hcp_data(raw=True)

    masker = NiftiMasker(mask_img=mask, smoothing_fwhm=None,
                         standardize=False)
    masker.fit()

    rsn70 = fetch_atlas_smith_2009().rsn70
    components = masker.transform(rsn70)
    print(components.shape)
    enet_scale(components.T, inplace=True)
    print(np.sum(np.abs(components), axis=1))
    test_data = func_filenames[(-n_test_records * 2)::2]

    n_samples, n_voxels = np.load(test_data[-1], mmap_mode='r').shape
    X = np.empty((n_test_records * n_samples, n_voxels))

    for i, this_data in enumerate(test_data):
        X[i * n_samples:(i + 1) * n_samples] = np.load(this_data,
                                                       mmap_mode='r')
    exp_var = {}
    for alpha in [1e-2, 1e-3, 1e-4]:
        exp_var[alpha] = objective_function(X, components, alpha)

    json.dump(exp_var, open(join(output_dir, 'init_objective.json'), 'w'))
Example 8: print
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
# Load Miyawaki dataset
miyawaki_dataset = datasets.fetch_miyawaki2008()
# print basic information on the dataset
print('First functional nifti image (4D) is located at: %s' %
      miyawaki_dataset.func[0])  # 4D data
miyawaki_filename = miyawaki_dataset.func[0]
miyawaki_mean_img = image.mean_img(miyawaki_filename)
# This time, we can use the NiftiMasker without changing the default mask
# strategy, as the data has already been masked, and thus lies on a
# homogeneous background
masker = NiftiMasker()
masker.fit(miyawaki_filename)
plot_roi(masker.mask_img_, miyawaki_mean_img,
         title="Mask from already masked data")
###############################################################################
# From raw EPI data
# Load NYU resting-state dataset
nyu_dataset = datasets.fetch_nyu_rest(n_subjects=1)
nyu_filename = nyu_dataset.func[0]
nyu_img = nibabel.load(nyu_filename)
# Restrict nyu to 100 frames to speed up computation
from nilearn.image import index_img
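The snippet is cut off here; as a hedged sketch of the step it sets up (not the original continuation): keep the first 100 volumes with index_img, let NiftiMasker compute a mask with the 'epi' strategy, and plot it.

# Hedged sketch, not the original continuation of this example.
nyu_100_frames = index_img(nyu_img, slice(0, 100))
epi_masker = NiftiMasker(mask_strategy='epi')
epi_masker.fit(nyu_100_frames)
plot_roi(epi_masker.mask_img_, image.mean_img(nyu_100_frames),
         title="Mask computed from raw EPI data")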
Example 9: SecondLevelModel
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
class SecondLevelModel(BaseEstimator, TransformerMixin, CacheMixin):
    """ Implementation of the General Linear Model for multiple subject
    fMRI data

    Parameters
    ----------
    mask: Niimg-like, NiftiMasker or MultiNiftiMasker object, optional
        Mask to be used on data. If an instance of masker is passed,
        then its mask will be used. If no mask is given,
        it will be computed automatically by a MultiNiftiMasker with default
        parameters. Automatic mask computation assumes first level imgs have
        already been masked.

    smoothing_fwhm: float, optional
        If smoothing_fwhm is not None, it gives the size in millimeters of the
        spatial smoothing to apply to the signal.

    memory: string, optional
        Path to the directory used to cache the masking process and the glm
        fit. By default, no caching is done. Creates instance of joblib.Memory.

    memory_level: integer, optional
        Rough estimator of the amount of memory used by caching. Higher value
        means more memory for caching.

    verbose : integer, optional
        Indicate the level of verbosity. By default, nothing is printed.
        If 0 prints nothing. If 1 prints final computation time.
        If 2 prints masker computation details.

    n_jobs : integer, optional
        The number of CPUs to use to do the computation. -1 means
        'all CPUs', -2 'all CPUs but one', and so on.

    minimize_memory : boolean, optional
        Gets rid of some variables on the model fit results that are not
        necessary for contrast computation and would only be useful for
        further inspection of model details. This has an important impact
        on memory consumption. True by default.
    """

    def __init__(self, mask=None, smoothing_fwhm=None,
                 memory=Memory(None), memory_level=1, verbose=0,
                 n_jobs=1, minimize_memory=True):
        self.mask = mask
        self.smoothing_fwhm = smoothing_fwhm
        if isinstance(memory, _basestring):
            self.memory = Memory(memory)
        else:
            self.memory = memory
        self.memory_level = memory_level
        self.verbose = verbose
        self.n_jobs = n_jobs
        self.minimize_memory = minimize_memory
        self.second_level_input_ = None
        self.confounds_ = None

    def fit(self, second_level_input, confounds=None, design_matrix=None):
        """ Fit the second-level GLM

        1. create design matrix
        2. do a masker job: fMRI_data -> Y
        3. fit regression to (Y, X)

        Parameters
        ----------
        second_level_input: list of `FirstLevelModel` objects or pandas
                            DataFrame or list of Niimg-like objects.
            Giving FirstLevelModel objects will allow to easily compute
            the second level contrast of arbitrary first level contrasts
            thanks to the first_level_contrast argument of the
            compute_contrast method. Effect size images will be computed
            for each model to contrast at the second level.

            If a pandas DataFrame, then it has to contain subject_label,
            map_name and effects_map_path. It can contain multiple maps that
            would be selected during contrast estimation with the argument
            first_level_contrast of the compute_contrast function. The
            DataFrame will be sorted based on the subject_label column to
            avoid order inconsistencies when extracting the maps. So the rows
            of the automatically computed design matrix, if not provided,
            will correspond to the sorted subject_label column.

            If a list of Niimg-like objects, then this is taken literally as
            Y for the model fit and design_matrix must be provided.

        confounds: pandas DataFrame, optional
            Must contain a subject_label column. All other columns are
            considered as confounds and included in the model. If
            design_matrix is provided then this argument is ignored.
            The resulting second level design matrix uses the same column
            names as in the given DataFrame for confounds. At least two
            columns are expected, "subject_label" and at least one confound.

        design_matrix: pandas DataFrame, optional
            Design matrix to fit the GLM. The number of rows
            in the design matrix must agree with the number of maps derived
            from second_level_input.
# ... (rest of the code omitted) ...
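This class comes from nistats (later merged into nilearn.glm). A hypothetical usage sketch (placeholder file names): fit the model on a list of per-subject effect images with an intercept-only design matrix, then compute a contrast with the class's compute_contrast method:

import pandas as pd

effect_imgs = ['sub-01_effect.nii.gz', 'sub-02_effect.nii.gz',
               'sub-03_effect.nii.gz', 'sub-04_effect.nii.gz']
design_matrix = pd.DataFrame({'intercept': [1] * len(effect_imgs)})

model = SecondLevelModel(smoothing_fwhm=5.0)
model.fit(effect_imgs, design_matrix=design_matrix)
z_map = model.compute_contrast(second_level_contrast='intercept',
                               output_type='z_score')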
Example 10: NiftiMasker
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
# From already masked data
from nilearn.input_data import NiftiMasker
# Load Miyawaki dataset
miyawaki = datasets.fetch_miyawaki2008()
miyawaki_img = nibabel.load(miyawaki.func[0])
miyawaki_func = miyawaki_img.get_data()
background = np.mean(miyawaki_func, axis=-1)[..., 14]
# This time, we can use the NiftiMasker without changing the default mask
# strategy, as the data has already been masked, and thus lies on a
# homogeneous background
masker = NiftiMasker()
masker.fit(miyawaki_img)
default_mask = masker.mask_img_.get_data().astype(np.bool)
plt.figure(figsize=(4, 4.5))
display_mask(background, default_mask[..., 14], 'Default background mask')
plt.tight_layout()
###############################################################################
# From raw EPI data
# Load NYU resting-state dataset
nyu = datasets.fetch_nyu_rest(n_subjects=1)
nyu_img = nibabel.load(nyu.func[0])
# Restrict nyu to 100 frames to speed up computation
nyu_func = nyu_img.get_data()[..., :100]
Example 11: NiftiMasker
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
The mask is computed and visualized.
"""
### Load nyu_rest dataset #####################################################
from nilearn import datasets
from nilearn.input_data import NiftiMasker
dataset = datasets.fetch_nyu_rest(n_subjects=1)
### Compute the mask ##########################################################
# As this is raw resting-state EPI, the background is noisy and we cannot
# rely on the 'background' masking strategy. We need to use the 'epi' one
nifti_masker = NiftiMasker(standardize=False, mask_strategy='epi',
                           memory="nilearn_cache", memory_level=2)
nifti_masker.fit(dataset.func[0])
mask = nifti_masker.mask_img_.get_data()
### Visualize the mask ########################################################
import matplotlib.pyplot as plt
import numpy as np
import nibabel
plt.figure()
plt.axis('off')
plt.imshow(np.rot90(nibabel.load(dataset.func[0]).get_data()[..., 20, 0]),
           interpolation='nearest', cmap=plt.cm.gray)
ma = np.ma.masked_equal(mask, False)
plt.imshow(np.rot90(ma[..., 20]), interpolation='nearest', cmap=plt.cm.autumn,
           alpha=0.5)
plt.title("Mask")
Example 12: Hurst_Estimator
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
class Hurst_Estimator(BaseEstimator, TransformerMixin):
    """This class makes Hurst exponent estimation for Nifti files easier.

    First, initialise the underlying NiftiMasker with the corresponding
    elements:
        detrend
        low_pass
        high_pass
        t_r
        smoothing_fwhm
        memory
        memory_level
    See nilearn.input_data.NiftiMasker for more details.

    Then choose the metric and the regularization:
        metric: 'wavelet', 'dfa' or 'welch'
        regu: 'tv', 'l2', 'off'
        lbda: the weight of the regularization term in the cost function

    The fit method then computes the Hurst exponent map for each signal
    contained in the imgs Nifti files.
    """
    def __init__(self, mask=None, metric='wavelet', regu='tv', lbda=1, detrend=True,
                 low_pass=.1, high_pass=.01, t_r=1.05, smoothing_fwhm=6.,
                 memory='', memory_level=0, n_jobs=1, nb_vanishmoment=2,
                 norm=1, q=np.array(2), nbvoies=None,
                 distn=1, wtype=1, j1=2, j2=8):
        self.metric = metric
        self.mask = mask
        self.n_jobs = n_jobs
        self.nb_vanishmoment = nb_vanishmoment
        self.norm = norm
        self.q = q
        self.nbvoies = nbvoies
        self.distn = distn
        self.wtype = wtype
        self.j1 = j1
        self.j2 = j2
        self.regu = regu
        self.lbda = lbda

        if self.mask is None:
            self.masker = NiftiMasker(detrend=detrend,
                                      low_pass=low_pass,
                                      high_pass=high_pass,
                                      t_r=t_r,
                                      smoothing_fwhm=smoothing_fwhm,
                                      standardize=False,
                                      memory_level=memory_level,
                                      verbose=0)
        else:
            self.masker = NiftiMasker(mask_img=self.mask,
                                      detrend=detrend,
                                      low_pass=low_pass,
                                      high_pass=high_pass,
                                      t_r=t_r,
                                      smoothing_fwhm=smoothing_fwhm,
                                      standardize=False,
                                      memory_level=memory_level,
                                      verbose=0)
        self.masker.fit(self.mask)

    def fit(self, imgs):
        """ Compute the Hurst exponent map for each input image. """
        if self.metric == 'wavelet':
            jobs = (delayed(wavelet_worker)(img, self.masker, self.regu, self.lbda,
                                            self.nb_vanishmoment, self.norm,
                                            self.q, self.nbvoies,
                                            self.distn, self.wtype,
                                            self.j1, self.j2) for img in imgs)
        elif self.metric == 'dfa':
            jobs = (delayed(dfa_worker)(img, self.masker, self.regu, self.lbda,
                                        self.wtype,
                                        self.j1, self.j2) for img in imgs)
        elif self.metric == 'welch':
            jobs = (delayed(welch_worker)(img, self.masker, self.regu,
                                          self.lbda) for img in imgs)
        else:
            raise ValueError("the metric dico = %s is not yet implemented"
                             % (self.metric,))

        ts = Parallel(n_jobs=5, verbose=5)(jobs)
        self.hurst = ts
        return self.hurst

    def save(self, save_path='', save_file=None):
        if 'hurst' not in dir(self):
            os.write(1, 'Nothing to save !!')
            return
        if save_file is None:
            save_file = ('hurstmap_metric_' + self.metric +
                         '_regu_' + self.regu + str(self.lbda))
        save_file = os.path.join(save_path, save_file)
# ... (rest of the code omitted) ...
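A hypothetical usage sketch (placeholder file names; the wavelet/dfa/welch worker functions are assumed importable from the same module):

estimator = Hurst_Estimator(mask='gm_mask.nii.gz', metric='wavelet',
                            regu='tv', lbda=1, t_r=1.05)
hurst_maps = estimator.fit(['run1_bold.nii.gz', 'run2_bold.nii.gz'])
estimator.save(save_path='hurst_results')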
Example 13: non_parametric_inference
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
def non_parametric_inference(
        second_level_input, confounds=None, design_matrix=None,
        second_level_contrast=None, mask=None, smoothing_fwhm=None,
        model_intercept=True, n_perm=10000, two_sided_test=False,
        random_state=None, n_jobs=1, verbose=0):
    """Generate p-values corresponding to the contrasts provided
    based on permutation testing. This function reuses the 'permuted_ols'
    function from nilearn.

    Parameters
    ----------
    second_level_input: pandas DataFrame or list of Niimg-like objects.
        If a pandas DataFrame, then it has to contain subject_label,
        map_name and effects_map_path. It can contain multiple maps that
        would be selected during contrast estimation with the argument
        first_level_contrast of the compute_contrast function. The
        DataFrame will be sorted based on the subject_label column to avoid
        order inconsistencies when extracting the maps. So the rows of the
        automatically computed design matrix, if not provided, will
        correspond to the sorted subject_label column.

        If a list of Niimg-like objects, then this is taken literally as Y
        for the model fit and design_matrix must be provided.

    confounds: pandas DataFrame, optional
        Must contain a subject_label column. All other columns are
        considered as confounds and included in the model. If
        design_matrix is provided then this argument is ignored.
        The resulting second level design matrix uses the same column
        names as in the given DataFrame for confounds. At least two columns
        are expected, "subject_label" and at least one confound.

    design_matrix: pandas DataFrame, optional
        Design matrix to fit the GLM. The number of rows
        in the design matrix must agree with the number of maps derived
        from second_level_input.
        Ensure that the order of maps given by a second_level_input
        list of Niimgs matches the order of the rows in the design matrix.

    second_level_contrast: str or array of shape (n_col), optional
        Where ``n_col`` is the number of columns of the design matrix.
        The default (None) is accepted if the design matrix has a single
        column, in which case the only possible contrast array([1]) is
        applied; when the design matrix has multiple columns, an error is
        raised.

    mask: Niimg-like, NiftiMasker or MultiNiftiMasker object, optional
        Mask to be used on data. If an instance of masker is passed,
        then its mask will be used. If no mask is given,
        it will be computed automatically by a MultiNiftiMasker with default
        parameters. Automatic mask computation assumes first level imgs have
        already been masked.

    smoothing_fwhm: float, optional
        If smoothing_fwhm is not None, it gives the size in millimeters of the
        spatial smoothing to apply to the signal.

    model_intercept : bool, optional
        If True, a constant column is added to the confounding variates
        unless the tested variate is already the intercept.

    n_perm : int, optional
        Number of permutations to perform.
        Permutations are costly but the more are performed, the more
        precision one gets in the p-value estimation.

    two_sided_test : boolean, optional
        If True, performs an unsigned t-test. Both positive and negative
        effects are considered; the null hypothesis is that the effect is
        zero. If False, only positive effects are considered as relevant.
        The null hypothesis is that the effect is zero or negative.

    random_state : int or None, optional
        Seed for the random number generator, to have the same permutations
        in each computing unit.

    n_jobs : int, optional
        Number of parallel workers.
        If -1 is provided, all CPUs are used.
        A negative number indicates that all the CPUs except (abs(n_jobs) - 1)
        ones will be used.

    verbose: int, optional
        Verbosity level (0 means no message).

    Returns
    -------
    neg_log_corrected_pvals_img: Nifti1Image
        The image which contains the negative logarithm of the
        corrected p-values.
    """
    _check_second_level_input(second_level_input, design_matrix,
                              flm_object=False, df_object=False)
    _check_confounds(confounds)
    _check_design_matrix(design_matrix)

    # Report progress
    t0 = time.time()
    if verbose > 0:
# ... (rest of the code omitted) ...
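A hypothetical usage sketch (placeholder file names): a list of per-subject effect maps with an intercept-only design matrix yields an image of negative-log corrected p-values from the permutation test:

import pandas as pd

effect_maps = ['sub-01_effect.nii.gz', 'sub-02_effect.nii.gz',
               'sub-03_effect.nii.gz', 'sub-04_effect.nii.gz']
design_matrix = pd.DataFrame({'intercept': [1] * len(effect_maps)})

neg_log_pvals_img = non_parametric_inference(effect_maps,
                                             design_matrix=design_matrix,
                                             second_level_contrast='intercept',
                                             n_perm=1000, n_jobs=2)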
Example 14: NiftiMasker
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
n_events = 200
n_blank_events = 50
event_spacing = 6
jitter_min, jitter_max = -1, 1
t_r = 2
smoothing_fwhm = 1
sigma = 2
#sigma_noise = 0.000001
threshold = 0.7
period_cut = 512
drift_order = 1
mask_img = nb.Nifti1Image(np.ones((n_x, n_y, n_z)), affine=np.eye(4))
masker = NiftiMasker(mask_img=mask_img)
masker.fit()
#HRF peak
peak_range_sim = np.arange(3, 11)
peak_range = np.arange(3, 11)
hrf_ushoot = 16.
norm_resid = np.zeros((len(peak_range), len(peak_range)))
i = 0
for sigma_noise in np.array([2., 10., 0.01]):
    for isim, hrf_peak_sim in enumerate(peak_range_sim):
        # Simulate with different hrf peaks
Example 15: Masker
# Required import: from nilearn.input_data import NiftiMasker [as alias]
# Or: from nilearn.input_data.NiftiMasker import fit [as alias]
class Masker(object):
    """
    Class that takes a binary mask.nii file and allows us to use it
    within a volumizer in order to reduce the dimensionality of our data in
    realtime.

    If we have other ROI masks (e.g. wm, csf), we can use them to detrend the
    data by setting them as orthogonals.
    """

    def __init__(self, mask_img, center=None, radius=8):
        self.mask_img = mask_img
        self.masker = NiftiMasker(mask_img=mask_img)
        self.fit = False

        # set the mask center
        if center is None:
            self.center = self.find_center_of_mass(self.masker)
        else:
            self.center = center
        print("Center=", center)
        print("COM calc=", self.find_center_of_mass(self.masker))

        # the radius of the mask, used for determining what data to read.
        self.radius = radius

        self.orthogonals = []
        self.use_orthogonal = False
        self.ortho_fits = []

    def reduce_volume(self, volume, method='mean'):
        if not self.fit:
            self.masker.fit(volume)
        if method == 'mean':
            reduced = npm(self.masker.transform(volume['image']))
        return reduced

    def find_center_of_mass(self, niftimasker):
        """
        Find the center of mass of an image given a nifti masker object
        in the z plane. We can use this information to only select the dicoms
        we need in a DicomFilter object.
        """
        com = measurements.center_of_mass(
            nibabel.load(niftimasker.mask_img).get_data())
        affine = nibabel.load(niftimasker.mask_img).affine
        offset = affine[0:3, 3]
        tcom = np.dot(affine[0:3, 0:3], com) + offset
        return tcom[2]

    def add_orthogonal(self, mask_img):
        # add another mask_img to our orthogonals with get_orthogonal
        self.use_orthogonal = True
        self.orthogonals.append(NiftiMasker(mask_img=mask_img))
        self.ortho_fits.append(False)

    def get_orthogonals(self, volume):
        """
        Return a list of ROI averages for a volume given a set of
        orthogonal masks.
        """
        for i, fit in enumerate(self.ortho_fits):
            if not fit:
                self.orthogonals[i].fit(volume)

        return [npm(x.transform(volume['image'])) for x in self.orthogonals]
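A hypothetical usage sketch (placeholder paths; the 'volume' dict and the npm reduction helper are assumptions inferred from how they are used above):

roi_masker = Masker('motor_roi.nii.gz', radius=8)
roi_masker.add_orthogonal('white_matter_roi.nii.gz')

volume = {'image': 'current_volume.nii.gz'}   # mimics the volumizer's dict
roi_mean = roi_masker.reduce_volume(volume)
nuisance_means = roi_masker.get_orthogonals(volume)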