本文整理汇总了Python中mne.source_space.read_source_spaces函数的典型用法代码示例。如果您正苦于以下问题:Python read_source_spaces函数的具体用法?Python read_source_spaces怎么用?Python read_source_spaces使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了read_source_spaces函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_plot_sparse_source_estimates
def test_plot_sparse_source_estimates():
    """Test plotting of (sparse) source estimates."""
    sample_src = read_source_spaces(src_fname)

    # --- dense version ---
    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.zeros((n_verts * n_time))
    stc_size = stc_data.size
    # Scatter random amplitudes over ~5% of the samples.
    # NOTE(review): the index positions come from the unseeded global RNG
    # while the values come from RandomState(0) -- kept as-is to preserve
    # the original behavior.
    stc_data[(np.random.rand(stc_size // 20) * stc_size).astype(int)] = \
        np.random.RandomState(0).rand(stc_data.size // 20)
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    colormap = 'mne_analyze'
    plot_source_estimates(stc, 'sample', colormap=colormap,
                          background=(1, 1, 0),
                          subjects_dir=subjects_dir, colorbar=True,
                          clim='auto')
    # An invalid `figure` argument must raise TypeError.
    pytest.raises(TypeError, plot_source_estimates, stc, 'sample',
                  figure='foo', hemi='both', clim='auto',
                  subjects_dir=subjects_dir)

    # --- sparse version ---
    vertices = sample_src[0]['vertno']
    inds = [111, 333]
    stc_data = np.zeros((len(inds), n_time))
    stc_data[0, 1] = 1.
    stc_data[1, 4] = 2.
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement.
    vertices = [vertices[inds], np.empty(0, dtype=int)]
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1),
                                 opacity=0.5, high_resolution=False)
示例2: test_stc_mpl
def test_stc_mpl():
    """Test plotting source estimates with matplotlib."""
    src = read_source_spaces(src_fname)
    verts = [hemi_src['vertno'] for hemi_src in src]
    n_time = 5
    n_verts = sum(len(v) for v in verts)
    # All-ones data: the content is irrelevant, only the plotting paths
    # are exercised here.
    stc = SourceEstimate(np.ones((n_verts, n_time)), verts, 1, 1, 'sample')
    # Vertices missing from the low-res 'oct1' spacing trigger a warning.
    with pytest.warns(RuntimeWarning, match='not included'):
        stc.plot(subjects_dir=subjects_dir, time_unit='s', views='ven',
                 hemi='rh', smoothing_steps=2, subject='sample',
                 backend='matplotlib', spacing='oct1', initial_time=0.001,
                 colormap='Reds')
    fig = stc.plot(subjects_dir=subjects_dir, time_unit='ms', views='dor',
                   hemi='lh', smoothing_steps=2, subject='sample',
                   backend='matplotlib', spacing='ico2', time_viewer=True,
                   colormap='mne')
    viewer = fig.time_viewer
    _fake_click(viewer, viewer.axes[0], (0.5, 0.5))  # change current time
    viewer.canvas.key_press_event('ctrl+right')
    viewer.canvas.key_press_event('left')
    # Invalid hemi / time_unit values must raise.
    pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
                  hemi='both', subject='sample', backend='matplotlib')
    pytest.raises(ValueError, stc.plot, subjects_dir=subjects_dir,
                  time_unit='ss', subject='sample', backend='matplotlib')
    plt.close('all')
示例3: test_plot_sparse_source_estimates
def test_plot_sparse_source_estimates():
    """Test plotting of (sparse) source estimates."""
    sample_src = read_source_spaces(op.join(data_dir, 'subjects', 'sample',
                                            'bem', 'sample-oct-6-src.fif'))

    # --- dense version ---
    vertices = [s['vertno'] for s in sample_src]
    n_time = 5
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.zeros((n_verts * n_time))
    # Light up ~20 random samples.
    stc_data[(np.random.rand(20) * n_verts * n_time).astype(int)] = 1
    stc_data.shape = (n_verts, n_time)
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    colormap = mne_analyze_colormap(format='matplotlib')
    # don't really need to test matplotlib method since it's not used now...
    colormap = mne_analyze_colormap()
    plot_source_estimates(stc, 'sample', colormap=colormap,
                          config_opts={'background': (1, 1, 0)},
                          subjects_dir=subjects_dir, colorbar=True)
    # An invalid `figure` argument must raise TypeError.
    assert_raises(TypeError, plot_source_estimates, stc, 'sample',
                  figure='foo', hemi='both')

    # --- sparse version ---
    vertices = sample_src[0]['vertno']
    n_verts = len(vertices)
    stc_data = np.zeros((n_verts * n_time))
    stc_data[(np.random.rand(20) * n_verts * n_time).astype(int)] = 1
    stc_data.shape = (n_verts, n_time)
    # Keep only vertices with at least one non-zero time point.
    inds = np.where(np.any(stc_data, axis=1))[0]
    stc_data = stc_data[inds]
    # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin `int` is the documented replacement.
    vertices = [vertices[inds], np.empty(0, dtype=int)]
    stc = SourceEstimate(stc_data, vertices, 1, 1)
    plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1),
                                 opacity=0.5, high_resolution=True)
示例4: test_plot_vec_source_estimates
def test_plot_vec_source_estimates():
    """Test plotting of vector source estimates."""
    src = read_source_spaces(src_fname)
    verts = [s['vertno'] for s in src]
    n_verts = sum(len(v) for v in verts)
    n_time = 5
    # Seeded random (n_verts, 3, n_time) vector data.
    rng = np.random.RandomState(0)
    stc = VectorSourceEstimate(rng.rand(n_verts, 3, n_time), verts, 1, 1)
    # Plotting should succeed; warnings are recorded but not checked.
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        stc.plot('sample', subjects_dir=subjects_dir)
示例5: test_limits_to_control_points
def test_limits_to_control_points():
    """Test functionality for determining control points."""
    src = read_source_spaces(src_fname)
    verts = [s['vertno'] for s in src]
    n_time = 5
    n_verts = sum(len(v) for v in verts)
    data = np.random.rand((n_verts * n_time))
    data.shape = (n_verts, n_time)
    stc = SourceEstimate(data, verts, 1, 1, 'sample')

    # Simple use cases.
    from mayavi import mlab
    mlab.close()
    stc.plot(clim='auto', subjects_dir=subjects_dir)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), subjects_dir=subjects_dir)
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
             subjects_dir=subjects_dir)
    with warnings.catch_warnings(record=True):  # deprecated fmin argument
        stc.plot(fmin=1, subjects_dir=subjects_dir)
    stc.plot(colormap='hot', clim='auto', subjects_dir=subjects_dir)
    stc.plot(colormap='mne', clim='auto', subjects_dir=subjects_dir)
    figs = [mlab.figure(), mlab.figure()]
    assert_raises(RuntimeError, stc.plot, clim='auto', figure=figs)

    # Mismatched limits key (lims vs pos_lims) for the chosen colormap.
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)))
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)))

    # Malformed clim values must raise.
    colormap = 'mne'
    assert_raises(ValueError, stc.plot, colormap=colormap,
                  clim=dict(pos_lims=(5, 10, 15, 20)))
    assert_raises(ValueError, stc.plot, colormap=colormap,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'))
    assert_raises(ValueError, stc.plot, colormap=colormap,
                  clim=dict(kind='value', pos_lims=(5, 10, 15)), fmin=1)
    assert_raises(ValueError, stc.plot, colormap=colormap, clim='foo')
    assert_raises(ValueError, stc.plot, colormap=colormap, clim=(5, 10, 15))
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto')
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto')

    # stc.data must contain enough unique values to use percentage limits.
    clim = 'auto'
    stc._data = np.zeros_like(stc.data)
    assert_raises(ValueError, plot_source_estimates, stc,
                  colormap=colormap, clim=clim)
    mlab.close()
示例6: test_plot_source_spectrogram
def test_plot_source_spectrogram():
    """Test plotting of source spectrograms."""
    src = read_source_spaces(op.join(data_dir, 'subjects', 'sample',
                                     'bem', 'sample-oct-6-src.fif'))
    verts = [s['vertno'] for s in src]
    n_time = 5
    n_verts = sum(len(v) for v in verts)
    # All-ones data: only the plotting path matters here.
    stc = SourceEstimate(np.ones((n_verts, n_time)), verts, 1, 1)
    plot_source_spectrogram([stc, stc], [[1, 2], [3, 4]])
    # Empty inputs are rejected.
    assert_raises(ValueError, plot_source_spectrogram, [], [])
示例7: test_limits_to_control_points
def test_limits_to_control_points():
    """Test functionality for determining control points."""
    src = read_source_spaces(src_fname)
    kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1)
    verts = [s['vertno'] for s in src]
    n_time = 5
    n_verts = sum(len(v) for v in verts)
    rng = np.random.RandomState(0)
    data = rng.rand((n_verts * n_time))
    data.shape = (n_verts, n_time)
    stc = SourceEstimate(data, verts, 1, 1, 'sample')

    # Simple use cases.
    mlab = _import_mlab()
    stc.plot(**kwargs)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
    stc.plot(colormap='hot', clim='auto', **kwargs)
    stc.plot(colormap='mne', clim='auto', **kwargs)
    figs = [mlab.figure(), mlab.figure()]
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
    assert_raises(ValueError, stc.plot, clim='auto', figure=figs, **kwargs)

    # Mismatched limits key (lims vs pos_lims) for the chosen colormap.
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)), **kwargs)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)), **kwargs)

    # Malformed clim values must raise.
    assert_raises(ValueError, stc.plot,
                  clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
    assert_raises(ValueError, stc.plot, colormap='mne',
                  clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
    assert_raises(ValueError, stc.plot,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo', **kwargs)
    assert_raises(ValueError, stc.plot, clim=(5, 10, 15), **kwargs)
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
                  **kwargs)
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto', **kwargs)

    # Degenerate (all-zero) data should warn about thresholded maps.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        stc._data.fill(0.)
        plot_source_estimates(stc, **kwargs)
        assert any('All data were zero' in str(ww.message) for ww in w)
    mlab.close(all=True)
示例8: test_plot_vec_source_estimates
def test_plot_vec_source_estimates():
    """Test plotting of vector source estimates."""
    src = read_source_spaces(src_fname)
    verts = [s['vertno'] for s in src]
    n_verts = sum(len(v) for v in verts)
    n_time = 5
    # Seeded random (n_verts, 3, n_time) vector data.
    rng = np.random.RandomState(0)
    stc = VectorSourceEstimate(rng.rand(n_verts, 3, n_time), verts, 1, 1)
    stc.plot('sample', subjects_dir=subjects_dir)
    # Vector STCs require symmetric limits via "pos_lims".
    with pytest.raises(ValueError, match='use "pos_lims"'):
        stc.plot('sample', subjects_dir=subjects_dir,
                 clim=dict(pos_lims=[1, 2, 3]))
示例9: test_limits_to_control_points
def test_limits_to_control_points():
    """Test functionality for determining control points."""
    src = read_source_spaces(src_fname)
    kwargs = dict(subjects_dir=subjects_dir, smoothing_steps=1)
    verts = [s['vertno'] for s in src]
    n_time = 5
    n_verts = sum(len(v) for v in verts)
    rng = np.random.RandomState(0)
    data = rng.rand((n_verts * n_time))
    data.shape = (n_verts, n_time)
    stc = SourceEstimate(data, verts, 1, 1, 'sample')

    # Simple use cases.
    mlab = _import_mlab()
    stc.plot(**kwargs)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), **kwargs)
    stc.plot(colormap='hot', clim='auto', **kwargs)
    stc.plot(colormap='mne', clim='auto', **kwargs)
    figs = [mlab.figure(), mlab.figure()]
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99, **kwargs)
    pytest.raises(ValueError, stc.plot, clim='auto', figure=figs, **kwargs)

    # Malformed clim values must raise with informative messages.
    with pytest.raises(ValueError, match='monotonically'):
        stc.plot(clim=dict(kind='value', pos_lims=[0, 1, 0]), **kwargs)
    with pytest.raises(ValueError, match=r'.*must be \(3,\)'):
        stc.plot(colormap='mne', clim=dict(pos_lims=(5, 10, 15, 20)), **kwargs)
    with pytest.raises(ValueError, match='must be "value" or "percent"'):
        stc.plot(clim=dict(pos_lims=(5, 10, 15), kind='foo'), **kwargs)
    with pytest.raises(ValueError, match='must be "auto" or dict'):
        stc.plot(colormap='mne', clim='foo', **kwargs)
    with pytest.raises(TypeError, match='must be an instance of'):
        plot_source_estimates('foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='hemi'):
        stc.plot(hemi='foo', clim='auto', **kwargs)
    with pytest.raises(ValueError, match='Exactly one'):
        stc.plot(clim=dict(lims=[0, 1, 2], pos_lims=[0, 1, 2], kind='value'),
                 **kwargs)

    # Degenerate data: all-zero (thresholded) maps warn.
    stc._data.fill(0.)
    with pytest.warns(RuntimeWarning, match='All data were zero'):
        plot_source_estimates(stc, **kwargs)
    mlab.close(all=True)
示例10: test_limits_to_control_points
def test_limits_to_control_points():
    """Test functionality for determining control points."""
    src = read_source_spaces(src_fname)
    verts = [s['vertno'] for s in src]
    n_time = 5
    n_verts = sum(len(v) for v in verts)
    data = np.zeros((n_verts * n_time))
    # Light up ~20 random samples.
    data[(np.random.rand(20) * n_verts * n_time).astype(int)] = 1
    data.shape = (n_verts, n_time)
    stc = SourceEstimate(data, verts, 1, 1)

    # Mismatched limits key (lims vs pos_lims) for the chosen colormap.
    assert_raises(KeyError, plot_source_estimates, stc, 'sample',
                  colormap='mne_analyze',
                  clim=dict(kind='value', lims=(5, 10, 15)))
    assert_raises(KeyError, plot_source_estimates, stc, 'sample',
                  colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)))

    # Malformed clim values must raise.
    assert_raises(ValueError, plot_source_estimates, stc, 'sample',
                  colormap='mne_analyze',
                  clim=dict(kind='value', pos_lims=(5, 10, 15, 20)))
    assert_raises(ValueError, plot_source_estimates, stc, 'sample',
                  colormap='mne_analyze', clim='foo')
    assert_raises(ValueError, plot_source_estimates, stc, 'sample',
                  colormap='mne_analyze', clim=(5, 10, 15))

    # stc.data must contain enough unique values to use percentage limits.
    stc._data = np.zeros_like(stc.data)
    assert_raises(ValueError, plot_source_estimates, stc, 'sample',
                  colormap='mne_analyze', clim='auto')
示例11: test_plot_trans
def test_plot_trans():
    """Test plotting of -trans.fif files and MEG sensor layouts."""
    # Generate a fiducials file for testing.
    tempdir = _TempDir()
    fiducials_path = op.join(tempdir, 'fiducials.fif')
    fid = [{'coord_frame': 5, 'ident': 1, 'kind': 1,
            'r': [-0.08061612, -0.02908875, -0.04131077]},
           {'coord_frame': 5, 'ident': 2, 'kind': 1,
            'r': [0.00146763, 0.08506715, -0.03483611]},
           {'coord_frame': 5, 'ident': 3, 'kind': 1,
            'r': [0.08436285, -0.02850276, -0.04127743]}]
    write_dig(fiducials_path, fid, 5)
    mlab = _import_mlab()
    evoked = read_evokeds(evoked_fname)[0]
    sample_src = read_source_spaces(src_fname)
    with warnings.catch_warnings(record=True):  # 4D weight tables
        bti = read_raw_bti(pdf_fname, config_fname, hs_fname, convert=True,
                           preload=False).info
    infos = dict(
        Neuromag=evoked.info,
        CTF=read_raw_ctf(ctf_fname).info,
        BTi=bti,
        KIT=read_raw_kit(sqd_fname).info,
    )
    for system, info in infos.items():
        # KIT reference sensors are handled separately below.
        ref_meg = False if system == 'KIT' else True
        plot_trans(info, trans_fname, subject='sample', meg_sensors=True,
                   subjects_dir=subjects_dir, ref_meg=ref_meg)
        mlab.close(all=True)
    # KIT ref sensor coil def is defined
    plot_trans(infos['KIT'], None, meg_sensors=True, ref_meg=True)
    mlab.close(all=True)
    info = infos['Neuromag']
    assert_raises(TypeError, plot_trans, 'foo', trans_fname,
                  subject='sample', subjects_dir=subjects_dir)
    assert_raises(TypeError, plot_trans, info, trans_fname,
                  subject='sample', subjects_dir=subjects_dir, src='foo')
    assert_raises(ValueError, plot_trans, info, trans_fname,
                  subject='fsaverage', subjects_dir=subjects_dir,
                  src=sample_src)
    sample_src.plot(subjects_dir=subjects_dir)
    mlab.close(all=True)
    # no-head version
    plot_trans(info, None, meg_sensors=True, dig=True, coord_frame='head')
    mlab.close(all=True)
    # all coord frames
    for coord_frame in ('meg', 'head', 'mri'):
        plot_trans(info, meg_sensors=True, dig=True, coord_frame=coord_frame,
                   trans=trans_fname, subject='sample',
                   mri_fiducials=fiducials_path, subjects_dir=subjects_dir)
        mlab.close(all=True)
    # EEG only with strange options
    evoked_eeg_ecog = evoked.copy().pick_types(meg=False, eeg=True)
    evoked_eeg_ecog.info['projs'] = []  # "remove" avg proj
    evoked_eeg_ecog.set_channel_types({'EEG 001': 'ecog'})
    with warnings.catch_warnings(record=True) as w:
        plot_trans(evoked_eeg_ecog.info, subject='sample', trans=trans_fname,
                   source='outer_skin', meg_sensors=True, skull=True,
                   eeg_sensors=['original', 'projected'], ecog_sensors=True,
                   brain='white', head=True, subjects_dir=subjects_dir)
    mlab.close(all=True)
    # BUG FIX: the original asserted on the list comprehension itself, which
    # is truthy whenever *any* warning was raised, regardless of its message.
    # Use any() so the assertion actually checks for the expected warning.
    assert_true(any('Cannot plot MEG' in str(ww.message) for ww in w))
示例12: test_limits_to_control_points
def test_limits_to_control_points():
    """Test functionality for determining control points."""
    src = read_source_spaces(src_fname)
    verts = [s['vertno'] for s in src]
    n_time = 5
    n_verts = sum(len(v) for v in verts)
    rng = np.random.RandomState(0)
    data = rng.rand((n_verts * n_time))
    data.shape = (n_verts, n_time)
    stc = SourceEstimate(data, verts, 1, 1, 'sample')

    # Simple use cases.
    from mayavi import mlab
    stc.plot(subjects_dir=subjects_dir)
    stc.plot(clim=dict(pos_lims=(10, 50, 90)), subjects_dir=subjects_dir)
    stc.plot(clim=dict(kind='value', lims=(10, 50, 90)), figure=99,
             subjects_dir=subjects_dir)
    stc.plot(colormap='hot', clim='auto', subjects_dir=subjects_dir)
    stc.plot(colormap='mne', clim='auto', subjects_dir=subjects_dir)
    figs = [mlab.figure(), mlab.figure()]
    assert_raises(RuntimeError, stc.plot, clim='auto', figure=figs,
                  subjects_dir=subjects_dir)

    # Mismatched limits key (lims vs pos_lims) for the chosen colormap.
    assert_raises(KeyError, plot_source_estimates, stc, colormap='mne',
                  clim=dict(kind='value', lims=(5, 10, 15)),
                  subjects_dir=subjects_dir)
    assert_raises(KeyError, plot_source_estimates, stc, colormap='hot',
                  clim=dict(kind='value', pos_lims=(5, 10, 15)),
                  subjects_dir=subjects_dir)

    # Malformed clim values must raise.
    assert_raises(ValueError, stc.plot,
                  clim=dict(kind='value', pos_lims=[0, 1, 0]),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, colormap='mne',
                  clim=dict(pos_lims=(5, 10, 15, 20)),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot,
                  clim=dict(pos_lims=(5, 10, 15), kind='foo'),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, colormap='mne', clim='foo',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, clim=(5, 10, 15),
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, plot_source_estimates, 'foo', clim='auto',
                  subjects_dir=subjects_dir)
    assert_raises(ValueError, stc.plot, hemi='foo', clim='auto',
                  subjects_dir=subjects_dir)

    # Degenerate data handling.
    stc.plot(clim=dict(kind='value', lims=[0, 0, 1]),
             subjects_dir=subjects_dir)  # ok
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        # Constant non-zero data: no thresholded-map warning expected.
        stc._data.fill(1.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 0)
        # One zeroed row: still no warning.
        stc._data[0].fill(0.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 0)
        # All zeros: exactly one warning.
        stc._data.fill(0.)
        plot_source_estimates(stc, subjects_dir=subjects_dir)
        assert_equal(len(w), 1)
    mlab.close()
示例13: dec
def dec(*args, **kwargs):
if not check_sklearn_version(min_version='0.12'):
from nose.plugins.skip import SkipTest
raise SkipTest('Test %s skipped, requires scikit-learn >= 0.12'
% function.__name__)
ret = function(*args, **kwargs)
return ret
return dec
# Module-level test fixtures and file paths.
# FIX: the `if` body indentation was lost in extraction; restored here so the
# module is valid Python again.
if not lacks_mayavi:
    mlab.options.backend = 'test'  # offscreen/test backend for mayavi

data_dir = data_path()
subjects_dir = op.join(data_dir, 'subjects')
sample_src = read_source_spaces(op.join(data_dir, 'subjects', 'sample',
                                        'bem', 'sample-oct-6-src.fif'))
ecg_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis_ecg_proj.fif')
evoked_fname = op.join(data_dir, 'MEG', 'sample', 'sample_audvis-ave.fif')
base_dir = op.join(op.dirname(__file__), '..', 'fiff', 'tests', 'data')
fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
event_id, tmin, tmax = 1, -0.2, 0.5
n_chan = 15
raw = fiff.Raw(raw_fname, preload=False)
events = read_events(event_name)
picks = fiff.pick_types(raw.info, meg=True, eeg=False, stim=False,
                        ecg=False, eog=False, exclude='bads')
# Use a subset of channels for plotting speed
示例14: _make_forward_solutions
def _make_forward_solutions(info, mri, src, bem, bem_eog, dev_head_ts, mindist,
chpi_rrs, eog_rrs, ecg_rrs, n_jobs):
"""Calculate a forward solution for a subject
Parameters
----------
info : instance of mne.io.meas_info.Info | str
If str, then it should be a filename to a Raw, Epochs, or Evoked
file with measurement information. If dict, should be an info
dict (such as one from Raw, Epochs, or Evoked).
mri : dict | str
Either a transformation filename (usually made using mne_analyze)
or an info dict (usually opened using read_trans()).
If string, an ending of `.fif` or `.fif.gz` will be assumed to
be in FIF format, any other ending will be assumed to be a text
file with a 4x4 transformation matrix (like the `--trans` MNE-C
option).
src : str | instance of SourceSpaces
If string, should be a source space filename. Can also be an
instance of loaded or generated SourceSpaces.
bem : str
Filename of the BEM (e.g., "sample-5120-5120-5120-bem-sol.fif") to
use.
bem_eog : dict
Spherical BEM to use for EOG (and ECG) simulation.
dev_head_ts : list
List of device<->head transforms.
mindist : float
Minimum distance of sources from inner skull surface (in mm).
chpi_rrs : ndarray
CHPI dipoles to simulate (magnetic dipoles).
eog_rrs : ndarray
EOG dipoles to simulate.
ecg_rrs : ndarray
ECG dipoles to simulate.
n_jobs : int
Number of jobs to run in parallel.
Returns
-------
fwd : generator
A generator for each forward solution in dev_head_ts.
Notes
-----
Some of the forward solution calculation options from the C code
(e.g., `--grad`, `--fixed`) are not implemented here. For those,
consider using the C command line tools or the Python wrapper
`do_forward_solution`.
"""
mri_head_t, mri = _get_mri_head_t(mri)
assert mri_head_t['from'] == FIFF.FIFFV_COORD_MRI
if not isinstance(src, string_types):
if not isinstance(src, SourceSpaces):
raise TypeError('src must be a string or SourceSpaces')
else:
if not op.isfile(src):
raise IOError('Source space file "%s" not found' % src)
if isinstance(bem, dict):
bem_extra = 'dict'
else:
bem_extra = bem
if not op.isfile(bem):
raise IOError('BEM file "%s" not found' % bem)
if not isinstance(info, (dict, string_types)):
raise TypeError('info should be a dict or string')
if isinstance(info, string_types):
info = read_info(info, verbose=False)
# set default forward solution coordinate frame to HEAD
# this could, in principle, be an option
coord_frame = FIFF.FIFFV_COORD_HEAD
# Report the setup
logger.info('Setting up forward solutions')
# Read the source locations
if isinstance(src, string_types):
src = read_source_spaces(src, verbose=False)
else:
# let's make a copy in case we modify something
src = src.copy()
nsource = sum(s['nuse'] for s in src)
if nsource == 0:
raise RuntimeError('No sources are active in these source spaces. '
'"do_all" option should be used.')
logger.info('Read %d source spaces a total of %d active source locations'
% (len(src), nsource))
# make a new dict with the relevant information
mri_id = dict(machid=np.zeros(2, np.int32), version=0, secs=0, usecs=0)
info = dict(nchan=info['nchan'], chs=info['chs'], comps=info['comps'],
ch_names=info['ch_names'],
mri_file='', mri_id=mri_id, meas_file='',
meas_id=None, working_dir=os.getcwd(),
command_line='', bads=info['bads'])
# Only get the EEG channels here b/c we can do MEG later
_, _, eegels, _, eegnames, _ = \
#.........这里部分代码省略.........
示例15: test_plot_alignment
def test_plot_alignment():
"""Test plotting of -trans.fif files and MEG sensor layouts."""
# generate fiducials file for testing
tempdir = _TempDir()
fiducials_path = op.join(tempdir, 'fiducials.fif')
fid = [{'coord_frame': 5, 'ident': 1, 'kind': 1,
'r': [-0.08061612, -0.02908875, -0.04131077]},
{'coord_frame': 5, 'ident': 2, 'kind': 1,
'r': [0.00146763, 0.08506715, -0.03483611]},
{'coord_frame': 5, 'ident': 3, 'kind': 1,
'r': [0.08436285, -0.02850276, -0.04127743]}]
write_dig(fiducials_path, fid, 5)
mlab = _import_mlab()
evoked = read_evokeds(evoked_fname)[0]
sample_src = read_source_spaces(src_fname)
with warnings.catch_warnings(record=True): # 4D weight tables
bti = read_raw_bti(pdf_fname, config_fname, hs_fname, convert=True,
preload=False).info
infos = dict(
Neuromag=evoked.info,
CTF=read_raw_ctf(ctf_fname).info,
BTi=bti,
KIT=read_raw_kit(sqd_fname).info,
)
for system, info in infos.items():
meg = ['helmet', 'sensors']
if system == 'KIT':
meg.append('ref')
plot_alignment(info, trans_fname, subject='sample',
subjects_dir=subjects_dir, meg=meg)
mlab.close(all=True)
# KIT ref sensor coil def is defined
mlab.close(all=True)
info = infos['Neuromag']
assert_raises(TypeError, plot_alignment, 'foo', trans_fname,
subject='sample', subjects_dir=subjects_dir)
assert_raises(TypeError, plot_alignment, info, trans_fname,
subject='sample', subjects_dir=subjects_dir, src='foo')
assert_raises(ValueError, plot_alignment, info, trans_fname,
subject='fsaverage', subjects_dir=subjects_dir,
src=sample_src)
sample_src.plot(subjects_dir=subjects_dir, head=True, skull=True,
brain='white')
mlab.close(all=True)
# no-head version
mlab.close(all=True)
# all coord frames
assert_raises(ValueError, plot_alignment, info)
plot_alignment(info, surfaces=[])
for coord_frame in ('meg', 'head', 'mri'):
plot_alignment(info, meg=['helmet', 'sensors'], dig=True,
coord_frame=coord_frame, trans=trans_fname,
subject='sample', mri_fiducials=fiducials_path,
subjects_dir=subjects_dir, src=sample_src)
mlab.close(all=True)
# EEG only with strange options
evoked_eeg_ecog = evoked.copy().pick_types(meg=False, eeg=True)
evoked_eeg_ecog.info['projs'] = [] # "remove" avg proj
evoked_eeg_ecog.set_channel_types({'EEG 001': 'ecog'})
with warnings.catch_warnings(record=True) as w:
plot_alignment(evoked_eeg_ecog.info, subject='sample',
trans=trans_fname, subjects_dir=subjects_dir,
surfaces=['white', 'outer_skin', 'outer_skull'],
meg=['helmet', 'sensors'],
eeg=['original', 'projected'], ecog=True)
mlab.close(all=True)
assert_true(['Cannot plot MEG' in str(ww.message) for ww in w])
sphere = make_sphere_model(info=evoked.info, r0='auto', head_radius='auto')
bem_sol = read_bem_solution(op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif'))
bem_surfs = read_bem_surfaces(op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem.fif'))
sample_src[0]['coord_frame'] = 4 # hack for coverage
plot_alignment(info, subject='sample', eeg='projected',
meg='helmet', bem=sphere, dig=True,
surfaces=['brain', 'inner_skull', 'outer_skull',
'outer_skin'])
plot_alignment(info, trans_fname, subject='sample', meg='helmet',
subjects_dir=subjects_dir, eeg='projected', bem=sphere,
surfaces=['head', 'brain'], src=sample_src)
plot_alignment(info, trans_fname, subject='sample', meg=[],
subjects_dir=subjects_dir, bem=bem_sol, eeg=True,
surfaces=['head', 'inflated', 'outer_skull', 'inner_skull'])
plot_alignment(info, trans_fname, subject='sample',
meg=True, subjects_dir=subjects_dir,
surfaces=['head', 'inner_skull'], bem=bem_surfs)
sphere = make_sphere_model('auto', 'auto', evoked.info)
src = setup_volume_source_space(sphere=sphere)
plot_alignment(info, eeg='projected', meg='helmet', bem=sphere,
src=src, dig=True, surfaces=['brain', 'inner_skull',
'outer_skull', 'outer_skin'])
sphere = make_sphere_model('auto', None, evoked.info) # one layer
plot_alignment(info, trans_fname, subject='sample', meg=False,
coord_frame='mri', subjects_dir=subjects_dir,
surfaces=['brain'], bem=sphere, show_axes=True)
# one layer bem with skull surfaces:
assert_raises(ValueError, plot_alignment, info=info, trans=trans_fname,
#.........这里部分代码省略.........