This article collects typical usage examples of the Python class mne.EpochsArray. If you are wondering what exactly EpochsArray does, how to use it, or want to see it in real code, the curated class examples below may help.
Eleven EpochsArray code examples are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
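Before the collected examples, here is a minimal, self-contained sketch of the construction pattern they all share: build a (n_epochs, n_channels, n_times) array, an Info object, and an events array, then pass them to EpochsArray. The channel names, sampling rate, and event code below are arbitrary assumptions, not values taken from any of the examples.

import numpy as np
from mne import EpochsArray, create_info

rng = np.random.RandomState(0)
data = rng.randn(5, 3, 200)        # 5 epochs, 3 channels, 200 samples per epoch
sfreq = 100.0                      # assumed sampling rate (Hz)
info = create_info(ch_names=['EEG 001', 'EEG 002', 'EEG 003'],
                   sfreq=sfreq, ch_types='eeg')
# events: one row per epoch -> [sample index, previous trigger value, event id]
events = np.column_stack([np.arange(5) * 200,
                          np.zeros(5, dtype=int),
                          np.ones(5, dtype=int)])
epochs = EpochsArray(data, info, events, tmin=0.0, event_id={'dummy': 1})
print(epochs)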
Example 1: _read_epochs
def _read_epochs(epochs_mat_fname, info, return_fixations_motor):
    """Read the epochs from a matfile."""
    data = scio.loadmat(epochs_mat_fname,
                        squeeze_me=True)['data']
    ch_names = [ch for ch in data['label'].tolist()]
    info['sfreq'] = data['fsample'].tolist()
    times = data['time'].tolist()[0]
    # deal with different event lengths
    if return_fixations_motor is not None:
        fixation_mask = data['trialinfo'].tolist()[:, 1] == 6
        if return_fixations_motor is False:
            fixation_mask = ~fixation_mask
        data = np.array(data['trial'].tolist()[fixation_mask].tolist())
    else:
        data = np.array(data['trial'].tolist().tolist())

    # warning: data are not chronologically ordered but
    # match the trial info
    events = np.zeros((len(data), 3), dtype=int)
    events[:, 0] = np.arange(len(data))
    events[:, 2] = 99  # all events
    # we leave it to the user to construct their own events, since arbitrary,
    # task-specific events can be built from data['trialinfo'].
    this_info = _hcp_pick_info(info, ch_names)
    epochs = EpochsArray(data=data, info=this_info, events=events,
                         tmin=times.min())
    # XXX hack for now due to issue with EpochsArray constructor
    # cf https://github.com/mne-tools/mne-hcp/issues/9
    epochs.times = times
    return epochs
Example 2: cli
def cli(matfiles, savename, rec_type, infosrc):
    """
    Convert brainstorm epochs to mne.Epochs object
    """
    if infosrc:
        if rec_type == 'ds':
            from mne.io import read_raw_ctf as read_raw
        elif rec_type == 'fif':
            from mne.io import Raw as read_raw
        with nostdout():
            raw_with_info = read_raw(infosrc)

    isFirst = True
    for fname in matfiles:
        with nostdout():
            mat_epoch = sio.loadmat(fname)
        # click.echo(mat_epoch)
        if isFirst:
            data = mat_epoch['F']
            times = mat_epoch['Time']
            # print times[0,-1]
            isFirst = False
        else:
            data = np.dstack((data, mat_epoch['F']))
            # click.echo(data.shape)
    data = data.transpose((2, 0, 1))

    n_channels = data.shape[1]
    sfreq = times.shape[1] / (times[0, -1] + times[0, 1])

    if infosrc:
        if rec_type == 'ds':
            from mne.io import read_raw_ctf as read_raw
        elif rec_type == 'fif':
            from mne.io import Raw as read_raw
        with nostdout():
            raw_with_info = read_raw(infosrc)
        good_info = raw_with_info.info
        # click.echo(len(good_info['ch_names']))
        ch_types = [channel_type(good_info, idx) for idx in range(n_channels)]
        # click.echo(len(ch_types))
        info = create_info(ch_names=good_info['ch_names'], sfreq=sfreq,
                           ch_types=ch_types)
    else:
        ch_types = 'mag'
        info = create_info(n_channels, sfreq, ch_types)

    with nostdout():
        epochs = EpochsArray(data, info)
        epochs.save(savename)
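A hedged usage sketch for the converter above, treating cli as a plain function as shown (in the original project it is presumably wrapped as a click command); all file names are hypothetical placeholders:

# Hypothetical Brainstorm-exported epoch files plus a CTF recording ('.ds')
# that supplies the channel info; replace with real paths.
matfiles = ['trial_001.mat', 'trial_002.mat', 'trial_003.mat']
cli(matfiles, savename='converted-epo.fif', rec_type='ds', infosrc='subject01.ds')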
Example 3: test_tfr_multitaper
def test_tfr_multitaper():
    """Test tfr_multitaper."""
    sfreq = 200.0
    ch_names = ['SIM0001', 'SIM0002', 'SIM0003']
    ch_types = ['grad', 'grad', 'grad']
    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)

    n_times = int(sfreq)  # Second long epochs
    n_epochs = 3
    seed = 42
    rng = np.random.RandomState(seed)
    noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
    t = np.arange(n_times, dtype=float) / sfreq
    signal = np.sin(np.pi * 2. * 50. * t)  # 50 Hz sinusoid signal
    signal[np.logical_or(t < 0.45, t > 0.55)] = 0.  # Hard windowing
    on_time = np.logical_and(t >= 0.45, t <= 0.55)
    signal[on_time] *= np.hanning(on_time.sum())  # Ramping
    dat = noise + signal

    reject = dict(grad=4000.)
    events = np.empty((n_epochs, 3), int)
    first_event_sample = 100
    event_id = dict(sin50hz=1)
    for k in range(n_epochs):
        events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']

    epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id,
                         reject=reject)

    freqs = np.arange(5, 100, 3, dtype=float)
    power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
                                time_bandwidth=4.0)
    picks = np.arange(len(ch_names))
    power_picks, itc_picks = tfr_multitaper(epochs, freqs=freqs,
                                            n_cycles=freqs / 2.,
                                            time_bandwidth=4.0, picks=picks)
    power_evoked = tfr_multitaper(epochs.average(), freqs=freqs,
                                  n_cycles=freqs / 2., time_bandwidth=4.0,
                                  return_itc=False)

    # test picks argument
    assert_array_almost_equal(power.data, power_picks.data)
    assert_array_almost_equal(itc.data, itc_picks.data)

    # one is squared magnitude of the average (evoked) and
    # the other is average of the squared magnitudes (epochs PSD),
    # so values shouldn't match, but shapes should
    assert_array_equal(power.data.shape, power_evoked.data.shape)
    assert_raises(AssertionError, assert_array_almost_equal,
                  power.data, power_evoked.data)

    tmax = t[np.argmax(itc.data[0, freqs == 50, :])]
    fmax = freqs[np.argmax(power.data[1, :, t == 0.5])]
    assert_true(tmax > 0.3 and tmax < 0.7)
    assert_false(np.any(itc.data < 0.))
    assert_true(fmax > 40 and fmax < 60)
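The comment near the end of the example above is worth unpacking: the power of an average (evoked) is not the same as the average of the powers (epoch-wise), because averaging first cancels incoherent noise. A minimal numeric sketch with arbitrary random data:

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(3, 100)                              # 3 "epochs" of pure noise
power_of_average = np.abs(x.mean(axis=0)) ** 2     # average first, then square
average_of_power = (np.abs(x) ** 2).mean(axis=0)   # square first, then average
# For 3 epochs of independent noise the first is roughly a third of the second.
print(power_of_average.mean(), average_of_power.mean())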
Example 4: test_add_noise
def test_add_noise():
    """Test noise addition."""
    rng = np.random.RandomState(0)
    data_path = testing.data_path()
    raw = read_raw_fif(data_path + '/MEG/sample/sample_audvis_trunc_raw.fif')
    raw.del_proj()
    picks = pick_types(raw.info, eeg=True, exclude=())
    cov = compute_raw_covariance(raw, picks=picks)
    with pytest.raises(RuntimeError, match='to be loaded'):
        add_noise(raw, cov)
    raw.crop(0, 1).load_data()
    with pytest.raises(TypeError, match='Raw, Epochs, or Evoked'):
        add_noise(0., cov)
    with pytest.raises(TypeError, match='Covariance'):
        add_noise(raw, 0.)

    # test a no-op (data preserved)
    orig_data = raw[:][0]
    zero_cov = cov.copy()
    zero_cov['data'].fill(0)
    add_noise(raw, zero_cov)
    new_data = raw[:][0]
    assert_allclose(orig_data, new_data, atol=1e-30)

    # set to zero to make comparisons easier
    raw._data[:] = 0.
    epochs = EpochsArray(np.zeros((1, len(raw.ch_names), 100)),
                         raw.info.copy())
    epochs.info['bads'] = []
    evoked = epochs.average(picks=np.arange(len(raw.ch_names)))
    for inst in (raw, epochs, evoked):
        with catch_logging() as log:
            add_noise(inst, cov, random_state=rng, verbose=True)
        log = log.getvalue()
        want = ('to {0}/{1} channels ({0}'
                .format(len(cov['names']), len(raw.ch_names)))
        assert want in log
        if inst is evoked:
            inst = EpochsArray(inst.data[np.newaxis], inst.info)
        if inst is raw:
            cov_new = compute_raw_covariance(inst, picks=picks,
                                             verbose='error')  # samples
        else:
            cov_new = compute_covariance(inst, verbose='error')  # avg ref
        assert cov['names'] == cov_new['names']
        r = np.corrcoef(cov['data'].ravel(), cov_new['data'].ravel())[0, 1]
        assert r > 0.99
Example 5: test_plot_butterfly
def test_plot_butterfly():
    """Test butterfly view in epochs browse window."""
    rng = np.random.RandomState(0)
    n_epochs, n_channels, n_times = 50, 30, 20
    sfreq = 1000.
    data = np.sin(rng.randn(n_epochs, n_channels, n_times))
    events = np.array([np.arange(n_epochs), [0] * n_epochs,
                       np.ones([n_epochs], dtype=int)]).T
    chanlist = ['eeg' if chan < n_channels // 3 else 'ecog'
                if chan < n_channels // 2 else 'seeg'
                for chan in range(n_channels)]
    info = create_info(n_channels, sfreq, chanlist)
    epochs = EpochsArray(data, info, events)
    fig = epochs.plot(butterfly=True)
    keystotest = ['b', 'b', 'left', 'right', 'up', 'down',
                  'pageup', 'pagedown', '-', '+', '=',
                  'f11', 'home', '?', 'h', 'o', 'end']
    for key in keystotest:
        fig.canvas.key_press_event(key)
    fig.canvas.scroll_event(0.5, 0.5, -0.5)  # scroll down
    fig.canvas.scroll_event(0.5, 0.5, 0.5)  # scroll up
    fig.canvas.resize_event()
    fig.canvas.close_event()  # closing and epoch dropping
    plt.close('all')
Example 6: test_decim
def test_decim():
    """Test evoked decimation."""
    rng = np.random.RandomState(0)
    n_epochs, n_channels, n_times = 5, 10, 20
    dec_1, dec_2 = 2, 3
    decim = dec_1 * dec_2
    sfreq = 1000.
    sfreq_new = sfreq / decim
    data = rng.randn(n_epochs, n_channels, n_times)
    events = np.array([np.arange(n_epochs), [0] * n_epochs, [1] * n_epochs]).T
    info = create_info(n_channels, sfreq, 'eeg')
    info['lowpass'] = sfreq_new / float(decim)
    epochs = EpochsArray(data, info, events)
    data_epochs = epochs.copy().decimate(decim).get_data()
    data_epochs_2 = epochs.copy().decimate(decim, offset=1).get_data()
    data_epochs_3 = epochs.decimate(dec_1).decimate(dec_2).get_data()
    assert_array_equal(data_epochs, data[:, :, ::decim])
    assert_array_equal(data_epochs_2, data[:, :, 1::decim])
    assert_array_equal(data_epochs, data_epochs_3)

    # Now let's do it with some real data
    raw = read_raw_fif(raw_fname, add_eeg_ref=False)
    events = read_events(event_name)
    sfreq_new = raw.info['sfreq'] / decim
    raw.info['lowpass'] = sfreq_new / 4.  # suppress aliasing warnings
    picks = pick_types(raw.info, meg=True, eeg=True, exclude=())
    epochs = Epochs(raw, events, 1, -0.2, 0.5, picks=picks, preload=True,
                    add_eeg_ref=False)
    for offset in (0, 1):
        ev_ep_decim = epochs.copy().decimate(decim, offset).average()
        ev_decim = epochs.average().decimate(decim, offset)
        expected_times = epochs.times[offset::decim]
        assert_allclose(ev_decim.times, expected_times)
        assert_allclose(ev_ep_decim.times, expected_times)
        expected_data = epochs.get_data()[:, :, offset::decim].mean(axis=0)
        assert_allclose(ev_decim.data, expected_data)
        assert_allclose(ev_ep_decim.data, expected_data)
        assert_equal(ev_decim.info['sfreq'], sfreq_new)
        assert_array_equal(ev_decim.times, expected_times)
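A quick way to see the relationship this test relies on: decimating by dec_1 and then by dec_2 keeps every (dec_1 * dec_2)-th sample, so it is equivalent to a single decimation by their product, and the effective sampling rate drops by the same factor (here 1000 / 6 ≈ 166.67 Hz). A minimal sketch with the same assumed values:

import numpy as np

sfreq = 1000.
dec_1, dec_2 = 2, 3
decim = dec_1 * dec_2            # 6
x = np.arange(20)                # stand-in for one channel of one epoch
assert np.array_equal(x[::dec_1][::dec_2], x[::decim])
print(sfreq / decim)             # new sampling rate: ~166.67 Hz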
Example 7: test_tfr_multitaper
def test_tfr_multitaper():
    """Test tfr_multitaper."""
    sfreq = 200.0
    ch_names = ['SIM0001', 'SIM0002']
    ch_types = ['grad', 'grad']
    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)

    n_times = int(sfreq)  # Second long epochs
    n_epochs = 3
    seed = 42
    rng = np.random.RandomState(seed)
    noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
    t = np.arange(n_times, dtype=float) / sfreq
    signal = np.sin(np.pi * 2. * 50. * t)  # 50 Hz sinusoid signal
    signal[np.logical_or(t < 0.45, t > 0.55)] = 0.  # Hard windowing
    on_time = np.logical_and(t >= 0.45, t <= 0.55)
    signal[on_time] *= np.hanning(on_time.sum())  # Ramping
    dat = noise + signal

    reject = dict(grad=4000.)
    events = np.empty((n_epochs, 3), int)
    first_event_sample = 100
    event_id = dict(sin50hz=1)
    for k in range(n_epochs):
        events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']

    epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id,
                         reject=reject)

    freqs = np.arange(35, 70, 5, dtype=float)

    power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
                                time_bandwidth=4.0)
    power2, itc2 = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
                                  time_bandwidth=4.0, decim=slice(0, 2))
    picks = np.arange(len(ch_names))
    power_picks, itc_picks = tfr_multitaper(epochs, freqs=freqs,
                                            n_cycles=freqs / 2.,
                                            time_bandwidth=4.0, picks=picks)
    power_epochs = tfr_multitaper(epochs, freqs=freqs,
                                  n_cycles=freqs / 2., time_bandwidth=4.0,
                                  return_itc=False, average=False)
    power_averaged = power_epochs.average()
    power_evoked = tfr_multitaper(epochs.average(), freqs=freqs,
                                  n_cycles=freqs / 2., time_bandwidth=4.0,
                                  return_itc=False, average=False).average()

    print(power_evoked)  # test repr for EpochsTFR

    # Test channel picking
    power_epochs_picked = power_epochs.copy().drop_channels(['SIM0002'])
    assert_equal(power_epochs_picked.data.shape, (3, 1, 7, 200))
    assert_equal(power_epochs_picked.ch_names, ['SIM0001'])

    pytest.raises(ValueError, tfr_multitaper, epochs,
                  freqs=freqs, n_cycles=freqs / 2.,
                  return_itc=True, average=False)

    # test picks argument
    assert_array_almost_equal(power.data, power_picks.data)
    assert_array_almost_equal(power.data, power_averaged.data)
    assert_array_almost_equal(power.times, power_epochs.times)
    assert_array_almost_equal(power.times, power_averaged.times)
    assert_equal(power.nave, power_averaged.nave)
    assert_equal(power_epochs.data.shape, (3, 2, 7, 200))
    assert_array_almost_equal(itc.data, itc_picks.data)

    # one is squared magnitude of the average (evoked) and
    # the other is average of the squared magnitudes (epochs PSD),
    # so values shouldn't match, but shapes should
    assert_array_equal(power.data.shape, power_evoked.data.shape)
    pytest.raises(AssertionError, assert_array_almost_equal,
                  power.data, power_evoked.data)

    tmax = t[np.argmax(itc.data[0, freqs == 50, :])]
    fmax = freqs[np.argmax(power.data[1, :, t == 0.5])]
    assert (tmax > 0.3 and tmax < 0.7)
    assert not np.any(itc.data < 0.)
    assert (fmax > 40 and fmax < 60)
    assert (power2.data.shape == (len(picks), len(freqs), 2))
    assert (power2.data.shape == itc2.data.shape)

    # Test decim parameter checks and compatibility between wavelets length
    # and instance length in the time dimension.
    pytest.raises(TypeError, tfr_multitaper, epochs, freqs=freqs,
                  n_cycles=freqs / 2., time_bandwidth=4.0, decim=(1,))
    pytest.raises(ValueError, tfr_multitaper, epochs, freqs=freqs,
                  n_cycles=1000, time_bandwidth=4.0)
Example 8: dict
# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=float) / sfreq
signal = np.sin(np.pi * 2. * 50. * t)  # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0.  # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum())  # Ramping
data = noise + signal

reject = dict(grad=4000)
events = np.empty((n_epochs, 3), dtype=int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
    events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']

epochs = EpochsArray(data=data, info=info, events=events, event_id=event_id,
                     reject=reject)

###############################################################################
# Calculate a time-frequency representation (TFR)
# -----------------------------------------------
#
# Below we'll demonstrate the output of several TFR functions in MNE:
#
# * :func:`mne.time_frequency.tfr_multitaper`
# * :func:`mne.time_frequency.tfr_stockwell`
# * :func:`mne.time_frequency.tfr_morlet`
#
# Multitaper transform
# ====================
# First we'll use the multitaper method for calculating the TFR.
# This creates several orthogonal tapering windows in the TFR estimation,
Example 9: StratifiedKFold
data_cls = np.asarray(cls_all)
data_pln = np.asarray(pln_all)
# Setup data for epochs and cross validation
X = np.vstack([data_cls, data_pln])
y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])
cv = StratifiedKFold(n_splits=7, shuffle=True)
# Create epochs to use for classification
n_trial, n_chan, n_time = X.shape
events = np.vstack((range(n_trial), np.zeros(n_trial, int), y.astype(int))).T
chan_names = ['MEG %i' % chan for chan in range(n_chan)]
chan_types = ['mag'] * n_chan
sfreq = 250
info = create_info(chan_names, sfreq, chan_types)
epochs = EpochsArray(data=X, info=info, events=events, verbose=False)
epochs.times = selected_times[:n_time]
# make classifier
clf = LogisticRegression(C=0.0001)
# fit model and score
gat = GeneralizationAcrossTime(
    clf=clf, scorer="roc_auc", cv=cv, predict_method="predict")
gat.fit(epochs, y=y)
gat.score(epochs, y=y)
# Save model
joblib.dump(gat, data_path + "decode_time_gen/gat_ge.jl")
# make matrix plot and save it
Example 10: dict
# Add a 50 Hz sinusoidal burst to the noise and ramp it.
t = np.arange(n_times, dtype=float) / sfreq
signal = np.sin(np.pi * 2. * 50. * t)  # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0.  # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum())  # Ramping
data = noise + signal

reject = dict(grad=4000)
events = np.empty((n_epochs, 3), dtype=int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
    events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']

epochs = EpochsArray(data=data, info=info, events=events, event_id=event_id,
                     reject=reject)
epochs.average().plot()

###############################################################################
# Calculate a time-frequency representation (TFR)
# -----------------------------------------------
#
# Below we'll demonstrate the output of several TFR functions in MNE:
#
# * :func:`mne.time_frequency.tfr_multitaper`
# * :func:`mne.time_frequency.tfr_stockwell`
# * :func:`mne.time_frequency.tfr_morlet`
#
# Multitaper transform
# ====================
Example 11: StratifiedKFold
data_pln = data_pln.swapaxes(2, 0)
data_pln = data_pln.swapaxes(2, 1)
# Setup data for epochs and cross validation
X = np.vstack([data_cls, data_pln])
y = np.concatenate([np.zeros(len(data_cls)), np.ones(len(data_pln))])
cv = StratifiedKFold(n_splits=10, shuffle=True)
# Create epochs to use for classification
n_trial, n_chan, n_time = X.shape
events = np.vstack((range(n_trial), np.zeros(n_trial, int), y.astype(int))).T
chan_names = ['MEG %i' % chan for chan in range(n_chan)]
chan_types = ['mag'] * n_chan
sfreq = 250
info = create_info(chan_names, sfreq, chan_types)
epochs = EpochsArray(data=X, info=info, events=events, verbose=False)
epochs.times = selected_times[:n_time]
epochs.crop(-3.8, None)
# fit model and score
gat = GeneralizationAcrossTime(
    scorer="accuracy", cv=cv, predict_method="predict")
gat.fit(epochs, y=y)
gat.score(epochs, y=y)
# Save model
joblib.dump(gat, data_path + "decode_time_gen/%s_gat_tr.jl" % subject)
# make matrix plot and save it
fig = gat.plot(