This article collects typical usage examples of the Python function mne.concatenate_raws. If you are wondering what concatenate_raws does, how to call it, or simply want to see it used in context, the hand-picked examples below should help.
The 15 code examples shown below are sorted by popularity by default. You can upvote the ones you like or find useful; your feedback helps the site recommend better Python code examples.
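Before diving into the examples, here is a minimal sketch of the pattern most of them share: read a Raw object, concatenate copies of it, and check the resulting length. The file name 'sample_raw.fif' is only a placeholder for any FIF recording you have locally.

import mne

# Placeholder path -- substitute any raw FIF recording available locally.
raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)

# concatenate_raws modifies the first Raw in the list in place and returns it,
# so pass a copy first if the original should stay untouched.
raw_concat = mne.concatenate_raws([raw.copy(), raw])
assert raw_concat.n_times == 2 * raw.n_times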
Example 1: test_edf_data
def test_edf_data():
    """Test reading raw edf files."""
    raw_py = read_raw_edf(edf_path, misc=range(-4, 0), stim_channel=139,
                          preload=True)
    picks = pick_types(raw_py.info, meg=False, eeg=True,
                       exclude=['EDF Annotations'])
    data_py, _ = raw_py[picks]

    print(raw_py)  # to test repr
    print(raw_py.info)  # to test Info repr

    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_eeglab_path)
    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
    data_eeglab = raw_eeglab[picks]
    assert_array_almost_equal(data_py, data_eeglab, 10)

    # Make sure concatenation works
    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)

    # Test uneven sampling
    raw_py = read_raw_edf(edf_uneven_path, stim_channel=None)
    data_py, _ = raw_py[0]
    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(edf_uneven_eeglab_path)
    raw_eeglab = raw_eeglab['data']
    data_eeglab = raw_eeglab[0]

    # match upsampling (integer division so np.repeat gets an int count)
    upsample = len(data_eeglab) // len(raw_py)
    data_py = np.repeat(data_py, repeats=upsample)
    assert_array_equal(data_py, data_eeglab)
Example 2: test_data
def test_data():
    """Test reading raw nicolet files."""
    tempdir = _TempDir()
    raw = read_raw_nicolet(fname, preload=False)
    raw_preload = read_raw_nicolet(fname, preload=True)
    picks = [2, 3, 12, 13]
    assert_array_equal(raw[picks, 20:30][0], raw_preload[picks, 20:30][0])

    # Make sure concatenation works
    raw2 = concatenate_raws([raw_preload.copy(), raw_preload])

    # Test saving and reading
    out_fname = op.join(tempdir, 'test_nicolet_raw.fif')
    raw2.save(out_fname, tmax=raw.times[-1])
    raw2 = Raw(out_fname)

    full_data = raw_preload._data
    data1, times1 = raw[:10:3, 10:12]
    data2, times2 = raw2[:10:3, 10:12]
    data3, times3 = raw2[[0, 3, 6, 9], 10:12]
    assert_array_almost_equal(data1, full_data[:10:3, 10:12], 9)
    assert_array_almost_equal(data1, data2, 9)
    assert_array_almost_equal(data1, data3, 9)
    assert_array_almost_equal(times1, times2)
    assert_array_almost_equal(times1, times3)
Example 3: test_data
def test_data():
    """Test reading raw kit files."""
    raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path,
                          stim=list(range(167, 159, -1)), slope='+',
                          stimthresh=1, preload=True)
    print(repr(raw_py))

    # Binary file only stores the sensor channels
    py_picks = pick_types(raw_py.info, exclude='bads')
    raw_bin = op.join(data_dir, 'test_bin_raw.fif')
    raw_bin = Raw(raw_bin, preload=True)
    bin_picks = pick_types(raw_bin.info, stim=True, exclude='bads')
    data_bin, _ = raw_bin[bin_picks]
    data_py, _ = raw_py[py_picks]

    # this .mat was generated using the Yokogawa MEG Reader
    data_Ykgw = op.join(data_dir, 'test_Ykgw.mat')
    data_Ykgw = scipy.io.loadmat(data_Ykgw)['data']
    data_Ykgw = data_Ykgw[py_picks]
    assert_array_almost_equal(data_py, data_Ykgw)

    py_picks = pick_types(raw_py.info, stim=True, ref_meg=False,
                          exclude='bads')
    data_py, _ = raw_py[py_picks]
    assert_array_almost_equal(data_py, data_bin)

    # Make sure concatenation works
    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
Example 4: test_bdf_data
def test_bdf_data():
    """Test reading raw bdf files."""
    raw_py = read_raw_edf(bdf_path, montage=montage_path, eog=eog,
                          misc=misc, preload=True)
    picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')
    data_py, _ = raw_py[picks]

    print(raw_py)  # to test repr
    print(raw_py.info)  # to test Info repr

    # this .mat was generated using the EEG Lab Biosemi Reader
    raw_eeglab = io.loadmat(bdf_eeglab_path)
    raw_eeglab = raw_eeglab['data'] * 1e-6  # data are stored in microvolts
    data_eeglab = raw_eeglab[picks]
    assert_array_almost_equal(data_py, data_eeglab)

    # Manually checking that float coordinates are imported
    assert_true((raw_py.info['chs'][0]['eeg_loc']).any())
    assert_true((raw_py.info['chs'][25]['eeg_loc']).any())
    assert_true((raw_py.info['chs'][63]['eeg_loc']).any())

    # Make sure concatenation works
    raw_concat = concatenate_raws([raw_py.copy(), raw_py])
    assert_equal(raw_concat.n_times, 2 * raw_py.n_times)
Example 5: test_io_egi
def test_io_egi():
    """Test importing EGI simple binary files."""
    # test default
    tempdir = _TempDir()
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always', category=RuntimeWarning)
        raw = read_raw_egi(egi_fname, include=None)
        assert_true('RawEGI' in repr(raw))
        raw.load_data()  # currently does nothing
        assert_equal(len(w), 1)
        assert_true(w[0].category == RuntimeWarning)
        msg = 'Did not find any event code with more than one event.'
        assert_true(msg in '%s' % w[0].message)

    include = ['TRSP', 'XXX1']
    raw = read_raw_egi(egi_fname, include=include)
    repr(raw)
    repr(raw.info)

    assert_equal('eeg' in raw, True)
    out_fname = op.join(tempdir, 'test_egi_raw.fif')
    raw.save(out_fname)

    raw2 = Raw(out_fname, preload=True)
    data1, times1 = raw[:10, :]
    data2, times2 = raw2[:10, :]
    assert_array_almost_equal(data1, data2, 9)
    assert_array_almost_equal(times1, times2)

    eeg_chan = [c for c in raw.ch_names if 'EEG' in c]
    assert_equal(len(eeg_chan), 256)
    picks = pick_types(raw.info, eeg=True)
    assert_equal(len(picks), 256)
    assert_equal('STI 014' in raw.ch_names, True)

    events = find_events(raw, stim_channel='STI 014')
    assert_equal(len(events), 2)  # ground truth
    assert_equal(np.unique(events[:, 1])[0], 0)
    assert_true(np.unique(events[:, 0])[0] != 0)
    assert_true(np.unique(events[:, 2])[0] != 0)

    # test trigger functionality
    triggers = np.array([[0, 1, 1, 0], [0, 0, 1, 0]])
    assert_raises(RuntimeError, _combine_triggers, triggers, None)
    triggers = np.array([[0, 1, 0, 0], [0, 0, 1, 0]])
    events_ids = [12, 24]
    new_trigger = _combine_triggers(triggers, events_ids)
    assert_array_equal(np.unique(new_trigger), np.unique([0, 12, 24]))

    assert_raises(ValueError, read_raw_egi, egi_fname, include=['Foo'])
    assert_raises(ValueError, read_raw_egi, egi_fname, exclude=['Bar'])

    for ii, k in enumerate(include, 1):
        assert_true(k in raw.event_id)
        assert_true(raw.event_id[k] == ii)

    # Make sure concatenation works
    raw_concat = concatenate_raws([raw.copy(), raw])
    assert_equal(raw_concat.n_times, 2 * raw.n_times)
Example 6: test_crop_more
def test_crop_more():
    """Test more cropping."""
    raw = mne.io.read_raw_fif(fif_fname).crop(0, 11).load_data()
    raw._data[:] = np.random.RandomState(0).randn(*raw._data.shape)
    onset = np.array([0.47058824, 2.49773765, 6.67873287, 9.15837097])
    duration = np.array([0.89592767, 1.13574672, 1.09954739, 0.48868752])
    annotations = mne.Annotations(onset, duration, 'BAD')
    raw.set_annotations(annotations)
    assert len(raw.annotations) == 4

    delta = 1. / raw.info['sfreq']
    offset = raw.first_samp * delta
    raw_concat = mne.concatenate_raws(
        [raw.copy().crop(0, 4 - delta),
         raw.copy().crop(4, 8 - delta),
         raw.copy().crop(8, None)])
    assert_allclose(raw_concat.times, raw.times)
    assert_allclose(raw_concat[:][0], raw[:][0])
    assert raw_concat.first_samp == raw.first_samp

    boundary_idx = np.where(
        raw_concat.annotations.description == 'BAD boundary')[0]
    assert len(boundary_idx) == 2
    raw_concat.annotations.delete(boundary_idx)

    boundary_idx = np.where(
        raw_concat.annotations.description == 'EDGE boundary')[0]
    assert len(boundary_idx) == 2
    raw_concat.annotations.delete(boundary_idx)

    assert len(raw_concat.annotations) == 4
    assert_array_equal(raw_concat.annotations.description,
                       raw.annotations.description)
    assert_allclose(raw.annotations.duration, duration)
    assert_allclose(raw_concat.annotations.duration, duration)
    assert_allclose(raw.annotations.onset, onset + offset)
    assert_allclose(raw_concat.annotations.onset, onset + offset,
                    atol=1. / raw.info['sfreq'])
Example 7: test_events_long
def test_events_long():
    """Test events."""
    data_path = testing.data_path()
    raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif'
    raw = read_raw_fif(raw_fname, preload=True)
    raw_tmin, raw_tmax = 0, 90

    tmin, tmax = -0.2, 0.5
    event_id = dict(aud_l=1, vis_l=3)

    # select gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False, eog=True,
                       stim=True, exclude=raw.info['bads'])

    # load data with usual Epochs for later verification
    raw = concatenate_raws([raw, raw.copy(), raw.copy(), raw.copy(),
                            raw.copy(), raw.copy()])
    assert 110 < raw.times[-1] < 130
    raw_cropped = raw.copy().crop(raw_tmin, raw_tmax)
    events_offline = find_events(raw_cropped)
    epochs_offline = Epochs(raw_cropped, events_offline, event_id=event_id,
                            tmin=tmin, tmax=tmax, picks=picks, decim=1,
                            reject=dict(grad=4000e-13, eog=150e-6),
                            baseline=None)
    epochs_offline.drop_bad()

    # create the mock-client object
    rt_client = MockRtClient(raw)
    rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
                         decim=1, reject=dict(grad=4000e-13, eog=150e-6),
                         baseline=None, isi_max=1.)
    rt_epochs.start()
    rt_client.send_data(rt_epochs, picks, tmin=raw_tmin, tmax=raw_tmax,
                        buffer_size=1000)

    expected_events = epochs_offline.events.copy()
    expected_events[:, 0] = expected_events[:, 0] - raw_cropped.first_samp
    assert np.all(expected_events[:, 0] <=
                  (raw_tmax - tmax) * raw.info['sfreq'])
    assert_array_equal(rt_epochs.events, expected_events)
    assert len(rt_epochs) == len(epochs_offline)

    data_picks = pick_types(epochs_offline.info, meg='grad', eeg=False,
                            eog=True, stim=False,
                            exclude=raw.info['bads'])

    for ev_num, ev in enumerate(rt_epochs.iter_evoked()):
        if ev_num == 0:
            X_rt = ev.data[None, data_picks, :]
            y_rt = int(ev.comment)  # comment attribute contains the event_id
        else:
            X_rt = np.concatenate((X_rt, ev.data[None, data_picks, :]),
                                  axis=0)
            y_rt = np.append(y_rt, int(ev.comment))

    X_offline = epochs_offline.get_data()[:, data_picks, :]
    y_offline = epochs_offline.events[:, 2]
    assert_array_equal(X_rt, X_offline)
    assert_array_equal(y_rt, y_offline)
Example 8: test_read_vhdr_annotations_and_events
def test_read_vhdr_annotations_and_events():
    """Test load brainvision annotations and parse them to events."""
    sfreq = 1000.0
    expected_orig_time = 1384359243.794231
    expected_onset_latency = np.array(
        [0, 486., 496., 1769., 1779., 3252., 3262., 4935., 4945., 5999.,
         6619., 6629., 7629., 7699.]
    )
    expected_annot_description = [
        'New Segment/', 'Stimulus/S253', 'Stimulus/S255', 'Stimulus/S254',
        'Stimulus/S255', 'Stimulus/S254', 'Stimulus/S255', 'Stimulus/S253',
        'Stimulus/S255', 'Response/R255', 'Stimulus/S254', 'Stimulus/S255',
        'SyncStatus/Sync On', 'Optic/O 1'
    ]
    expected_events = np.stack([
        expected_onset_latency,
        np.zeros_like(expected_onset_latency),
        [99999, 253, 255, 254, 255, 254, 255, 253, 255, 1255, 254, 255,
         99998, 2001],
    ]).astype('int64').T
    expected_event_id = {'New Segment/': 99999, 'Stimulus/S253': 253,
                         'Stimulus/S255': 255, 'Stimulus/S254': 254,
                         'Response/R255': 1255, 'SyncStatus/Sync On': 99998,
                         'Optic/O 1': 2001}

    raw = read_raw_brainvision(vhdr_path, eog=eog)

    # validate annotations
    assert raw.annotations.orig_time == expected_orig_time
    assert_allclose(raw.annotations.onset, expected_onset_latency / sfreq)
    assert_array_equal(raw.annotations.description,
                       expected_annot_description)

    # validate event extraction
    events, event_id = events_from_annotations(raw)
    assert_array_equal(events, expected_events)
    assert event_id == expected_event_id

    # validate that None gives us a sorted list
    expected_none_event_id = {desc: idx + 1 for idx, desc in
                              enumerate(sorted(event_id.keys()))}
    events, event_id = events_from_annotations(raw, event_id=None)
    assert event_id == expected_none_event_id

    # Add some custom ones, plus a 2-digit one
    s_10 = 'Stimulus/S 10'
    raw.annotations.append([1, 2, 3], 10, ['ZZZ', s_10, 'YYY'])
    expected_event_id.update(YYY=10001, ZZZ=10002)  # others starting at 10001
    expected_event_id[s_10] = 10
    _, event_id = events_from_annotations(raw)
    assert event_id == expected_event_id

    # Concatenating two shouldn't change the resulting event_id
    # (BAD and EDGE should be ignored)
    with pytest.warns(RuntimeWarning, match='expanding outside'):
        raw_concat = concatenate_raws([raw.copy(), raw.copy()])
    _, event_id = events_from_annotations(raw_concat)
    assert event_id == expected_event_id
Example 9: test_mf_skips
def test_mf_skips():
    """Test processing of data with skips."""
    raw = read_raw_fif(skip_fname, preload=True)
    raw.fix_mag_coil_types()
    raw.pick_channels(raw.ch_names[:50])  # fast and inaccurate
    kwargs = dict(st_only=True, coord_frame='meg', int_order=4, ext_order=3)

    # smoke test that this runs
    maxwell_filter(raw, st_duration=17., skip_by_annotation=(), **kwargs)
    # and this one, too, which will process some all-zero data
    maxwell_filter(raw, st_duration=2., skip_by_annotation=(), **kwargs)
    with pytest.raises(ValueError, match='duration'):
        # skips decrease acceptable duration
        maxwell_filter(raw, st_duration=17., **kwargs)

    onsets, ends = _annotations_starts_stops(
        raw, ('edge', 'bad_acq_skip'), 'skip_by_annotation', invert=True)
    assert (ends - onsets).min() / raw.info['sfreq'] == 2.
    assert (ends - onsets).max() / raw.info['sfreq'] == 3.
    for st_duration in (2., 3.):
        raw_sss = maxwell_filter(raw, st_duration=st_duration, **kwargs)
        for start, stop in zip(onsets, ends):
            orig_data = raw[:, start:stop][0]
            new_data = raw_sss[:, start:stop][0]
            if (stop - start) / raw.info['sfreq'] >= st_duration:
                # Should be modified
                assert not np.allclose(new_data, orig_data, atol=1e-20)
            else:
                # Should not be modified
                assert_allclose(new_data, orig_data, atol=1e-20)

    # Processing an individual file and concat should be equivalent to
    # concat then process
    raw.crop(0, 1)
    raw_sss = maxwell_filter(raw, st_duration=1., **kwargs)
    raw_sss_concat = concatenate_raws([raw_sss, raw_sss.copy()])
    raw_concat = concatenate_raws([raw.copy(), raw.copy()])
    raw_concat_sss = maxwell_filter(raw_concat, st_duration=1., **kwargs)
    raw_concat_sss_bad = maxwell_filter(raw_concat, st_duration=1.,
                                        skip_by_annotation=(), **kwargs)
    data_c = raw_concat[:][0]
    data_sc = raw_sss_concat[:][0]
    data_cs = raw_concat_sss[:][0]
    data_csb = raw_concat_sss_bad[:][0]
    assert not np.allclose(data_cs, data_c, atol=1e-20)
    assert not np.allclose(data_cs, data_csb, atol=1e-20)
    assert_allclose(data_sc, data_cs, atol=1e-20)
Example 10: prepare
def prepare(datafiles, read_events=True):
    """Given a list of files, return an MNE Raw object with the data in them.

    If read_events is True (the default), also return a numpy array with the
    events.
    """
    rawdata = mne.concatenate_raws([file_to_raw(f) for f in datafiles])
    if read_events:
        eventfiles = [f.replace("_data", "_events") for f in datafiles]
        events = np.concatenate([pd.read_csv(f).values[:, 1:]
                                 for f in eventfiles])
        return rawdata, events
    else:
        return rawdata, None
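A hypothetical call might look like the following sketch; the subj1_series*_data.csv pattern mirrors the "_data"/"_events" naming convention that prepare() expects, and the file_to_raw helper is assumed to be defined alongside prepare() rather than being part of MNE itself.

from glob import glob

# Hypothetical file layout with paired "_data"/"_events" CSVs per series.
datafiles = sorted(glob('../data/train/subj1_series*_data.csv'))
raw, events = prepare(datafiles, read_events=True)
print(raw.n_times, events.shape)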
Example 11: test_raw
def test_raw():
    """Test bti conversion to Raw object."""
    for pdf, config, hs, exported in zip(pdf_fnames, config_fnames, hs_fnames,
                                         exported_fnames):
        # rx = 2 if 'linux' in pdf else 0
        assert_raises(ValueError, read_raw_bti, pdf, 'eggs')
        assert_raises(ValueError, read_raw_bti, pdf, config, 'spam')
        if op.exists(tmp_raw_fname):
            os.remove(tmp_raw_fname)
        ex = Raw(exported, preload=True)
        ra = read_raw_bti(pdf, config, hs)
        assert_true('RawBTi' in repr(ra))
        assert_equal(ex.ch_names[:NCH], ra.ch_names[:NCH])
        assert_array_almost_equal(ex.info['dev_head_t']['trans'],
                                  ra.info['dev_head_t']['trans'], 7)
        dig1, dig2 = [np.array([d['r'] for d in r_.info['dig']])
                      for r_ in (ra, ex)]
        assert_array_almost_equal(dig1, dig2, 18)
        coil1, coil2 = [np.concatenate([d['loc'].flatten()
                                        for d in r_.info['chs'][:NCH]])
                        for r_ in (ra, ex)]
        assert_array_almost_equal(coil1, coil2, 7)

        loc1, loc2 = [np.concatenate([d['loc'].flatten()
                                      for d in r_.info['chs'][:NCH]])
                      for r_ in (ra, ex)]
        assert_allclose(loc1, loc2)
        assert_array_equal(ra._data[:NCH], ex._data[:NCH])
        assert_array_equal(ra._cals[:NCH], ex._cals[:NCH])

        # check our transforms
        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
            if ex.info[key] is None:
                pass
            else:
                assert_true(ra.info[key] is not None)
                for ent in ('to', 'from', 'trans'):
                    assert_allclose(ex.info[key][ent],
                                    ra.info[key][ent])

        # Make sure concatenation works
        raw_concat = concatenate_raws([ra.copy(), ra])
        assert_equal(raw_concat.n_times, 2 * ra.n_times)

        ra.save(tmp_raw_fname)
        re = Raw(tmp_raw_fname)
        print(re)
        for key in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
            assert_true(isinstance(re.info[key], dict))
            this_t = re.info[key]['trans']
            assert_equal(this_t.shape, (4, 4))
            # check that the matrix is not the identity
            assert_true(not np.allclose(this_t, np.eye(4)))
        os.remove(tmp_raw_fname)
Example 12: load_raw_data
def load_raw_data(subject, test=False):
    """Load Raw data from files.

    For a given subject, csv files are loaded, converted to an MNE Raw
    instance and concatenated.
    If test is True, training data are composed of series 1 to 8 and test
    data of series 9 and 10. Otherwise, training data are series 1 to 6 and
    test data series 7 and 8.
    """
    fnames_train = glob('../data/train/subj%d_series*_data.csv' % (subject))
    fnames_train.sort()
    if test:
        fnames_test = glob('../data/test/subj%d_series*_data.csv' % (subject))
        fnames_test.sort()
    else:
        fnames_test = fnames_train[-2:]
        fnames_train = fnames_train[:-2]

    # read and concatenate all the files
    raw_train = [creat_mne_raw_object(fname) for fname in fnames_train]
    raw_train = concatenate_raws(raw_train)
    # pick eeg signal
    picks = pick_types(raw_train.info, eeg=True)

    # get training data
    data_train = raw_train._data[picks].T
    labels_train = raw_train._data[32:].T

    raw_test = [creat_mne_raw_object(fname, read_events=not test)
                for fname in fnames_test]
    raw_test = concatenate_raws(raw_test)
    data_test = raw_test._data[picks].T

    # extract labels if validating on series 7&8
    labels_test = None
    if not test:
        labels_test = raw_test._data[32:].T

    return data_train, labels_train, data_test, labels_test
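As a quick sanity check, a hedged usage sketch (assuming the ../data/train and ../data/test directory layout referenced above, and that creat_mne_raw_object is defined as in the original project):

# Validation split: train on series 1-6, evaluate on series 7-8 for subject 1.
data_train, labels_train, data_test, labels_test = load_raw_data(subject=1)
print(data_train.shape, labels_train.shape, data_test.shape, labels_test.shape)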
Example 13: test_clean_eog_ecg
def test_clean_eog_ecg():
    """Test mne clean_eog_ecg."""
    check_usage(mne_clean_eog_ecg)
    tempdir = _TempDir()
    raw = concatenate_raws([Raw(f)
                            for f in [raw_fname, raw_fname, raw_fname]])
    raw.info['bads'] = ['MEG 2443']
    use_fname = op.join(tempdir, op.basename(raw_fname))
    raw.save(use_fname)
    with ArgvSetter(('-i', use_fname, '--quiet')):
        mne_clean_eog_ecg.run()
    fnames = glob.glob(op.join(tempdir, '*proj.fif'))
    assert_true(len(fnames) == 2)  # two projs
    fnames = glob.glob(op.join(tempdir, '*-eve.fif'))
    assert_true(len(fnames) == 3)  # raw plus two projs
Example 14: test_clean_eog_ecg
def test_clean_eog_ecg(tmpdir):
    """Test mne clean_eog_ecg."""
    check_usage(mne_clean_eog_ecg)
    tempdir = str(tmpdir)
    raw = concatenate_raws([read_raw_fif(f)
                            for f in [raw_fname, raw_fname, raw_fname]])
    raw.info['bads'] = ['MEG 2443']
    use_fname = op.join(tempdir, op.basename(raw_fname))
    raw.save(use_fname)
    with ArgvSetter(('-i', use_fname, '--quiet')):
        mne_clean_eog_ecg.run()
    for key, count in (('proj', 2), ('-eve', 3)):
        fnames = glob.glob(op.join(tempdir, '*%s.fif' % key))
        assert len(fnames) == count
Example 15: test_clean_eog_ecg
def test_clean_eog_ecg():
    """Test mne clean_eog_ecg."""
    check_usage(mne_clean_eog_ecg)
    tempdir = _TempDir()
    raw = concatenate_raws([read_raw_fif(f)
                            for f in [raw_fname, raw_fname, raw_fname]])
    raw.info["bads"] = ["MEG 2443"]
    use_fname = op.join(tempdir, op.basename(raw_fname))
    raw.save(use_fname)
    with ArgvSetter(("-i", use_fname, "--quiet")):
        mne_clean_eog_ecg.run()
    fnames = glob.glob(op.join(tempdir, "*proj.fif"))
    assert_true(len(fnames) == 2)  # two projs
    fnames = glob.glob(op.join(tempdir, "*-eve.fif"))
    assert_true(len(fnames) == 3)  # raw plus two projs