This page collects typical usage examples of the Python function mne.utils.logger.info. If you are unsure what the info function does or how to call it, the hand-picked code examples below may help.
The following shows 15 code examples of the info function, sorted by popularity by default.
Example 1: _interpolate_bads_eeg_epochs
def _interpolate_bads_eeg_epochs(epochs, bad_channels_by_epoch=None):
"""Interpolate bad channels per epoch
Parameters
----------
    epochs : mne.Epochs
        The epochs to interpolate. Must be preloaded.
bad_channels_by_epoch : list of list of str
Bad channel names specified for each epoch. For example, for an Epochs
instance containing 3 epochs: ``[['F1'], [], ['F3', 'FZ']]``
"""
if len(bad_channels_by_epoch) != len(epochs):
raise ValueError("Unequal length of epochs (%i) and "
"bad_channels_by_epoch (%i)"
% (len(epochs), len(bad_channels_by_epoch)))
interp_cache = {}
for i, bad_channels in enumerate(bad_channels_by_epoch):
if not bad_channels:
continue
# find interpolation matrix
key = tuple(sorted(bad_channels))
if key in interp_cache:
goods_idx, bads_idx, interpolation = interp_cache[key]
else:
goods_idx, bads_idx, interpolation = interp_cache[key] \
= _make_interpolator(epochs, key)
# apply interpolation
logger.info('Interpolating %i sensors on epoch %i', bads_idx.sum(), i)
epochs._data[i, bads_idx, :] = np.dot(interpolation,
epochs._data[i, goods_idx, :])
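A minimal usage sketch, assuming epochs is a preloaded mne.Epochs instance with three epochs whose channel names include 'F1', 'F3' and 'FZ' (all names here are hypothetical):

# Hypothetical usage: interpolate a different set of bad channels per epoch.
bads_per_epoch = [['F1'], [], ['F3', 'FZ']]
_interpolate_bads_eeg_epochs(epochs, bad_channels_by_epoch=bads_per_epoch)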
Example 2: _raw_to_epochs_array
def _raw_to_epochs_array(x, sfreq, events, tmin, tmax):
"""Aux function to create epochs from a 2D array"""
if events.ndim != 1:
raise ValueError('events must be 1D')
if events.dtype != int:
raise ValueError('events must be of dtype int')
# Check that events won't be cut off
n_times = x.shape[-1]
min_ix = 0 - sfreq * tmin
max_ix = n_times - sfreq * tmax
msk_keep = np.logical_and(events > min_ix, events < max_ix)
if not all(msk_keep):
logger.info('Some event windows extend beyond data limits,'
' and will be cut off...')
events = events[msk_keep]
# Pull events from the raw data
epochs = []
for ix in events:
ix_min, ix_max = [ix + int(i_tlim * sfreq)
for i_tlim in [tmin, tmax]]
epochs.append(x[np.newaxis, :, ix_min:ix_max])
epochs = np.concatenate(epochs, axis=0)
times = np.arange(epochs.shape[-1]) / float(sfreq) + tmin
return epochs, times, msk_keep
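A minimal sketch of the expected inputs and outputs, using synthetic data (the array shapes and event samples are made up):

import numpy as np

# 4 channels, 10 s at 100 Hz; events are sample indices into the raw array.
sfreq = 100.
x = np.random.randn(4, int(10 * sfreq))
events = np.array([150, 500, 980])  # 980 is too close to the end, so it is dropped
epochs, times, msk_keep = _raw_to_epochs_array(x, sfreq, events,
                                               tmin=-0.2, tmax=0.5)
print(epochs.shape)  # (2, 4, 70): kept events x channels x samples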
Example 3: chop_raw_data
def chop_raw_data(raw, start_time=60.0, stop_time=360.0, save=True):
'''
    This function extracts a specified span of raw data
    and writes it to a fif file.
    Five minutes of data are extracted by default.
Parameters
----------
raw: Raw object or raw file name as a string.
start_time: Time to extract data from in seconds. Default is 60.0 seconds.
stop_time: Time up to which data is to be extracted. Default is 360.0 seconds.
save: bool, If True the raw file is written to disk.
'''
if isinstance(raw, str):
        print('Raw file name provided, loading raw object...')
raw = mne.io.Raw(raw, preload=True)
# Check if data is longer than required chop duration.
if (raw.n_times / (raw.info['sfreq'])) < (stop_time + start_time):
logger.info("The data is not long enough for file %s.") % (raw.info['filename'])
return
# Obtain indexes for start and stop times.
assert start_time < stop_time, "Start time is greater than stop time."
start_idx = raw.time_as_index(start_time)
stop_idx = raw.time_as_index(stop_time)
data, times = raw[:, start_idx:stop_idx]
    raw._data, raw._times = data, times
dur = int((stop_time - start_time) / 60)
if save:
#raw.save(raw.info['filename'].split('/')[-1].split('.')[0] + '_' + str(dur) + 'm-raw.fif')
raw.save(raw.info['filename'].split('-raw.fif')[0] + ',' + str(dur) + 'm-raw.fif')
raw.close()
return
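A minimal usage sketch (the file name is hypothetical; passing a string triggers the loading branch inside the function):

# Hypothetical usage: keep minutes 1 through 6 and save the result.
chop_raw_data('subject01-raw.fif', start_time=60.0, stop_time=360.0, save=True)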
Example 4: chop_raw_data
def chop_raw_data(raw, start_time=60.0, stop_time=360.0):
'''
    This function extracts a specified span of raw data
    and writes it to a fif file.
    Five minutes of data are extracted by default.
Parameters
----------
raw: Raw object.
start_time: Time to extract data from in seconds. Default is 60.0 seconds.
stop_time: Time up to which data is to be extracted. Default is 360.0 seconds.
'''
# Check if data is longer than required chop duration.
if (raw.n_times / (raw.info['sfreq'])) < (stop_time + 60.0):
logger.info("The data is not long enough.")
return
# Obtain indexes for start and stop times.
assert start_time < stop_time, "Start time is greater than stop time."
start_idx = raw.time_as_index(start_time)
stop_idx = raw.time_as_index(stop_time)
data, times = raw[:, start_idx:stop_idx]
    raw._data, raw._times = data, times
dur = int((stop_time - start_time) / 60)
raw.save(raw.info['filename'].split('/')[-1].split('.')[0]+'_'+str(dur)+'m.fif')
# For the moment, simply warn.
logger.warning('The file name is not saved in standard form.')
return
Example 5: label_svd
def label_svd(sub_leadfield, n_svd_comp, ch_names):
""" Computes SVD of subleadfield for sensor types separately
Parameters:
-----------
sub_leadfield: numpy array (n_sens x n_vert) with part of the
leadfield matrix
n_svd_comp: scalar, number of SVD components required
ch_names: list of channel names
Returns:
--------
this_label_lfd_summary: numpy array, n_svd_comp scaled SVD components
of subleadfield
OH Aug 2015
"""
logger.info("\nComputing SVD within labels, using %d component(s)"
% n_svd_comp)
    EEG_idx = [cc for cc in range(len(ch_names)) if ch_names[cc][:3] == 'EEG']
    MAG_idx = [cc for cc in range(len(ch_names)) if (ch_names[cc][:3] == 'MEG'
               and ch_names[cc][-1:] == '1')]
    GRA_idx = [cc for cc in range(len(ch_names)) if (ch_names[cc][:3] == 'MEG'
               and (ch_names[cc][-1:] == '2' or ch_names[cc][-1:] == '3'))]
list_idx = []
    u_idx = -1  # track which element of u_svd belongs to which sensor type
if MAG_idx:
list_idx.append(MAG_idx)
u_idx += 1
u_mag = u_idx
if GRA_idx:
list_idx.append(GRA_idx)
u_idx += 1
u_gra = u_idx
if EEG_idx:
list_idx.append(EEG_idx)
u_idx += 1
u_eeg = u_idx
    # compute SVD of sub-leadfield for individual sensor types
u_svd = [get_svd_comps(sub_leadfield[ch_idx,:], n_svd_comp) for ch_idx
in list_idx]
# put sensor types back together
    this_label_lfd_summary = np.zeros([len(ch_names), u_svd[0].shape[1]])
if MAG_idx:
this_label_lfd_summary[MAG_idx] = u_svd[u_mag]
if GRA_idx:
this_label_lfd_summary[GRA_idx] = u_svd[u_gra]
if EEG_idx:
this_label_lfd_summary[EEG_idx] = u_svd[u_eeg]
return this_label_lfd_summary
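A minimal sketch with a fake sub-leadfield (channel names, shapes and values are all made up; this relies on get_svd_comps from Example 15 below):

import numpy as np

# Two magnetometers (MEG names ending in '1'), two gradiometers ('2'/'3')
# and two EEG channels, with 20 source vertices.
ch_names = ['MEG 0111', 'MEG 0121', 'MEG 0112', 'MEG 0113',
            'EEG 001', 'EEG 002']
sub_leadfield = np.random.randn(len(ch_names), 20)
summary = label_svd(sub_leadfield, n_svd_comp=1, ch_names=ch_names)
print(summary.shape)  # (6, 1)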
Example 6: _interpolate_bads_eeg
def _interpolate_bads_eeg(inst, picks=None, verbose=None):
""" Interpolate bad EEG channels.
Operates in place.
Parameters
----------
inst : mne.io.Raw, mne.Epochs or mne.Evoked
The data to interpolate. Must be preloaded.
    picks : np.ndarray, shape (n_channels,) | list | None
        The channel indices to be used for interpolation.
"""
from mne.bem import _fit_sphere
from mne.utils import logger, warn
from mne.channels.interpolation import _do_interp_dots
from mne.channels.interpolation import _make_interpolation_matrix
import numpy as np
if picks is None:
picks = pick_types(inst.info, meg=False, eeg=True, exclude=[])
    bads_idx = np.zeros(len(inst.ch_names), dtype=bool)
    goods_idx = np.zeros(len(inst.ch_names), dtype=bool)
inst.info._check_consistency()
bads_idx[picks] = [inst.ch_names[ch] in inst.info['bads'] for ch in picks]
if len(picks) == 0 or bads_idx.sum() == 0:
return
goods_idx[picks] = True
goods_idx[bads_idx] = False
pos = inst._get_channel_positions(picks)
# Make sure only good EEG are used
bads_idx_pos = bads_idx[picks]
goods_idx_pos = goods_idx[picks]
pos_good = pos[goods_idx_pos]
pos_bad = pos[bads_idx_pos]
# test spherical fit
radius, center = _fit_sphere(pos_good)
distance = np.sqrt(np.sum((pos_good - center) ** 2, 1))
distance = np.mean(distance / radius)
if np.abs(1. - distance) > 0.1:
warn('Your spherical fit is poor, interpolation results are '
'likely to be inaccurate.')
logger.info('Computing interpolation matrix from {0} sensor '
'positions'.format(len(pos_good)))
interpolation = _make_interpolation_matrix(pos_good, pos_bad)
logger.info('Interpolating {0} sensors'.format(len(pos_bad)))
_do_interp_dots(inst, interpolation, goods_idx, bads_idx)
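A minimal usage sketch, assuming raw is a preloaded mne.io.Raw instance with EEG channel positions set (the bad channel name is hypothetical):

# Hypothetical usage: mark one channel bad, then interpolate it in place.
raw.info['bads'] = ['EEG 053']
_interpolate_bads_eeg(raw)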
Example 7: _check_fname
def _check_fname(fname, overwrite=False, must_exist=False):
"""Check for file existence."""
_validate_type(fname, 'str', 'fname')
from mne.utils import logger
if must_exist and not op.isfile(fname):
raise IOError('File "%s" does not exist' % fname)
if op.isfile(fname):
if not overwrite:
raise IOError('Destination file exists. Please use option '
'"overwrite=True" to force overwriting.')
elif overwrite != 'read':
logger.info('Overwriting existing file.')
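A short sketch of the three behaviours (the file names are hypothetical):

# Hypothetical usage:
_check_fname('in-raw.fif', must_exist=True)    # raises IOError if missing
_check_fname('out-raw.fif', overwrite=False)   # raises IOError if it exists
_check_fname('out-raw.fif', overwrite=True)    # logs and permits overwriting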
Example 8: _find_bad_channels
def _find_bad_channels(epochs, picks, use_metrics, thresh, max_iter):
"""Implements the first step of the FASTER algorithm.
This function attempts to automatically mark bad EEG channels by performing
    outlier detection. It operates on epoched data to make sure only relevant
    data is analyzed.
Additional Parameters
---------------------
use_metrics : list of str
List of metrics to use. Can be any combination of:
'variance', 'correlation', 'hurst', 'kurtosis', 'line_noise'
Defaults to all of them.
thresh : float
The threshold value, in standard deviations, to apply. A channel
crossing this threshold value is marked as bad. Defaults to 3.
max_iter : int
The maximum number of iterations performed during outlier detection
(defaults to 1, as in the original FASTER paper).
"""
from scipy.stats import kurtosis
metrics = {
'variance': lambda x: np.var(x, axis=1),
'correlation': lambda x: np.mean(
np.ma.masked_array(np.corrcoef(x),
np.identity(len(x), dtype=bool)), axis=0),
'hurst': lambda x: _hurst(x),
'kurtosis': lambda x: kurtosis(x, axis=1),
'line_noise': lambda x: _freqs_power(x, epochs.info['sfreq'],
[50, 60]),
}
if use_metrics is None:
use_metrics = metrics.keys()
# Concatenate epochs in time
data = epochs.get_data()[:, picks]
data = data.transpose(1, 0, 2).reshape(data.shape[1], -1)
# Find bad channels
bads = defaultdict(list)
info = pick_info(epochs.info, picks, copy=True)
for ch_type, chs in _picks_by_type(info):
logger.info('Bad channel detection on %s channels:' % ch_type.upper())
for metric in use_metrics:
scores = metrics[metric](data[chs])
bad_channels = [epochs.ch_names[picks[chs[i]]]
for i in find_outliers(scores, thresh, max_iter)]
logger.info('\tBad by %s: %s' % (metric, bad_channels))
bads[metric].append(bad_channels)
bads = dict((k, np.concatenate(v).tolist()) for k, v in bads.items())
return bads
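A minimal sketch, assuming epochs is an mne.Epochs instance and that find_outliers and _hurst come from the surrounding module:

import mne

# Hypothetical usage: run the FASTER channel check on all EEG picks.
picks = mne.pick_types(epochs.info, meg=False, eeg=True, exclude=[])
bads = _find_bad_channels(epochs, picks,
                          use_metrics=['variance', 'kurtosis'],
                          thresh=3, max_iter=1)
print(bads)  # dict of metric -> list of bad channel names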
Example 9: _find_bad_channels_in_epochs
def _find_bad_channels_in_epochs(epochs, picks, use_metrics, thresh, max_iter):
"""Implements the fourth step of the FASTER algorithm.
    This function attempts to automatically mark bad channels in each epoch by
    performing outlier detection.
Additional Parameters
---------------------
use_metrics : list of str
        List of metrics to use. Can be any combination of:
        'amplitude', 'variance', 'deviation', 'median_gradient', 'line_noise'.
        Defaults to all of them.
thresh : float
The threshold value, in standard deviations, to apply. A channel
crossing this threshold value is marked as bad. Defaults to 3.
max_iter : int
The maximum number of iterations performed during outlier detection
(defaults to 1, as in the original FASTER paper).
"""
metrics = {
'amplitude': lambda x: np.ptp(x, axis=2),
'deviation': lambda x: _deviation(x),
'variance': lambda x: np.var(x, axis=2),
'median_gradient': lambda x: np.median(np.abs(np.diff(x)), axis=2),
'line_noise': lambda x: _freqs_power(x, epochs.info['sfreq'],
[50, 60]),
}
if use_metrics is None:
use_metrics = metrics.keys()
info = pick_info(epochs.info, picks, copy=True)
data = epochs.get_data()[:, picks]
bads = dict((m, np.zeros((len(data), len(picks)), dtype=bool)) for
m in metrics)
for ch_type, chs in _picks_by_type(info):
ch_names = [info['ch_names'][k] for k in chs]
chs = np.array(chs)
for metric in use_metrics:
logger.info('Bad channel-in-epoch detection on %s channels:'
% ch_type.upper())
s_epochs = metrics[metric](data[:, chs])
for i_epochs, epoch in enumerate(s_epochs):
outliers = find_outliers(epoch, thresh, max_iter)
if len(outliers) > 0:
bad_segment = [ch_names[k] for k in outliers]
logger.info('Epoch %d, Bad by %s:\n\t%s' % (
i_epochs, metric, bad_segment))
bads[metric][i_epochs, chs[outliers]] = True
return bads
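A minimal sketch of the returned per-epoch masks, assuming epochs is an mne.Epochs instance with find_outliers in scope:

import mne

# Hypothetical usage: boolean masks of shape (n_epochs, n_picks) per metric.
picks = mne.pick_types(epochs.info, meg=False, eeg=True, exclude=[])
masks = _find_bad_channels_in_epochs(epochs, picks,
                                     use_metrics=['amplitude', 'variance'],
                                     thresh=3, max_iter=1)
print(masks['amplitude'].shape)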
Example 10: __init__
def __init__(self, input_fname, montage=None, eog=None,
misc=(-4, -3, -2, -1), stim_channel=None, scale=1e-6, sfreq=250,
missing_tol=1, preload=True, verbose=None):
bci_info = {'missing_tol': missing_tol, 'stim_channel': stim_channel}
    openbci_channames = ["FP1", "FP2", "C3", "C4", "P7", "P8", "O1", "O2",
                         "F7", "F8", "F3", "F4", "T7", "T8", "P3", "P4"]
if not eog:
eog = list()
if not misc:
misc = list()
nsamps, nchan = self._get_data_dims(input_fname)
last_samps = [nsamps - 1]
ch_names = ['EEG %03d' % num for num in range(1, nchan + 1)]
ch_names[:nchan-4] = openbci_channames[:nchan-4]
ch_types = ['eeg'] * nchan
if misc:
misc_names = ['MISC %03d' % ii for ii in range(1, len(misc) + 1)]
misc_types = ['misc'] * len(misc)
for ii, mi in enumerate(misc):
ch_names[mi] = misc_names[ii]
ch_types[mi] = misc_types[ii]
if eog:
eog_names = ['EOG %03d' % ii for ii in range(len(eog))]
eog_types = ['eog'] * len(eog)
for ii, ei in enumerate(eog):
ch_names[ei] = eog_names[ii]
ch_types[ei] = eog_types[ii]
if stim_channel:
ch_names[stim_channel] = 'STI 014'
ch_types[stim_channel] = 'stim'
# mark last channel as the timestamp channel
ch_names[-1] = "Timestamps"
ch_types[-1] = "misc"
# fix it for eog and misc marking
info = create_info(ch_names, sfreq, ch_types, montage)
info["buffer_size_sec"] = 1.
super(RawOpenBCI, self).__init__(info, last_samps=last_samps,
raw_extras=[bci_info],
filenames=[input_fname],
preload=False, verbose=verbose)
# load data
if preload:
self.preload = preload
logger.info('Reading raw data from %s...' % input_fname)
self._data = self._read_segment()
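A minimal usage sketch, assuming RawOpenBCI is the reader class this constructor belongs to (the file name is hypothetical):

# Hypothetical usage: read an OpenBCI text recording sampled at 250 Hz.
raw = RawOpenBCI('openbci_recording.txt', sfreq=250, preload=True)
print(raw.ch_names[:4], raw.n_times)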
Example 11: read_info
def read_info(subject, data_type, run_index=0, hcp_path=op.curdir):
"""Read info from unprocessed data
Parameters
----------
subject : str, file_map
The subject
data_type : str
The kind of data to read. The following options are supported:
'rest'
'task_motor'
'task_story_math'
'task_working_memory'
'noise_empty_room'
'noise_subject'
run_index : int
The run index. For the first run, use 0, for the second, use 1.
Also see HCP documentation for the number of runs for a given data
type.
hcp_path : str
The HCP directory, defaults to op.curdir.
Returns
-------
info : instance of mne.io.meas_info.Info
The MNE channel info object.
.. note::
        HCP MEG only delivers 3 of the 5 task packages available from HCP MRI.
"""
raw, config = get_file_paths(
subject=subject, data_type=data_type, output='raw',
run_index=run_index, hcp_path=hcp_path)
if not op.exists(raw):
raw = None
meg_info = _read_bti_info(raw, config)
if raw is None:
logger.info('Did not find Raw data. Guessing EMG, ECG and EOG '
'channels')
rename_channels(meg_info, dict(_label_mapping))
return meg_info
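A minimal usage sketch (the subject ID and HCP directory are hypothetical):

# Hypothetical usage: channel info for the first resting-state run.
info = read_info(subject='100307', data_type='rest', run_index=0,
                 hcp_path='/data/hcp')
print(info['sfreq'], len(info['ch_names']))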
Example 12: _make_interpolator
def _make_interpolator(inst, bad_channels):
"""Find indexes and interpolation matrix to interpolate bad channels
    Parameters
    ----------
    inst : mne.io.Raw, mne.Epochs or mne.Evoked
        The data to interpolate. Must be preloaded.
    bad_channels : list of str
        Names of the bad channels to interpolate.
    """
    bads_idx = np.zeros(len(inst.ch_names), dtype=bool)
    goods_idx = np.zeros(len(inst.ch_names), dtype=bool)
picks = pick_types(inst.info, meg=False, eeg=True, exclude=[])
bads_idx[picks] = [inst.ch_names[ch] in bad_channels for ch in picks]
goods_idx[picks] = True
goods_idx[bads_idx] = False
if bads_idx.sum() != len(bad_channels):
logger.warning('Channel interpolation is currently only implemented '
'for EEG. The MEG channels marked as bad will remain '
'untouched.')
pos = get_channel_positions(inst, picks)
# Make sure only EEG are used
bads_idx_pos = bads_idx[picks]
goods_idx_pos = goods_idx[picks]
pos_good = pos[goods_idx_pos]
pos_bad = pos[bads_idx_pos]
# test spherical fit
radius, center = _fit_sphere(pos_good)
distance = np.sqrt(np.sum((pos_good - center) ** 2, 1))
distance = np.mean(distance / radius)
if np.abs(1. - distance) > 0.1:
logger.warning('Your spherical fit is poor, interpolation results are '
'likely to be inaccurate.')
logger.info('Computing interpolation matrix from {0} sensor '
'positions'.format(len(pos_good)))
interpolation = _make_interpolation_matrix(pos_good, pos_bad)
return goods_idx, bads_idx, interpolation
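A minimal sketch of how Example 1 consumes the returned triple (the channel names are hypothetical; epochs must be a preloaded mne.Epochs instance):

import numpy as np

# Hypothetical usage: build the interpolator once, then apply it per epoch.
goods_idx, bads_idx, interpolation = _make_interpolator(epochs, ['F1', 'FZ'])
epochs._data[0, bads_idx, :] = np.dot(interpolation,
                                      epochs._data[0, goods_idx, :])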
Example 13: _find_bad_epochs
def _find_bad_epochs(epochs, picks, use_metrics, thresh, max_iter):
"""Implements the second step of the FASTER algorithm.
This function attempts to automatically mark bad epochs by performing
outlier detection.
Additional Parameters
---------------------
use_metrics : list of str
List of metrics to use. Can be any combination of:
'amplitude', 'variance', 'deviation'. Defaults to all of them.
    thresh : float
        The threshold value, in standard deviations, to apply. An epoch
        crossing this threshold value is marked as bad. Defaults to 3.
max_iter : int
The maximum number of iterations performed during outlier detection
(defaults to 1, as in the original FASTER paper).
"""
metrics = {
'amplitude': lambda x: np.mean(np.ptp(x, axis=2), axis=1),
'deviation': lambda x: np.mean(_deviation(x), axis=1),
'variance': lambda x: np.mean(np.var(x, axis=2), axis=1),
}
if use_metrics is None:
use_metrics = metrics.keys()
info = pick_info(epochs.info, picks, copy=True)
data = epochs.get_data()[:, picks]
bads = defaultdict(list)
for ch_type, chs in _picks_by_type(info):
logger.info('Bad epoch detection on %s channels:' % ch_type.upper())
for metric in use_metrics:
scores = metrics[metric](data[:, chs])
bad_epochs = find_outliers(scores, thresh, max_iter)
logger.info('\tBad by %s: %s' % (metric, bad_epochs))
bads[metric].append(bad_epochs)
bads = dict((k, np.concatenate(v).tolist()) for k, v in bads.items())
return bads
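A minimal sketch, assuming epochs is an mne.Epochs instance and find_outliers is in scope (Epochs.drop is standard MNE API):

import mne

# Hypothetical usage: detect bad epochs on EEG picks, then drop them.
picks = mne.pick_types(epochs.info, meg=False, eeg=True, exclude=[])
bads = _find_bad_epochs(epochs, picks, use_metrics=None, thresh=3, max_iter=1)
bad_idx = sorted(set(sum(bads.values(), [])))
epochs.drop(bad_idx)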
Example 14: __init__
def __init__(self, input_fname, montage=None, eog=None,
misc=(-3, -2, -1), stim_channel=None, scale=1e-6, sfreq=250,
missing_tol=1, preload=True, verbose=None):
bci_info = {'missing_tol': missing_tol, 'stim_channel': stim_channel}
if not eog:
eog = list()
if not misc:
misc = list()
nsamps, nchan = self._get_data_dims(input_fname)
last_samps = [nsamps - 1]
ch_names = ['EEG %03d' % num for num in range(1, nchan + 1)]
ch_types = ['eeg'] * nchan
if misc:
misc_names = ['MISC %03d' % ii for ii in range(1, len(misc) + 1)]
misc_types = ['misc'] * len(misc)
for ii, mi in enumerate(misc):
ch_names[mi] = misc_names[ii]
ch_types[mi] = misc_types[ii]
if eog:
eog_names = ['EOG %03d' % ii for ii in range(len(eog))]
eog_types = ['eog'] * len(eog)
for ii, ei in enumerate(eog):
ch_names[ei] = eog_names[ii]
ch_types[ei] = eog_types[ii]
if stim_channel:
ch_names[stim_channel] = 'STI 014'
ch_types[stim_channel] = 'stim'
# fix it for eog and misc marking
info = create_info(ch_names, sfreq, ch_types, montage)
super(RawOpenBCI, self).__init__(info, last_samps=last_samps,
raw_extras=[bci_info],
filenames=[input_fname],
preload=False, verbose=verbose)
# load data
if preload:
self.preload = preload
logger.info('Reading raw data from %s...' % input_fname)
self._data, _ = self._read_segment()
Example 15: get_svd_comps
def get_svd_comps(sub_leadfield, n_svd_comp):
""" Compute SVD components of sub-leadfield for selected channels
(all channels in one SVD)
Parameters:
-----------
sub_leadfield: numpy array (n_sens x n_vert) with part of the leadfield matrix
n_svd_comp: scalar, number of SVD components required
Returns:
--------
    u_svd: numpy array, n_svd_comp SVD components of the subleadfield for
        the selected channels, each scaled by its singular value
    """
u_svd, s_svd, _ = np.linalg.svd(sub_leadfield,
full_matrices=False,
compute_uv=True)
# get desired first vectors of u_svd
u_svd = u_svd[:, :n_svd_comp]
# project SVD components on sub-leadfield, take sum over vertices
u_svd_proj = u_svd.T.dot(sub_leadfield).sum(axis=1)
# make sure overall projection has positive sign
u_svd = u_svd.dot(np.sign(np.diag(u_svd_proj)))
u_svd = u_svd * s_svd[:n_svd_comp][np.newaxis, :]
logger.info("\nFirst 5 singular values (n=%d): %s" % (u_svd.shape[0], \
s_svd[0:5]))
# explained variance by chosen components within sub-leadfield
my_comps = s_svd[0:n_svd_comp]
comp_var = (100. * np.sum(my_comps * my_comps) /
np.sum(s_svd * s_svd))
logger.info("Your %d component(s) explain(s) %.1f%% "
"variance." % (n_svd_comp, comp_var))
return u_svd
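A minimal sketch with a random sub-leadfield (all shapes are made up):

import numpy as np

# Hypothetical usage: keep the two strongest components of a
# 10-sensor x 50-vertex sub-leadfield.
sub_leadfield = np.random.randn(10, 50)
u_svd = get_svd_comps(sub_leadfield, n_svd_comp=2)
print(u_svd.shape)  # (10, 2)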