

Python Raw.index_as_time Method Code Examples

This article collects typical usage examples of the Python method mne.io.Raw.index_as_time. If you are unsure how Raw.index_as_time is used in practice, the selected code examples below may help. You can also explore further usage examples of the class it belongs to, mne.io.Raw.


Two code examples of the Raw.index_as_time method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
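For orientation, the snippet below is a minimal sketch of how the conversion methods relate, assuming an older MNE release in which Raw.index_as_time is still available; the path fname is a placeholder:

from mne.io import Raw

# `fname` is a placeholder path to a FIF recording.
raw = Raw(fname, preload=False)

# Convert sample indices to times in seconds; the second argument
# (use_first_samp) controls whether raw.first_samp is accounted for.
times = raw.index_as_time([0, 100, 1000], False)

# time_as_index is the inverse conversion, from seconds back to sample indices.
indices = raw.time_as_index(times, False)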

Example 1: test_raw_index_as_time

# Required import: from mne.io import Raw [as alias]
# Or: from mne.io.Raw import index_as_time [as alias]
from nose.tools import assert_true
from numpy.testing import assert_array_almost_equal

from mne.io import Raw

# `fif_fname` is a module-level path to a sample FIF file defined in the
# original test_raw.py; it is not shown in this snippet.
def test_raw_index_as_time():
    """Test index-to-time conversion."""
    raw = Raw(fif_fname, preload=True)
    t0 = raw.index_as_time([0], True)[0]
    t1 = raw.index_as_time([100], False)[0]
    t2 = raw.index_as_time([100], True)[0]
    assert_true((t2 - t1) == t0)
    # ensure we can go back and forth
    t3 = raw.index_as_time(raw.time_as_index([0], True), True)
    assert_array_almost_equal(t3, [0.0], 2)
    t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], True), True)
    assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
    t3 = raw.index_as_time(raw.time_as_index(raw.info['sfreq'], False), False)
    assert_array_almost_equal(t3, [raw.info['sfreq']], 2)
    i0 = raw.time_as_index(raw.index_as_time([0], True), True)
    assert_true(i0[0] == 0)
    i1 = raw.time_as_index(raw.index_as_time([100], True), True)
    assert_true(i1[0] == 100)
    # Have to add small amount of time because we truncate via int casting
    i1 = raw.time_as_index(raw.index_as_time([100.0001], False), False)
    assert_true(i1[0] == 100)
Developer: pombreda | Project: mne-python | Lines: 23 | Source file: test_raw.py

Example 2: make_ecr_events

# Required import: from mne.io import Raw [as alias]
# Or: from mne.io.Raw import index_as_time [as alias]
import re

import numpy as np
from pandas import read_csv

from mne import find_events, write_events
from mne.io import Raw


def make_ecr_events(raw_file, data_file, out_file, pattern=False):
    """Build an MNE events array from a behavioral CSV file and a raw FIF file.

    If given, `pattern` should be a sequence of regex strings used to filter
    and recode the event IDs.
    """
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Load behavioral file.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    data = read_csv(data_file)
    n_events, _ = data.shape
    n_trials, _ = data[ data.Condition != 0 ].shape # <--- Excludes rest trials.
    n_responses = (~np.isnan(data[data.Condition!=0].ResponseOnset)).sum()

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Load raw file.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    raw = Raw(raw_file, preload=False, verbose=False)
    stim_onsets = find_events(raw, stim_channel='STI001', output='onset', verbose=False)
    response_onsets = find_events(raw, stim_channel='STI002', output='onset', verbose=False)

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Error catching.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

    if n_events != stim_onsets.shape[0]:
        raise ValueError('Number of trial onsets in %s and %s do not match!' %(data_file,raw_file))
    elif n_responses != response_onsets.shape[0]:
        raise ValueError('Number of responses in %s and %s do not match!' %(data_file,raw_file))
    else:
        pass

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
    ### Make events file.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#

    # Amend Conflict and ResponseAccuracy categories.
    data['Conflict'] = np.where( data.Conflict == 0, 1, data.Conflict ) # No Conflict: [0,1] --> 1
    data['ResponseAccuracy'] = np.where( data.ResponseAccuracy == 0, 2, data.ResponseAccuracy ) # Accuracy: 0 --> 2
    data['ResponseAccuracy'] = np.where( data.ResponseAccuracy ==99, 3, data.ResponseAccuracy ) # Accuracy:99 --> 3


    # Append Word Valence category.
    data['WordValence'] = np.where(# Con + Angry Face = Angry Word
                                  (data.Condition == 1) & (data.Valence == 1), 1, np.where(
                                   # Con + Happy Face = Happy Word
                                  (data.Condition == 1) & (data.Valence == 2), 2, np.where( 
                                   # Incon + Angry Face = Happy Word
                                  (data.Condition == 2) & (data.Valence == 1), 2, np.where(
                                   # Incon + Happy Face = Angry Word
                                  (data.Condition == 2) & (data.Valence == 2), 1, 99 ))))

    # Make unique identifiers.
    data['StimIDs'] = '1' + data.Condition.map(str) + data.Conflict.map(str) + data.Valence.map(str) +\
                       data.WordValence.map(str) + data.ResponseAccuracy.map(str)
    data['RespIDs'] = '2' + data.Condition.map(str) + data.Conflict.map(str) + data.Valence.map(str) +\
                       data.WordValence.map(str) + data.ResponseAccuracy.map(str)

    # Add identifiers to onset arrays.
    stim_onsets = stim_onsets[np.where(data.Condition == 0, False, True), :]
    stim_onsets[:,2] = data.StimIDs[data.Condition != 0].astype(int)
    response_onsets[:,2] = data.RespIDs[data.ResponseKey != 99].astype(int)

    # Merge and sort.
    events = np.concatenate([stim_onsets,response_onsets])
    events = events[events[:,0].argsort(),:]
    
    # Reduce to events matching any of the supplied regex patterns.
    if pattern:
        p = re.compile('|'.join(pattern))
        idx, = np.where([bool(p.findall(str(event))) for event in events[:, 2]])
        events = events[idx, :]

    # Insert first sample.
    events = np.insert(events, 0, [raw.first_samp, 0, 0], 0)

    # Write to fif file.
    write_events(out_file, events)

    # Write to a text file alongside the FIF events file.
    if out_file.endswith('.fif'):
        out_file = out_file[:-4] + '.txt'
    else:
        out_file = out_file + '.txt'
    if pattern:
        # Recode event IDs by the index (1-based) of the first matching pattern.
        for n, p in enumerate(pattern):
            matches = [bool(re.findall(p, str(event))) for event in events[:, 2]]
            events[:, 2] = np.where(matches, n + 1, events[:, 2])
    # Insert a column of onset times (seconds) after the sample-index column.
    events = np.insert(events, 1, raw.index_as_time(events[:, 0]), 1)
    header = 'pattern = %s' % ' | '.join(pattern) if pattern else ''
    np.savetxt(out_file, events, fmt='%s', header=header)
Developer: ofek-schechner | Project: mmvt | Lines: 83 | Source file: make_ecr_events.py
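
A hypothetical invocation might look like the following; the file paths and the pattern list are placeholders for illustration, not values from the original project:

make_ecr_events('subject01_raw.fif', 'subject01_behavior.csv',
                'subject01-eve.fif', pattern=['^1', '^2'])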


Note: The mne.io.Raw.index_as_time examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; please follow each project's license when redistributing or reusing the code. Do not republish without permission.