

Python Dataset.get_subdatasets Method Code Examples

This article collects typical usage examples of the Python method datalad.distribution.dataset.Dataset.get_subdatasets. If you are unsure what Dataset.get_subdatasets does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples of the containing class, datalad.distribution.dataset.Dataset.


Three code examples of Dataset.get_subdatasets are shown below, sorted by popularity by default.
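Before the examples, here is a minimal usage sketch. It assumes a DataLad release that still provides Dataset.get_subdatasets (newer DataLad versions replaced this method with the subdatasets command), and the path /tmp/demo is purely illustrative:

# Minimal sketch, not a definitive reference; requires an older DataLad release.
from datalad.distribution.dataset import Dataset

ds = Dataset('/tmp/demo').create()   # create a new superdataset ('/tmp/demo' is a placeholder)
subds = ds.create('sub')             # register a subdataset inside it

# By default, subdataset paths are returned relative to the superdataset root.
print(ds.get_subdatasets())          # -> ['sub']

# Keyword arguments used in the examples below:
#   recursive=True  descends into nested subdatasets
#   absolute=True   returns absolute instead of relative paths
#   fulfilled=True  reports only subdatasets that are actually installed
print(ds.get_subdatasets(recursive=True, absolute=True, fulfilled=True))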

Example 1: test_dirty

# Required import: from datalad.distribution.dataset import Dataset [as alias]
# The method is then called as: Dataset.get_subdatasets [as alias]
def test_dirty(path):
    for mode in _dirty_modes:
        # does nothing without a dataset
        handle_dirty_dataset(None, mode)
    # placeholder, but not yet created
    ds = Dataset(path)
    # unknown mode
    assert_raises(ValueError, handle_dirty_dataset, ds, 'MADEUP')
    # not yet created is very dirty
    assert_raises(RuntimeError, handle_dirty_dataset, ds, 'fail')
    handle_dirty_dataset(ds, 'ignore')
    assert_raises(RuntimeError, handle_dirty_dataset, ds, 'save-before')
    # should yield a clean repo
    ds.create()
    orig_state = ds.repo.get_hexsha()
    _check_all_clean(ds, orig_state)
    # tainted: untracked
    with open(opj(ds.path, 'something'), 'w') as f:
        f.write('some')
    orig_state = _check_auto_save(ds, orig_state)
    # tainted: staged
    with open(opj(ds.path, 'staged'), 'w') as f:
        f.write('some')
    ds.repo.add('staged', git=True)
    orig_state = _check_auto_save(ds, orig_state)
    # tainted: submodule
    # not added to super on purpose!
    subds = ds.create('subds')
    _check_all_clean(subds, subds.repo.get_hexsha())
    ok_clean_git(ds.path)
    # subdataset must be added as a submodule!
    assert_equal(ds.get_subdatasets(), ['subds'])
Developer ID: debanjum, Project: datalad, Lines of code: 34, Source file: test_utils.py

Example 2: process_vanished_paths

# Required import: from datalad.distribution.dataset import Dataset [as alias]
# The method is then called as: Dataset.get_subdatasets [as alias]
def process_vanished_paths(unavailable_paths, content_by_ds):
    # presently unavailable paths could be, e.g., deleted files, or
    # uninstalled subdatasets, or simply nothing -> figure it out and act
    # accordingly
    dsinfo = {}
    nonexistent_paths = []
    for p in unavailable_paths:
        # we need to check whether any of these correspond
        # to a known subdataset, and add those to the list of
        # things to be removed
        toppath = get_dataset_root(p)
        if not toppath:
            nonexistent_paths.append(p)
            continue
        ds = Dataset(toppath)
        dinfo = dsinfo.get(toppath,
                           {'deleted': ds.repo.get_deleted_files(),
                            'subds': ds.get_subdatasets(
                                recursive=False, absolute=True)})
        # cache for a potentially following request
        dsinfo[toppath] = dinfo
        if p in dinfo['subds']:
            # test for subds needs to come first, as it would also show
            # up in "deleted_files"
            # this is a known subdataset that has vanished
            lgr.debug('deinit vanished subdataset {} in {}'.format(p, ds))
            # simply deinit to complete a "forced uninstallation"; without
            # an explicit "remove" there is nothing to be saved in this
            # case
            ds.repo.deinit_submodule(p[len(_with_sep(ds.path)):])
        elif p in dinfo['deleted']:
            # vanished file -> 'git rm' it to stage the change
            ds.repo.remove(p)
            # record that we are "saving" this path
            dpaths = content_by_ds.get(ds.path, [])
            dpaths.append(p)
            content_by_ds[ds.path] = dpaths
        else:
            # this is nothing we can anyhow handle
            nonexistent_paths.append(p)
    return content_by_ds, nonexistent_paths
Developer ID: debanjum, Project: datalad, Lines of code: 43, Source file: save.py
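For orientation, here is a hypothetical call site for the function above. The concrete paths and the initially empty content_by_ds mapping are illustrative assumptions, not part of datalad's API:

# Hypothetical usage sketch; all paths are placeholders.
unavailable_paths = ['/data/super/deleted_file.txt', '/data/super/old_subds']
content_by_ds = {}  # maps a dataset root to the list of paths to be saved in it
content_by_ds, nonexistent = process_vanished_paths(unavailable_paths, content_by_ds)
# 'nonexistent' collects paths that belong to no known dataset and cannot be handled.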

Example 3: test_recursive_save

# Required import: from datalad.distribution.dataset import Dataset [as alias]
# The method is then called as: Dataset.get_subdatasets [as alias]
def test_recursive_save(path):
    ds = Dataset(path).create()
    # nothing to save
    assert_false(ds.save())
    subds = ds.create('sub')
    # subdataset presence already saved
    ok_clean_git(ds.path)
    subsubds = subds.create('subsub')
    assert_equal(
        ds.get_subdatasets(recursive=True, absolute=True, fulfilled=True),
        [subsubds.path, subds.path])
    newfile_name = opj(subsubds.path, 'test')
    with open(newfile_name, 'w') as f:
        f.write('some')
    # saves the status change of the subdataset due to the subsubdataset addition
    assert_equal(ds.save(all_changes=True), [ds])

    # make the new file known to its dataset
    # with #1141 this would be
    #ds.add(newfile_name, save=False)
    subsubds.add(newfile_name, save=False)

    # but remains dirty because of the untracked file down below
    assert ds.repo.dirty
    # auto-add will save nothing deep down without recursive
    assert_equal(ds.save(all_changes=True), [])
    assert ds.repo.dirty
    # with recursive pick up the change in subsubds
    assert_equal(ds.save(all_changes=True, recursive=True), [subsubds, subds, ds])
    # modify content in subsub and try saving
    testfname = newfile_name
    subsubds.unlock(testfname)
    with open(opj(ds.path, testfname), 'w') as f:
        f.write('I am in here!')
    # the following should all do nothing
    # no auto_add
    assert_false(ds.save())
    # no recursive
    assert_false(ds.save(all_changes=True))
    # an explicit target saves only the corresponding dataset
    assert_equal(save(files=[testfname]), [subsubds])
    # plain recursive without any files given will save the beast
    assert_equal(ds.save(recursive=True), [subds, ds])
    # there is nothing else to save
    assert_false(ds.save(all_changes=True, recursive=True))
    # one more time and check that all datasets in the hierarchy get updated
    states = [d.repo.get_hexsha() for d in (ds, subds, subsubds)]
    testfname = opj('sub', 'subsub', 'saveme2')
    with open(opj(ds.path, testfname), 'w') as f:
        f.write('I am in here!')
    assert_true(ds.save(all_changes=True, recursive=True))
    newstates = [d.repo.get_hexsha() for d in (ds, subds, subsubds)]
    for old, new in zip(states, newstates):
        assert_not_equal(old, new)
    # now let's check saving "upwards"
    assert not subds.repo.dirty
    create_tree(subds.path, {"testnew": 'smth', "testadded": "added"})
    subds.repo.add("testadded")
    indexed_files = subds.repo.get_indexed_files()
    assert subds.repo.dirty
    assert ds.repo.dirty

    assert not subsubds.repo.dirty
    create_tree(subsubds.path, {"testnew2": 'smth'})
    assert subsubds.repo.dirty
    # and indexed files didn't change
    assert_equal(indexed_files, subds.repo.get_indexed_files())
    ok_clean_git(subds.repo, untracked=['testnew'],
                 index_modified=['subsub'], head_modified=['testadded'])
    subsubds.save(message="savingtestmessage", super_datasets=True,
                  all_changes=True)
    ok_clean_git(subsubds.repo)
    # but its super should have got only the subsub saved
    # not the file we created
    ok_clean_git(subds.repo, untracked=['testnew'], head_modified=['testadded'])

    # check commits to have correct messages
    # there are no more dedicated superdataset-save commits anymore, because
    # superdatasets get saved as part of the processed hierarchy and can contain
    # other parts in the commit (if so instructed)
    assert_equal(next(subsubds.repo.get_branch_commits('master')).message.rstrip(),
                 'savingtestmessage')
    assert_equal(next(subds.repo.get_branch_commits('master')).message.rstrip(),
                 'savingtestmessage')
    assert_equal(next(ds.repo.get_branch_commits('master')).message.rstrip(),
                 'savingtestmessage')
Developer ID: debanjum, Project: datalad, Lines of code: 88, Source file: test_save.py


Note: The datalad.distribution.dataset.Dataset.get_subdatasets examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.