This article collects typical usage examples of the Dataset.install method from Python's datalad.api. If you are wondering what exactly Dataset.install does, how to call it, or what its use looks like in practice, the curated code samples below may help. You can also explore further usage of its containing class, datalad.api.Dataset.
Below are 2 code examples of the Dataset.install method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
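Before the collected examples, here is a minimal, hedged sketch of the typical call pattern: create (or open) a superdataset, then install another dataset into it as a subdataset. The /tmp/demo paths are placeholders and not part of the examples on this page; the result_xfm/return_type arguments mirror Example 1 below so that a Dataset object is returned.

# Minimal sketch of Dataset.install usage (placeholder paths, not from the examples below)
from datalad.api import Dataset

super_ds = Dataset('/tmp/demo/super').create()      # a fresh superdataset
other_ds = Dataset('/tmp/demo/other').create()      # the dataset we want to pull in

# install other_ds into super_ds as the subdataset 'sub'
sub_ds = super_ds.install(
    source=other_ds.path, path='sub',
    result_xfm='datasets', return_type='item-or-list')
print(sub_ds.is_installed())                         # True after a successful install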
Example 1: test_dont_trip_over_missing_subds
# Required import: from datalad.api import Dataset [as alias]
# Alternatively: from datalad.api.Dataset import install [as alias]
def test_dont_trip_over_missing_subds(path):
    ds1 = Dataset(opj(path, 'ds1')).create()
    ds2 = Dataset(opj(path, 'ds2')).create()
    subds2 = ds1.install(
        source=ds2.path, path='subds2',
        result_xfm='datasets', return_type='item-or-list')
    assert_true(subds2.is_installed())
    assert_in('subds2', ds1.subdatasets(result_xfm='relpaths'))
    subds2.uninstall()
    assert_in('subds2', ds1.subdatasets(result_xfm='relpaths'))
    assert_false(subds2.is_installed())
    # see if it wants to talk to github (and fail), or if it trips over
    # something before that
    assert_raises(gh.BadCredentialsException,
                  ds1.create_sibling_github, 'bogus', recursive=True,
                  github_login='disabledloginfortesting')
    # inject remote config prior to the run
    assert_not_in('github', ds1.repo.get_remotes())
    # fail on an existing remote
    ds1.repo.add_remote('github', 'http://nothere')
    assert_raises(ValueError,
                  ds1.create_sibling_github, 'bogus', recursive=True,
                  github_login='disabledloginfortesting')
    # talk to github when an existing remote is OK
    assert_raises(gh.BadCredentialsException,
                  ds1.create_sibling_github, 'bogus', recursive=True,
                  github_login='disabledloginfortesting', existing='reconfigure')
    # return happy emptiness when everything is skipped
    assert_equal(
        ds1.create_sibling_github(
            'bogus', recursive=True,
            github_login='disabledloginfortesting', existing='skip'),
        [])
Example 2: test_aggregate_with_missing_or_duplicate_id
# Required import: from datalad.api import Dataset [as alias]
# Alternatively: from datalad.api.Dataset import install [as alias]
def test_aggregate_with_missing_or_duplicate_id(path):
    # a hierarchy of three (super/sub)datasets, each with some native metadata
    ds = Dataset(opj(path, 'origin')).create(force=True)
    subds = ds.create('sub', force=True)
    subds.remove(opj('.datalad', 'config'), if_dirty='ignore')
    assert_false(exists(opj(subds.path, '.datalad', 'config')))
    subsubds = subds.create('subsub', force=True)
    # aggregate from bottom to top, guess native data, no compacting of graph;
    # should yield 6 metadata sets: one implicit and one native per dataset,
    # plus a second native set for the topmost dataset
    aggregate_metadata(ds, guess_native_type=True, recursive=True)
    # now only ask the top superdataset, no recursion, just reading from the cache
    meta = get_metadata(
        ds, guess_type=False, ignore_subdatasets=False, ignore_cache=False)
    # and we still know about subsub (its name shows up in the aggregated metadata)
    for name in ('grandchild_äöü東',):
        assert_true(sum([s.get('name', '') == assure_unicode(name) for s in meta]))
    # but search should not fail
    with swallow_outputs():
        res1 = list(search_('.', regex=True, dataset=ds))
    assert res1
    # and let's see whether we fail on a duplicate dataset id when we
    # install the same dataset twice
    subds_clone = ds.install(source=subds.path, path="subds2")
    with swallow_outputs():
        res2 = list(search_('.', regex=True, dataset=ds))