本文整理汇总了Python中datalad.distribution.dataset.Dataset.get_superdataset方法的典型用法代码示例。如果您正苦于以下问题:Python Dataset.get_superdataset方法的具体用法?Python Dataset.get_superdataset怎么用?Python Dataset.get_superdataset使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类datalad.distribution.dataset.Dataset
的用法示例。
在下文中一共展示了Dataset.get_superdataset方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _DelayedSuper
# 需要导入模块: from datalad.distribution.dataset import Dataset [as 别名]
# 或者: from datalad.distribution.dataset.Dataset import get_superdataset [as 别名]
class _DelayedSuper(object):
"""A helper to delay deduction on super dataset until needed
But if asked and not found -- blow up
"""
def __init__(self, repo):
self._child_dataset = Dataset(repo.path)
self._super = None
def __str__(self):
return str(self.super)
@property
def super(self):
if self._super is None:
# here we must analyze current_ds's super, not the super_ds
self._super = self._child_dataset.get_superdataset()
if not self._super:
raise RuntimeError(
"Cannot determine super dataset for %s, thus "
"cannot inherit anything" % self._child_dataset
)
return self._super
# Lean proxies going through .super
@property
def config(self):
return self.super.config
@property
def repo(self):
return self.super.repo
示例2: __call__
# 需要导入模块: from datalad.distribution.dataset import Dataset [as 别名]
# 或者: from datalad.distribution.dataset.Dataset import get_superdataset [as 别名]
def __call__(
        path=None,
        dataset=None,
        recursive=False,
        check=True,
        if_dirty='save-before'):
    """Uninstall the dataset(s) matching the given paths.

    Parameters
    ----------
    path : str or list, optional
      Path(s) identifying the dataset(s) to uninstall.
    dataset : Dataset or str, optional
      Reference dataset; when given without `path`, the entire dataset
      becomes the target.
    recursive : bool, optional
      Whether to also process subdatasets (forwarded to path resolution).
    check : bool, optional
      Forwarded to ``_uninstall_dataset``; presumably guards against
      data loss before uninstalling -- confirm against its docs.
    if_dirty : str, optional
      Mode passed to ``handle_dirty_datasets`` for datasets with
      unsaved modifications.

    Returns
    -------
    list
      Accumulated result records from ``_uninstall_dataset``.

    Raises
    ------
    ValueError
      If asked to operate on the current/parent directory, on plain
      files, or on a top-level dataset (one without a superdataset).
    """
    if dataset and not path:
        # act on the whole dataset if nothing else was specified
        path = dataset.path if isinstance(dataset, Dataset) else dataset
    content_by_ds, unavailable_paths = Interface._prep(
        path=path,
        dataset=dataset,
        recursive=recursive)
    if unavailable_paths:
        lgr.warning('ignored non-installed paths: %s', unavailable_paths)
    # upfront sanity and compliance checks
    if path_is_under(content_by_ds.keys()):
        # behave like `rm` and refuse to remove where we are
        raise ValueError(
            "refusing to uninstall current or parent directory")
    # reject plain-file targets and top-level datasets; report every
    # offending argument before failing
    args_ok = True
    for ds_path, paths in content_by_ds.items():
        ds = Dataset(ds_path)
        if ds_path not in paths:
            # only individual files were requested for this dataset
            lgr.error(
                "will not act on files at %s (consider the `drop` command)",
                paths)
            args_ok = False
        if not ds.get_superdataset(
                datalad_only=False,
                topmost=False):
            # no superdataset means this is a top-level dataset
            lgr.error(
                "will not uninstall top-level dataset at %s (consider the `remove` command)",
                ds.path)
            args_ok = False
    if not args_ok:
        raise ValueError(
            'inappropriate arguments, see previous error message(s)')
    handle_dirty_datasets(
        content_by_ds, mode=if_dirty, base=dataset)
    results = []
    # iterate over all datasets, starting at the bottom
    # to deinit contained submodules first
    for ds_path in sorted(content_by_ds, reverse=True):
        results.extend(
            # we confirmed the super dataset presence above
            _uninstall_dataset(Dataset(ds_path), check=check, has_super=True))
    # there is nothing to save at the end
    return results