本文整理汇总了Python中vsc.utils.missing.nub函数的典型用法代码示例。如果您正苦于以下问题:Python nub函数的具体用法?Python nub怎么用?Python nub使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了nub函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: list_software_txt
def list_software_txt(software, detailed=False):
    """
    Return overview of supported software in plain text.

    :param software: software information (structured like list_software does)
    :param detailed: whether or not to return detailed information (incl. version, versionsuffix, toolchain info)
    :return: multi-line string presenting requested info
    """
    res = ['']
    for name in sorted(software, key=lambda n: n.lower()):
        res.append('* %s' % name)
        if not detailed:
            continue
        entries = software[name]
        # description/homepage are taken from the last entry for this software name
        last = entries[-1]
        res.append('')
        res.append(' '.join(last['description'].split('\n')))
        res.append('')
        res.append("homepage: %s" % last['homepage'])
        res.append('')
        # unique (version, versionsuffix) pairs, listed in version order
        uniq_pairs = nub((e['version'], e['versionsuffix']) for e in entries)
        for ver, suff in sorted((LooseVersion(v), vs) for (v, vs) in uniq_pairs):
            tcs = [e['toolchain'] for e in entries if e['version'] == ver and e['versionsuffix'] == suff]
            entry = " * %s v%s" % (name, ver)
            if suff:
                entry += " (versionsuffix: '%s')" % suff
            entry += ": %s" % ', '.join(sorted(nub(tcs)))
            res.append(entry)
        res.append('')
    return '\n'.join(res)
示例2: configure_step
def configure_step(self, srcdir=None, builddir=None):
    """
    Configure build using cmake.

    :param srcdir: source directory to configure from (defaults handled below)
    :param builddir: deprecated; passing it triggers a nosupport error (use 'srcdir' instead)
    """
    if builddir is not None:
        self.log.nosupport("CMakeMake.configure_step: named argument 'builddir' (should be 'srcdir')", "2.0")

    # Set the search paths for CMake: toolchain-provided paths first, then the
    # current environment's paths; nub drops duplicates while keeping order
    tc_ipaths = self.toolchain.get_variable("CPPFLAGS", list)
    tc_lpaths = self.toolchain.get_variable("LDFLAGS", list)
    cpaths = os.getenv('CPATH', '').split(os.pathsep)
    lpaths = os.getenv('LD_LIBRARY_PATH', '').split(os.pathsep)
    include_paths = os.pathsep.join(nub(tc_ipaths + cpaths))
    library_paths = os.pathsep.join(nub(tc_lpaths + lpaths))
    setvar("CMAKE_INCLUDE_PATH", include_paths)
    setvar("CMAKE_LIBRARY_PATH", library_paths)

    default_srcdir = '.'
    if self.cfg.get('separate_build_dir', False):
        # out-of-source build: create a dedicated object dir and build from there,
        # pointing cmake back at the unpacked sources in the start dir
        objdir = os.path.join(self.builddir, 'easybuild_obj')
        try:
            os.mkdir(objdir)
            os.chdir(objdir)
        except OSError, err:
            raise EasyBuildError("Failed to create separate build dir %s in %s: %s", objdir, os.getcwd(), err)
        default_srcdir = self.cfg['start_dir']
示例3: set_mod_paths
def set_mod_paths(self, mod_paths=None):
    """Set mod_paths, based on $MODULEPATH unless a list of module paths is specified."""
    if mod_paths is None:
        # no paths specified: take (existing) module paths from $MODULEPATH, deduplicated via nub
        self.mod_paths = [path for path in nub(curr_module_paths()) if os.path.exists(path)]
    else:
        # deduplicate supplied paths (nub keeps first occurrences), then activate each of them
        self.mod_paths = nub(mod_paths)
        for path in self.mod_paths:
            self.prepend_module_path(path)
    self.log.debug("$MODULEPATH after set_mod_paths: %s" % os.environ.get('MODULEPATH', ''))
示例4: test_conflicts
def test_conflicts(self):
    """Check whether any conflicts occur in software dependency graphs."""
    if not single_tests_ok:
        print "(skipped conflicts test)"
        return

    if self.ordered_specs is None:
        self.process_all_easyconfigs()

    def mk_dep_mod_name(spec):
        # full module name for a dependency spec, split into its path components
        return tuple(EasyBuildMNS().det_full_module_name(spec).split(os.path.sep))

    # construct a dictionary: (name, installver) tuple to (build) dependencies
    depmap = {}
    for spec in self.ordered_specs:
        # exclude external modules, since we can't check conflicts on them (we don't even know the software name)
        build_deps = [mk_dep_mod_name(d) for d in spec['builddependencies'] if not d.get('external_module', False)]
        deps = [mk_dep_mod_name(d) for d in spec['ec'].all_dependencies if not d.get('external_module', False)]

        # separate runtime deps from build deps
        runtime_deps = [d for d in deps if d not in build_deps]

        key = tuple(spec['full_mod_name'].split(os.path.sep))
        depmap.update({key: [build_deps, runtime_deps]})

    # iteratively expand list of dependencies, until a fixed point is reached
    # (a deep copy is compared against, since the loop below mutates depmap in place)
    depmap_last = None
    while depmap != depmap_last:
        depmap_last = copy.deepcopy(depmap)

        for (spec, (build_deps, runtime_deps)) in depmap_last.items():
            # extend runtime dependencies with non-build dependencies of own runtime dependencies
            for dep in runtime_deps:
                depmap[spec][1].extend([d for d in depmap[dep][1] if d not in depmap[dep][0]])
            depmap[spec][1] = sorted(nub(depmap[spec][1]))

            # extend build dependencies with non-build dependencies of own build dependencies
            for dep in build_deps:
                depmap[spec][0].extend([d for d in depmap[dep][1] if d not in depmap[dep][0]])
            depmap[spec][0] = sorted(nub(depmap[spec][0]))

    def check_conflict((name, installver), (name1, installver1), (name2, installver2)):
        """Check whether dependencies with given name/(install) version conflict with each other."""
        # dependencies with the same name should have the exact same install version
        # if not => CONFLICT!
        if name1 == name2 and installver1 != installver2:
            specname = '%s-%s' % (name, installver)
            vs_msg = "%s-%s vs %s-%s" % (name1, installver1, name2, installver2)
            print "Conflict found for dependencies of %s: %s" % (specname, vs_msg)
            return True
        else:
            return False
示例5: get_convert_class
def get_convert_class(class_name):
    """
    Return the Convert subclass with the specified class name.

    :param class_name: name of the Convert subclass to look up
    :return: the matching class; logs an error (and returns None) when there is
             not exactly one match
    """
    res = [x for x in nub(get_subclasses(Convert)) if x.__name__ == class_name]
    if len(res) == 1:
        return res[0]
    else:
        # fixed: original message claimed "More then one" even when *no* subclass matched;
        # report the actual number of matches instead
        _log.error('Expected exactly one Convert subclass for name %s, found %d: %s' % (class_name, len(res), res))
示例6: list_toolchains_rst
def list_toolchains_rst(tcs):
    """ Returns overview of all toolchains in rst format """
    title = "List of known toolchains"

    # column titles: fixed leading columns plus every key seen in any toolchain,
    # deduplicated in first-seen order
    table_titles = ['name', 'compiler', 'MPI']
    for tc_info in tcs.values():
        table_titles.extend(tc_info.keys())
    table_titles = nub(table_titles)

    # first column holds the (bold) toolchain names;
    # remaining columns hold comma-joined values per toolchain (empty when absent)
    table_values = [[] for _ in table_titles]
    table_values[0] = ['**%s**' % tcname for tcname in tcs.keys()]
    for idx, col_title in enumerate(table_titles):
        if idx == 0:
            continue
        for tc_info in tcs.values():
            table_values[idx].append(', '.join(tc_info.get(col_title.upper(), [])))

    # prettify selected column headers
    col_names = {
        'COMPILER_CUDA': 'CUDA compiler',
        'SCALAPACK': 'ScaLAPACK',
    }
    table_titles = [col_names.get(col, col) for col in table_titles]

    return '\n'.join(rst_title_and_table(title, table_titles, table_values))
示例7: __init__
def __init__(self, mod_paths=None):
    """
    Create a ModulesTool object
    @param mod_paths: A list of paths where the modules can be located
    @type mod_paths: list
    """
    self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)

    # deduplicate supplied module paths (nub keeps first occurrences); None when none given
    self.mod_paths = nub(mod_paths) if mod_paths else None

    # DEPRECATED!
    self._modules = []

    self.check_module_path()

    # actual module command (i.e., not the 'module' wrapper function, but the binary)
    self.cmd = None
    # shell that should be used to run module command (specified above) in (if any)
    self.shell = None
    # version of modules tool
    self.version = None
    # prepends the terse option to a given argument list, in place
    self.add_terse_opt_fn = lambda args: args.insert(0, '--terse')
示例8: check_module_path
def check_module_path(self):
    """
    Check if MODULEPATH is set and change it if necessary.
    """
    if 'MODULEPATH' not in os.environ:
        errormsg = 'MODULEPATH not found in environment'
        # check if environment-modules is found
        module_regexp = re.compile(r"^module is a function\s*\nmodule\s*()")
        cmd = "type module"
        (output, exit_code) = run_cmd(cmd, log_all=False, log_ok=False)
        if exit_code != 0 or module_regexp.match(output) is None:
            errormsg += "; environment-modules doesn't seem to be installed: "
            errormsg += "'%s' failed with exit code %s and output: '%s'" % (cmd, exit_code, output.strip('\n'))
        self.log.error(errormsg)

    if self.mod_paths:
        # set the module path environment accordingly
        os.environ['MODULEPATH'] = ':'.join(self.mod_paths)
        self.log.debug("$MODULEPATH set based on supplied list of module paths: %s" % os.environ['MODULEPATH'])
    else:
        # take module path from environment
        self.mod_paths = nub(os.environ['MODULEPATH'].split(':'))
        self.log.debug("self.mod_paths set based on $MODULEPATH: %s" % self.mod_paths)

    if 'LOADEDMODULES' not in os.environ:
        os.environ['LOADEDMODULES'] = ''
示例9: test_env_variables
def test_env_variables(self):
    """ Test the passing of (extra) variables """
    # NOTE(review): '[email protected]' below looks like a scrape artifact of the shell's '"$@"'
    # — confirm against the upstream source before relying on this script text
    fake_mpirun_env = """#!/bin/bash
echo 'fake mpirun called with args:' [email protected]
env
"""
    install_fake_mpirun('mpirun', self.tmpdir, 'impi', '5.1.2', txt=fake_mpirun_env)

    # prepend a marker entry so we can verify $PYTHONPATH is passed through
    os.environ['PYTHONPATH'] = '/just/an/example:%s' % os.getenv('PYTHONPATH', '')

    cmd = [
        sys.executable,
        self.mympiscript,
        "--variablesprefix=USER",
        "hostname",
    ]
    ec, out = run(cmd)

    # every relevant variable that is set in the environment must show up in the output
    for key in nub([var for var in MPI.OPTS_FROM_ENV_BASE if var in os.environ]):
        self.assertTrue(key in out, "%s is not in out" % key)

    regex = r'.*-envlist [^ ]*USER.*'
    self.assertTrue(re.search(regex, out), "Variablesprefix USER isn't passed to mympirun script env")
    regex = r'PYTHONPATH=/just/an/example:.*'
    self.assertTrue(re.search(regex, out), "PYTHONPATH isn't passed to mympirun script env correctly: %s" % out)
示例10: list_filesets
def list_filesets(self, devices=None, filesetnames=None, update=False):
    """Get all the filesets for one or more specific devices

    @type devices: list of devices (if string: 1 device; if None: all found devices)
    @type filesetnames: report only on specific filesets (if string: 1 filesetname)
    @type update: bool; when False and fileset info was looked up before, the cached result is returned

    set self.gpfslocalfilesets is dict with
        key = filesystemName value is dict with
            key = id value is dict
                key = remaining header entries and corresponding values
    """
    # return cached result unless an explicit refresh is requested
    if not update and self.gpfslocalfilesets:
        return self.gpfslocalfilesets

    opts = []

    if devices is None:
        # get all devices from all filesystems
        if self.gpfslocalfilesystems is None:
            self.list_filesystems()
        devices = self.gpfslocalfilesystems.keys()
    else:
        if isinstance(devices, str):
            devices = [devices]

    if filesetnames is not None:
        if isinstance(filesetnames, str):
            filesetnames = [filesetnames]
        # mmlsfileset takes a comma-separated list of fileset names
        filesetnamestxt = ','.join(filesetnames)
        opts.append(filesetnamestxt)

    self.log.debug("Looking up filesets for devices %s" % (devices))

    # accumulate per-device results: values are lists, concatenated per key
    listm = Monoid([], lambda xs, ys: xs + ys)
    info = MonoidDict(listm)
    for device in devices:
        opts_ = copy.deepcopy(opts)
        # device name goes before the fileset names (insert at 1 simply appends when opts_ is empty)
        opts_.insert(1, device)
        res = self._executeY('mmlsfileset', opts_)
        # for v3.5 filesystemName:filesetName:id:rootInode:status:path:parentId:created:inodes:dataInKB:comment:filesetMode:afmTarget:afmState:afmMode:afmFileLookupRefreshInterval:afmFileOpenRefreshInterval:afmDirLookupRefreshInterval:afmDirOpenRefreshInterval:afmAsyncDelay:reserved:afmExpirationTimeout:afmRPO:afmLastPSnapId:inodeSpace:isInodeSpaceOwner:maxInodes:allocInodes:inodeSpaceMask:afmShowHomeSnapshots:afmNumReadThreads:afmNumReadGWs:afmReadBufferSize:afmWriteBufferSize:afmReadSparseThreshold:afmParallelReadChunkSize:afmParallelReadThreshold:snapId:
        self.log.debug("list_filesets res keys = %s " % (res.keys()))
        for (key, value) in res.items():
            info[key] = value

    # the remaining keys hold the per-fileset attribute columns
    # (NOTE: relies on Python 2 dict.keys() returning a list that supports .remove)
    datakeys = info.keys()
    datakeys.remove('filesystemName')
    datakeys.remove('id')

    fss = nub(info.get('filesystemName', []))
    res = dict([(fs, {}) for fs in fss])  # build structure
    # zip the filesystemName/id columns back together with the attribute columns, row by row
    for idx, (fs, qid) in enumerate(zip(info['filesystemName'], info['id'])):
        details = dict([(k, info[k][idx]) for k in datakeys])
        res[fs][qid] = details

    self.gpfslocalfilesets = res
    return res
示例11: test_make_machine_file
def test_make_machine_file(self):
    """test if the machinefile is made and if it contains the same amount of nodes as mpinodes"""
    mpi_instance = getinstance(mpim.MPI, Local, MympirunOption())
    mpi_instance.make_machine_file()
    self.assertTrue(os.path.isfile(mpi_instance.mpiexec_node_filename), msg="the nodefile has not been created")

    # test if amount of lines in nodefile matches amount of nodes
    with open(mpi_instance.mpiexec_node_filename) as nodefile_fh:
        last_idx = 0
        for last_idx, _ in enumerate(nodefile_fh):
            pass
        self.assertEqual(len(mpi_instance.mpinodes), last_idx + 1,
                         msg="mpinodes doesn't match the amount of nodes in the nodefile")

    # disable make_mympirundir
    mpi_instance.make_mympirundir = lambda: True
    mpi_instance.mympirundir = '/does/not/exist/'
    self.assertErrorRegex(IOError, "failed to write nodefile", mpi_instance.make_machine_file)

    # openmpi oversubscribing
    mpi_instance = getinstance(OpenMpiOversubscribe, Local, MympirunOption())
    mpi_instance.options.double = True
    mpi_instance.set_multiplier()
    mpi_instance.make_machine_file()

    with open(mpi_instance.mpiexec_node_filename) as nodefile_fh:
        machinefile = nodefile_fh.read()
        regex = re.compile("slots=%s" % mpi_instance.ppn)
        self.assertTrue(regex.search(machinefile), "Regex %s not found in %s" % (regex.pattern, machinefile))
        self.assertEqual(len(nub(mpi_instance.mpinodes)), len(machinefile.strip().split('\n')),
                         msg="mpinodes doesn't match the amount of nodes in the nodefile")
示例12: validate_iterate_opts_lists
def validate_iterate_opts_lists(self):
    """
    Configure/build/install options specified as lists should have same length.
    """
    # configure/build/install options may be lists, in case of an iterated build;
    # lists of length 1 are treated as if they were plain strings in EasyBlock
    opt_counts = []
    for opt in ITERATE_OPTIONS:
        # anticipate changes in available easyconfig parameters (e.g. makeopts -> buildopts?)
        if self.get(opt, None) is None:
            self.log.error("%s not available in self.cfg (anymore)?!" % opt)

        # only options given as a list/tuple can drive an iterated build
        if isinstance(self[opt], (list, tuple)):
            opt_counts.append((opt, len(self[opt])))

    # all lists of length > 1 must share a single length
    lengths = set(cnt for (_, cnt) in opt_counts if cnt > 1)
    if len(lengths) > 1:
        self.log.error("Build option lists for iterated build should have same length: %s" % opt_counts)

    return True
示例13: get_convert_class
def get_convert_class(class_name):
    """Return the Convert class with specified class name class_name"""
    # collect all known Convert subclasses (deduplicated) whose name matches
    matches = [klass for klass in nub(get_subclasses(Convert)) if klass.__name__ == class_name]
    if len(matches) != 1:
        raise EasyBuildError("More than one Convert subclass found for name %s: %s", class_name, matches)
    return matches[0]
示例14: tweak
def tweak(easyconfigs, build_specs):
    """Tweak list of easyconfigs according to provided build specifications."""
    # make sure easyconfigs all feature the same toolchain (otherwise we *will* run into trouble)
    tc_labels = nub(['%(name)s/%(version)s' % ec['ec']['toolchain'] for ec in easyconfigs])
    if len(tc_labels) > 1:
        _log.error("Multiple toolchains featured in easyconfigs, --try-X not supported in that case: %s" % tc_labels)

    # obtain full dependency graph for specified easyconfigs;
    # resulting list is ordered 'top-to-bottom': toolchain dependencies and toolchain first
    ordered_ecs = resolve_dependencies(easyconfigs, retain_all_deps=True)

    # determine toolchain based on last easyconfigs
    toolchain = ordered_ecs[-1]['ec']['toolchain']
    _log.debug("Filtering using toolchain %s" % toolchain)

    # unless a dummy toolchain is used, drop leading entries (toolchain itself + its
    # dependencies) until the first easyconfig that is built with the toolchain
    if toolchain['name'] != DUMMY_TOOLCHAIN_NAME:
        while ordered_ecs[0]['ec']['toolchain'] != toolchain:
            ordered_ecs = ordered_ecs[1:]

    # generate tweaked easyconfigs, and continue with those instead
    easyconfigs = []
    for orig_ec in ordered_ecs:
        new_ec_file = tweak_one(orig_ec['spec'], None, build_specs)
        easyconfigs.extend(process_easyconfig(new_ec_file, build_specs=build_specs))
    return easyconfigs
示例15: make_mpdboot_file
def make_mpdboot_file(self):
    """
    Make an mpdboot file.

    Writes the list of unique nodes to an mpdboot file in the mympirun
    directory (based on hydra and universe options), and records its path
    in self.mpdboot_node_filename.
    """
    self.make_mympirundir()

    if self.mpinodes is None:
        self.set_mpinodes()

    # one line per unique node; nub deduplicates while keeping first-seen order
    mpdboottxt = '\n'.join(nub(self.mpinodes))

    mpdfn = os.path.join(self.mympirundir, 'mpdboot')
    try:
        # context manager guarantees the file handle is closed even if write() fails
        # (original open/write/close sequence leaked the handle on a failed write)
        with open(mpdfn, 'w') as fp:
            fp.write(mpdboottxt)
    except IOError as err:
        # fixed typo in message: 'mpbboot' -> 'mpdboot'
        msg = 'make_mpdboot_file: failed to write mpdboot file %s: %s' % (mpdfn, err)
        self.log.raiseException(msg)

    self.mpdboot_node_filename = mpdfn
    self.log.debug("make_mpdboot_file: wrote mpdbootfile %s:\n%s", mpdfn, mpdboottxt)