本文整理汇总了Python中python_compat.ifilter函数的典型用法代码示例。如果您正苦于以下问题:Python ifilter函数的具体用法?Python ifilter怎么用?Python ifilter使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了ifilter函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: strDict
def strDict(d, order = None):
	"""Render dict *d* as 'key = repr(value), ...' - keys from *order* come first."""
	if order:
		key_list = list(order)
		# append any remaining dict keys not already requested; the check runs
		# against the growing list, so duplicates are never added twice
		for extra_key in ifilter(lambda key: key not in key_list, d.keys()):
			key_list.append(extra_key)
	else:
		key_list = sorted(d.keys())
	return str.join(', ', imap(lambda key: '%s = %s' % (key, repr(d[key])), key_list))
示例2: _forwardCall
def _forwardCall(self, args, assignFun, callFun):
	"""Distribute *args* over the responsible backends and chain all results."""
	argMap = self._getMapID2Backend(args, assignFun)
	# only backends that actually received arguments take part in the call
	activePrefixes = ifilter(argMap.__contains__, self._wmsMap)
	resultGenerators = imap(lambda prefix: callFun(self._wmsMap[prefix], argMap[prefix]),
		activePrefixes)
	for result in tchain(resultGenerators):
		yield result
示例3: _check_get_jobnum_list
def _check_get_jobnum_list(self, task, wms, jobnum_list):
	# Query job states while tracking jobs the backend repeatedly fails to report.
	# NOTE(review): assumes self._defect_tries / _defect_raster / _defect_counter
	# are initialized elsewhere (presumably __init__) - TODO confirm.
	if self._defect_tries:
		num_defect = len(self._defect_counter) # Waiting list gets larger in case reported == []
		num_wait = num_defect - max(1, int(num_defect / 2 ** self._defect_raster))
		# hold back a sample of known-defective jobs from this query round
		jobnum_list_wait = self._sample(self._defect_counter, num_wait)
		jobnum_list = lfilter(lambda jobnum: jobnum not in jobnum_list_wait, jobnum_list)
	(change, jobnum_list_timeout, reported) = JobManager._check_get_jobnum_list(
		self, task, wms, jobnum_list)
	# jobs that were reported are no longer considered defective
	for jobnum in reported:
		self._defect_counter.pop(jobnum, None)
	if self._defect_tries and (change is not None):
		# make 'raster' iteratively smaller
		self._defect_raster += 1
		if reported:
			self._defect_raster = 1
		# count how often each queried job stayed unreported
		for jobnum in ifilter(lambda x: x not in reported, jobnum_list):
			self._defect_counter[jobnum] = self._defect_counter.get(jobnum, 0) + 1
		# jobs unreported too often get kicked into the timeout list
		jobnum_list_kick = lfilter(lambda jobnum: self._defect_counter[jobnum] >= self._defect_tries,
			self._defect_counter)
		if (len(reported) == 0) and (len(jobnum_list) == 1):
			jobnum_list_kick.extend(jobnum_list)
		for jobnum in set(jobnum_list_kick):
			jobnum_list_timeout.append(jobnum)
			self._defect_counter.pop(jobnum)
	return (change, jobnum_list_timeout, reported)
示例4: _forward_call
def _forward_call(self, args, assign_fun, call_fun):
	"""Forward *args* to every backend that got a share of them and yield all results."""
	name2args = self._get_map_backend_name2args(args, assign_fun)
	# iterate backends in stable (sorted) order, skipping those without work
	for name in ifilter(lambda backend_name: backend_name in name2args,
			sorted(self._map_backend_name2backend)):
		backend = self._map_backend_name2backend[name]
		for item in call_fun(backend, name2args[name]):
			yield item
示例5: _submitJob
def _submitJob(self, jobNum, module):
	# Write the JDL for job *jobNum*, run the submit tool and
	# return (jobNum, gcID or None, {'jdl': <jdl text>}).
	fd, jdl = tempfile.mkstemp('.jdl')
	try:
		jdlData = self.makeJDL(jobNum, module)
		utils.safeWrite(os.fdopen(fd, 'w'), jdlData)
	except Exception:
		utils.removeFiles([jdl])
		raise BackendError('Could not write jdl data to %s.' % jdl)
	try:
		# collect non-empty submission parameters as command line arguments
		submitArgs = []
		for key_value in utils.filterDict(self._submitParams, vF = lambda v: v).items():
			submitArgs.extend(key_value)
		submitArgs.append(jdl)
		activity = Activity('submitting job %d' % jobNum)
		proc = LocalProcess(self._submitExec, '--nomsg', '--noint', '--logfile', '/dev/stderr', *submitArgs)
		gcID = None
		# the last line starting with 'http' is taken as the job identifier
		for line in ifilter(lambda x: x.startswith('http'), imap(str.strip, proc.stdout.iter(timeout = 60))):
			gcID = line
		retCode = proc.status(timeout = 0, terminate = True)
		activity.finish()
		if (retCode != 0) or (gcID is None):
			if self.explainError(proc, retCode):
				pass  # error was already explained to the user
			else:
				self._log.log_process(proc, files = {'jdl': SafeFile(jdl).read()})
	finally:
		# the temporary jdl file is always cleaned up
		utils.removeFiles([jdl])
	return (jobNum, utils.QM(gcID, self._createId(gcID), None), {'jdl': str.join('', jdlData)})
示例6: _getCategoryStateSummary
def _getCategoryStateSummary(self):
	# Summarize category job states and merge categories until at most
	# self._catMax entries remain; returns (catStateDict, catDescDict, catSubcatDict).
	(catStateDict, catDescDict, catSubcatDict) = CategoryBaseReport._getCategoryStateSummary(self)
	# Used for quick calculations
	catLenDict = {}
	for catKey in catStateDict:
		catLenDict[catKey] = sum(catStateDict[catKey].values())
	# Merge successfully completed categories
	self._mergeCats(catStateDict, catDescDict, catSubcatDict, catLenDict,
		'Completed subtasks', lfilter(lambda catKey:
			(len(catStateDict[catKey]) == 1) and (Job.SUCCESS in catStateDict[catKey]), catStateDict))
	# Next merge steps shouldn't see non-dict catKeys in catDescDict
	hiddenDesc = {}
	for catKey in ifilter(lambda catKey: not isinstance(catDescDict[catKey], dict), list(catDescDict)):
		hiddenDesc[catKey] = catDescDict.pop(catKey)
	# Merge categories till goal is reached
	self._mergeCatsWithGoal(catStateDict, catDescDict, catSubcatDict, catLenDict, hiddenDesc)
	# Remove redundant variables from description
	varKeyResult = self._getKeyMergeResults(catDescDict)
	self._clearCategoryDesc(varKeyResult, catDescDict)
	# Restore hidden descriptions
	catDescDict.update(hiddenDesc)
	# Enforce category maximum - merge categories with the least amount of jobs
	if len(catStateDict) != self._catMax:
		self._mergeCats(catStateDict, catDescDict, catSubcatDict, catLenDict, 'Remaining subtasks',
			sorted(catStateDict, key = lambda catKey: -catLenDict[catKey])[self._catMax - 1:])
	# Finalize descriptions:
	if len(catDescDict) == 1:
		catDescDict[list(catDescDict.keys())[0]] = 'All jobs'
	return (catStateDict, catDescDict, catSubcatDict)
示例7: __init__
def __init__(self, config):
	# Build parameter sources from the config: random seed variables,
	# constants from [constants ...] and [<Module>] sections, and the wrapped factory.
	ParameterFactory.__init__(self, config)
	self._psrc_list = []
	# Random number variables
	jobs_config = config.change_view(add_sections=['jobs'])
	self._random_variables = jobs_config.get_list('random variables', ['JOB_RANDOM'], on_change=None)
	nseeds = jobs_config.get_int('nseeds', 10)
	# freshly drawn seeds are only the default - persisted seeds take precedence
	seeds_new = lmap(lambda x: str(random.randint(0, 10000000)), irange(nseeds))
	self._random_seeds = jobs_config.get_list('seeds', seeds_new, persistent=True)
	# Get constants from [constants <tags...>]
	constants_config = config.change_view(view_class='TaggedConfigView',
		set_classes=None, set_sections=['constants'], set_names=None)
	constants_pconfig = ParameterConfig(constants_config)
	# options containing a space are option modifiers, not constant names
	for vn_const in ifilter(lambda opt: ' ' not in opt, constants_config.get_option_list()):
		constants_config.set('%s type' % vn_const, 'verbatim', '?=')
		self._register_psrc(constants_pconfig, vn_const.upper())
	param_config = config.change_view(view_class='TaggedConfigView',
		set_classes=None, add_sections=['parameters'], inherit_sections=True)
	# Get constants from [<Module>] constants
	task_pconfig = ParameterConfig(param_config)
	for vn_const in param_config.get_list('constants', []):
		config.set('%s type' % vn_const, 'verbatim', '?=')
		self._register_psrc(task_pconfig, vn_const)
	# Get global repeat value from 'parameters' section
	self._repeat = param_config.get_int('repeat', -1, on_change=None)
	self._req = param_config.get_bool('translate requirements', True, on_change=None)
	self._pfactory = param_config.get_plugin('parameter factory', 'SimpleParameterFactory',
		cls=ParameterFactory)
示例8: cancelJobs
def cancelJobs(self, allIds):
	"""Cancel the given jobs in batches of 5, yielding (jobNum, deletedWMSId) pairs.

	*allIds* is a list of (WMS id, job number) tuples. Between batches a
	5 second wait is performed; aborting the wait stops the cancellation.
	"""
	if len(allIds) == 0:
		# FIX: 'raise StopIteration' inside a generator raises RuntimeError
		# since Python 3.7 (PEP 479) - a plain return ends the generator cleanly
		return
	waitFlag = False
	for ids in imap(lambda x: allIds[x:x+5], irange(0, len(allIds), 5)):
		# Delete jobs in groups of 5 - with 5 seconds between groups
		if waitFlag and not utils.wait(5):
			break
		waitFlag = True
		jobNumMap = dict(ids)
		jobs = self.writeWMSIds(ids)
		activity = utils.ActivityLog('cancelling jobs')
		proc = LocalProcess(self._cancelExec, '--noint', '--logfile', '/dev/stderr', '-i', jobs)
		retCode = proc.status(timeout = 60, terminate = True)
		del activity
		# select cancelled jobs - the tool marks them with a leading '- '
		for deletedWMSId in ifilter(lambda x: x.startswith('- '), proc.stdout.iter()):
			deletedWMSId = self._createId(deletedWMSId.strip('- \n'))
			yield (jobNumMap.get(deletedWMSId), deletedWMSId)
		if retCode != 0:
			if self.explainError(proc, retCode):
				pass  # error was already explained to the user
			else:
				self._log.log_process(proc, files = {'jobs': utils.safeRead(jobs)})
		utils.removeFiles([jobs])
示例9: dump
def dump(self):
	"""Replay the buffered log lines to the console without re-logging them."""
	saved_state = self.logged
	self.logged = False  # suppress logging while replaying the buffer
	buffered_text = str.join('', ifilter(identity, self._log))
	for line in buffered_text.splitlines():
		self._console.eraseLine()
		self.write(line + '\n')
	self.logged = saved_state
示例10: _build_blocks
def _build_blocks(self, map_key2fm_list, map_key2name, map_key2metadata_dict):
	# Return named dataset
	# Yields one DataProvider block dict per key, translating the collected
	# file metadata tuples into DataProvider file info entries.
	for key in sorted(map_key2fm_list):
		result = {
			# key prefixes select the dataset ([:1]) and block ([:2]) names
			DataProvider.Dataset: map_key2name[key[:1]],
			DataProvider.BlockName: map_key2name[key[:2]],
		}
		fm_list = map_key2fm_list[key]
		# Determine location_list
		# None means 'no location info' - an empty list is a valid (empty) location set
		location_list = None
		for file_location_list in ifilter(lambda s: s is not None, imap(itemgetter(3), fm_list)):
			location_list = location_list or []
			location_list.extend(file_location_list)
		if location_list is not None:
			result[DataProvider.Locations] = list(UniqueList(location_list))
		# use first file [0] to get the initial metadata_dict [1]
		metadata_name_list = list(fm_list[0][1].keys())
		result[DataProvider.Metadata] = metadata_name_list
		# translate file metadata into data provider file info entries
		def _translate_fm2fi(url, metadata_dict, entries, location_list, obj_dict):
			if entries is None:
				entries = -1  # -1 marks an unknown number of entries
			return {DataProvider.URL: url, DataProvider.NEntries: entries,
				DataProvider.Metadata: lmap(metadata_dict.get, metadata_name_list)}
		result[DataProvider.FileList] = lsmap(_translate_fm2fi, fm_list)
		yield result
示例11: __init__
def __init__(self, jobDB, task, jobs = None, configString = ''):
	# Categorize jobs by their job config variables (using the dataset nick
	# instead of the dataset split index) and strip variables common to all categories.
	Report.__init__(self, jobDB, task, jobs, configString)
	catJobs = {}
	catDescDict = {}
	# Assignment of jobs to categories (depending on variables and using datasetnick if available)
	jobConfig = {}
	for jobNum in self._jobs:
		if task:
			jobConfig = task.getJobConfig(jobNum)
		# skip variables whose repr contains '!' - presumably a marker for
		# non-categorizable values; TODO confirm marker semantics
		varList = sorted(ifilter(lambda var: '!' not in repr(var), jobConfig.keys()))
		if 'DATASETSPLIT' in varList:
			varList.remove('DATASETSPLIT')
			varList.append('DATASETNICK')
		catKey = str.join('|', imap(lambda var: '%s=%s' % (var, jobConfig[var]), varList))
		catJobs.setdefault(catKey, []).append(jobNum)
		if catKey not in catDescDict:
			catDescDict[catKey] = dict(imap(lambda var: (var, jobConfig[var]), varList))
	# Kill redundant keys from description
	commonVars = dict(imap(lambda var: (var, jobConfig[var]), varList)) # seed with last varList
	for catKey in catDescDict:
		for key in list(commonVars.keys()):
			if key not in catDescDict[catKey].keys():
				commonVars.pop(key)
			elif commonVars[key] != catDescDict[catKey][key]:
				commonVars.pop(key)
	for catKey in catDescDict:
		for commonKey in commonVars:
			catDescDict[catKey].pop(commonKey)
	# Generate job-category map with efficient int keys - catNum becomes the new catKey
	self._job2cat = {}
	self._catDescDict = {}
	for catNum, catKey in enumerate(sorted(catJobs)):
		self._catDescDict[catNum] = catDescDict[catKey]
		self._job2cat.update(dict.fromkeys(catJobs[catKey], catNum))
示例12: _getBlocksInternal
def _getBlocksInternal(self):
	# Split files into blocks/datasets via key functions and determine metadata intersection
	(protoBlocks, commonDS, commonB) = ({}, {}, {})
	def getActiveKeys(kUser, kGuard, gIdx):
		# user keys plus either the explicit guard keys or the scanners' guards at index gIdx
		return kUser + (kGuard or lchain(imap(lambda x: x.getGuards()[gIdx], self._scanner)))
	keysDS = getActiveKeys(self._ds_keys_user, self._ds_keys_guard, 0)
	keysB = getActiveKeys(self._b_keys_user, self._b_keys_guard, 1)
	# fileInfo[0] truthiness filters out invalid entries
	for fileInfo in ifilter(itemgetter(0), self._collectFiles()):
		hashDS = self._generateKey(keysDS, md5_hex(repr(self._datasetExpr)) + md5_hex(repr(self._datasetNick)), *fileInfo)
		hashB = self._generateKey(keysB, hashDS + md5_hex(repr(fileInfo[3])), *fileInfo) # [3] == SE list
		# apply optional dataset / block selection filters
		if not self._ds_select or (hashDS in self._ds_select):
			if not self._b_select or (hashB in self._b_select):
				fileInfo[1].update({'DS_KEY': hashDS, 'BLOCK_KEY': hashB})
				protoBlocks.setdefault(hashDS, {}).setdefault(hashB, []).append(fileInfo)
				# keep only metadata shared by all files of the dataset / block
				utils.intersectDict(commonDS.setdefault(hashDS, dict(fileInfo[1])), fileInfo[1])
				utils.intersectDict(commonB.setdefault(hashDS, {}).setdefault(hashB, dict(fileInfo[1])), fileInfo[1])
	# Generate names for blocks/datasets using common metadata
	(hashNameDictDS, hashNameDictB) = ({}, {})
	for hashDS in protoBlocks:
		hashNameDictDS[hashDS] = self._generateDatasetName(hashDS, commonDS[hashDS])
		for hashB in protoBlocks[hashDS]:
			hashNameDictB[hashB] = (hashDS, self._generateBlockName(hashB, commonB[hashDS][hashB]))
	# check for name collisions before building the final blocks
	self._findCollision('dataset', hashNameDictDS, commonDS, keysDS, lambda name, key: [key])
	self._findCollision('block', hashNameDictB, commonB, keysDS + keysB, lambda name, key: [name[0], key], lambda name: name[1])
	for block in self._buildBlocks(protoBlocks, hashNameDictDS, hashNameDictB):
		yield block
示例13: get_dict
def get_dict(self, option, default=unspecified, parser=identity, strfun=str, **kwargs):
	"""Return a tuple with (<dictionary>, <keys>) - keys sorted by order of appearance.

	The default key is accessed via key == None (None is never in keys!).
	"""
	def _obj2str(value):
		return str_dict_cfg(value, parser, strfun)
	def _str2obj(value):
		return parse_dict_cfg(value, parser)
	def _def2obj(value):
		# report only real keys - the None default key is excluded
		return (value, sorted(ifilter(lambda key: key is not None, value.keys())))
	return self._get_internal('dictionary', obj2str=_obj2str, str2obj=_str2obj,
		def2obj=_def2obj, option=option, default_obj=default, **kwargs)
示例14: process
def process(self, dn):
	# Extract output file information from the job info data in directory *dn*,
	# supporting both the old 'FILE<n>' and the new 'OUTPUT_FILE_<n>_<prop>' formats.
	jobInfo = JobInfoProcessor.process(self, dn)
	if jobInfo:
		jobData = jobInfo[2]
		result = {}
		# parse old job info data format for files
		oldFileFormat = [FileInfoProcessor.Hash, FileInfoProcessor.NameLocal, FileInfoProcessor.NameDest, FileInfoProcessor.Path]
		for (fileKey, fileData) in ifilter(lambda key_value: key_value[0].startswith('FILE'), jobData.items()):
			# 'FILE' (without index) maps to index 0 via rjust padding
			fileIdx = fileKey.replace('FILE', '').rjust(1, '0')
			result[int(fileIdx)] = dict(izip(oldFileFormat, fileData.strip('"').split(' ')))
		# parse new job info data format
		for (fileKey, fileData) in ifilter(lambda key_value: key_value[0].startswith('OUTPUT_FILE'), jobData.items()):
			(fileIdx, fileProperty) = fileKey.replace('OUTPUT_FILE_', '').split('_')
			if isinstance(fileData, str):
				fileData = fileData.strip('"')
			result.setdefault(int(fileIdx), {})[FileInfoProcessor.str2enum(fileProperty)] = fileData
		return list(result.values())
示例15: get_cmssw_info
def get_cmssw_info(tar_fn):
	"""Yield parsed 'report.xml' framework job report DOMs from gzipped tar *tar_fn*."""
	import xml.dom.minidom
	# Read framework report files to get number of events
	cmssw_tar = tarfile.open(tar_fn, 'r:gz')
	try:
		fwk_report_list = ifilter(lambda tar_info: os.path.basename(tar_info.name) == 'report.xml',
			cmssw_tar.getmembers())
		for fwk_report_fn in imap(cmssw_tar.extractfile, fwk_report_list):
			yield xml.dom.minidom.parse(fwk_report_fn)
	finally:
		# FIX: the tar archive was never closed - release the file handle even
		# when the consumer abandons the generator early
		cmssw_tar.close()