This page collects typical usage examples of the python_compat.imap function in Python. If you are unsure what imap does, how to call it, or what real-world uses look like, the examples selected here should help.
The following presents 15 code examples of the imap function, sorted by popularity.
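Before the examples: python_compat is a Python 2/3 compatibility layer, and imap is its lazy map. A minimal sketch of what the shim boils down to (assuming the usual try/except import pattern; the real module covers many more names):

    try:
        from itertools import imap  # Python 2: the lazy map lives in itertools
    except ImportError:
        imap = map  # Python 3: the builtin map is already lazy

    # imap yields results on demand instead of building a full list
    squares = imap(lambda x: x * x, range(5))
    print(list(squares))  # [0, 1, 4, 9, 16]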
Example 1: _get_section_key
def _get_section_key(self, section):
    tmp = section.split()
    if not tmp:
        raise ConfigError('Invalid config section %r' % section)
    (cur_section, cur_name_list, cur_tag_map) = (tmp[0], [], {})
    for token in tmp[1:]:
        if ':' in token:
            tag_entry = token.split(':')
            if len(tag_entry) != 2:
                raise ConfigError('Invalid config tag in section %r' % section)
            cur_tag_map[tag_entry[0]] = tag_entry[1]
        elif token:
            cur_name_list.append(token)

    class_section_idx = safe_index(self._class_section_list, cur_section)
    section_idx = safe_index(self._section_list, cur_section)
    if (not self._class_section_list) and (not self._section_list):
        section_idx = 0
    if (class_section_idx is not None) or (section_idx is not None):
        # Section is selected by class or manually
        name_idx_tuple = tuple(imap(lambda n: safe_index(self._section_name_list, n), cur_name_list))
        if None not in name_idx_tuple:  # All names in current section are selected
            cur_tag_name_list = lfilter(cur_tag_map.__contains__, self._section_tag_order)
            left_tag_name_list = lfilter(lambda tn: tn not in self._section_tag_order, cur_tag_map)
            tag_tuple_list = imap(lambda tn: (tn, cur_tag_map[tn]), cur_tag_name_list)
            tag_idx_tuple = tuple(imap(lambda tt: safe_index(self._section_tag_list, tt), tag_tuple_list))
            if (None not in tag_idx_tuple) and not left_tag_name_list:
                return (class_section_idx, section_idx, name_idx_tuple, tag_idx_tuple)
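The selection logic above hinges on safe_index returning None for unknown entries, so any unselected name poisons the tuple. A small sketch of that behavior (safe_index is assumed here to work like list.index with a None fallback):

    def safe_index(indexable, value):
        try:  # assumed behavior: position of value, or None if absent
            return indexable.index(value)
        except ValueError:
            return None

    name_idx_tuple = tuple(imap(lambda n: safe_index(['a', 'b', 'c'], n), ['b', 'x']))
    print(name_idx_tuple)  # (1, None) - the None causes the section key to be rejected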
Example 2: _parseTime
def _parseTime(self, time_str):
    result = 0
    entry_map = {'yea': 365 * 24 * 60 * 60, 'day': 24 * 60 * 60, 'hou': 60 * 60, 'min': 60, 'sec': 1}
    tmp = time_str.split()
    for (entry, value) in izip(imap(lambda x: x[:3], tmp[1::2]), imap(int, tmp[::2])):
        result += entry_map[entry] * value
    return result
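The izip/imap pairing splits the token list into alternating value/unit pairs, with units truncated to three characters so that 'hours' and 'hou' both match. A standalone version for illustration (self dropped, compatibility imports spelled out):

    try:
        from itertools import izip, imap  # Python 2
    except ImportError:
        izip, imap = zip, map  # Python 3

    def parse_time(time_str):
        entry_map = {'yea': 365 * 24 * 60 * 60, 'day': 24 * 60 * 60, 'hou': 60 * 60, 'min': 60, 'sec': 1}
        tmp = time_str.split()
        result = 0
        # tmp[::2] are the numbers, tmp[1::2] the units ('hours' -> 'hou')
        for (entry, value) in izip(imap(lambda x: x[:3], tmp[1::2]), imap(int, tmp[::2])):
            result += entry_map[entry] * value
        return result

    print(parse_time('1 day 12 hours 30 min'))  # 131400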
Example 3: _get_sandbox_file_list
def _get_sandbox_file_list(self, task, sm_list):
    # Prepare all input files
    dep_list = set(ichain(imap(lambda x: x.get_dependency_list(), [task] + sm_list)))
    dep_fn_list = lmap(lambda dep: resolve_path('env.%s.sh' % dep,
        lmap(lambda pkg: get_path_share('', pkg=pkg), os.listdir(get_path_pkg()))), dep_list)
    task_config_dict = dict_union(self._remote_event_handler.get_mon_env_dict(),
        *imap(lambda x: x.get_task_dict(), [task] + sm_list))
    task_config_dict.update({'GC_DEPFILES': str.join(' ', dep_list),
        'GC_USERNAME': self._token.get_user_name(), 'GC_WMS_NAME': self._name})
    task_config_str_list = DictFormat(escape_strings=True).format(
        task_config_dict, format='export %s%s%s\n')
    vn_alias_dict = dict(izip(self._remote_event_handler.get_mon_env_dict().keys(),
        self._remote_event_handler.get_mon_env_dict().keys()))
    vn_alias_dict.update(task.get_var_alias_map())
    vn_alias_str_list = DictFormat(delimeter=' ').format(vn_alias_dict, format='%s%s%s\n')

    # Resolve wildcards in task input files
    def _get_task_fn_list():
        for fpi in task.get_sb_in_fpi_list():
            matched = glob.glob(fpi.path_abs)
            if matched != []:
                for match in matched:
                    yield match
            else:
                yield fpi.path_abs
    return lchain([self._remote_event_handler.get_file_list(), dep_fn_list, _get_task_fn_list(), [
        VirtualFile('_config.sh', sorted(task_config_str_list)),
        VirtualFile('_varmap.dat', sorted(vn_alias_str_list))]])
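The dep_list line collects the dependency lists of the task and all storage managers into one deduplicated set. The flattening step in isolation (ichain is assumed to behave like itertools.chain over its member iterables; dependency names are hypothetical):

    from itertools import chain

    dep_lists = [['glite', 'cmssw'], ['cmssw']]  # hypothetical get_dependency_list() results
    dep_list = set(chain(*dep_lists))
    print(sorted(dep_list))  # ['cmssw', 'glite'] - each dependency resolved only once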
Example 4: __init__
def __init__(self, jobDB, task, jobs = None, configString = ''):
    Report.__init__(self, jobDB, task, jobs, configString)
    catJobs = {}
    catDescDict = {}
    # Assignment of jobs to categories (depending on variables and using datasetnick if available)
    jobConfig = {}
    for jobNum in self._jobs:
        if task:
            jobConfig = task.getJobConfig(jobNum)
        varList = sorted(ifilter(lambda var: '!' not in repr(var), jobConfig.keys()))
        if 'DATASETSPLIT' in varList:
            varList.remove('DATASETSPLIT')
            varList.append('DATASETNICK')
        catKey = str.join('|', imap(lambda var: '%s=%s' % (var, jobConfig[var]), varList))
        catJobs.setdefault(catKey, []).append(jobNum)
        if catKey not in catDescDict:
            catDescDict[catKey] = dict(imap(lambda var: (var, jobConfig[var]), varList))
    # Kill redundant keys from description
    commonVars = dict(imap(lambda var: (var, jobConfig[var]), varList)) # seed with last varList
    for catKey in catDescDict:
        for key in list(commonVars.keys()):
            if key not in catDescDict[catKey].keys():
                commonVars.pop(key)
            elif commonVars[key] != catDescDict[catKey][key]:
                commonVars.pop(key)
    for catKey in catDescDict:
        for commonKey in commonVars:
            catDescDict[catKey].pop(commonKey)
    # Generate job-category map with efficient int keys - catNum becomes the new catKey
    self._job2cat = {}
    self._catDescDict = {}
    for catNum, catKey in enumerate(sorted(catJobs)):
        self._catDescDict[catNum] = catDescDict[catKey]
        self._job2cat.update(dict.fromkeys(catJobs[catKey], catNum))
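The heart of this constructor is the category key: jobs with identical variable assignments collapse onto the same 'var=value|...' string. The key construction in isolation (hypothetical job configuration):

    job_config = {'SEED': '42', 'DATASETNICK': 'data_A'}
    var_list = sorted(job_config.keys())
    cat_key = str.join('|', imap(lambda var: '%s=%s' % (var, job_config[var]), var_list))
    print(cat_key)  # DATASETNICK=data_A|SEED=42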
Example 5: __init__
def __init__(self, config, job_db, task):
    map_cat2jobs = {}
    map_cat2desc = {}
    job_config_dict = {}
    vn_list = []
    for jobnum in job_db.get_job_list():
        if task:
            job_config_dict = task.get_job_dict(jobnum)
            vn_list = lfilter(self._is_not_ignored_vn, sorted(job_config_dict.keys()))
        cat_key = str.join('|', imap(lambda vn: '%s=%s' % (vn, job_config_dict[vn]), vn_list))
        map_cat2jobs.setdefault(cat_key, []).append(jobnum)
        if cat_key not in map_cat2desc:
            map_cat2desc[cat_key] = dict(imap(lambda var: (var, job_config_dict[var]), vn_list))
    # Kill redundant keys from description - seed with last vn_list
    common_var_dict = dict(imap(lambda var: (var, job_config_dict[var]), vn_list))
    for cat_key in map_cat2desc:
        for key in list(common_var_dict.keys()):
            if key not in map_cat2desc[cat_key].keys():
                common_var_dict.pop(key)
            elif common_var_dict[key] != map_cat2desc[cat_key][key]:
                common_var_dict.pop(key)
    for cat_key in map_cat2desc:
        for common_key in common_var_dict:
            map_cat2desc[cat_key].pop(common_key)
    # Generate job-category map with efficient int keys - catNum becomes the new cat_key
    self._job2cat = {}
    self._map_cat2desc = {}
    for cat_num, cat_key in enumerate(sorted(map_cat2jobs)):
        self._map_cat2desc[cat_num] = map_cat2desc[cat_key]
        self._job2cat.update(dict.fromkeys(map_cat2jobs[cat_key], cat_num))
Example 6: _resyncInternal
def _resyncInternal(self): # This function is _VERY_ time critical!
    tmp = self._rawSource.resync() # First ask about psource changes
    (redoNewPNum, disableNewPNum, sizeChange) = (set(tmp[0]), set(tmp[1]), tmp[2])
    hashNew = self._rawSource.getHash()
    hashChange = self._storedHash != hashNew
    self._storedHash = hashNew
    if not (redoNewPNum or disableNewPNum or sizeChange or hashChange):
        self._resyncState = None
        return
    psource_old = ParameterAdapter(None, ParameterSource.createInstance('GCDumpParameterSource', self._pathParams))
    psource_new = ParameterAdapter(None, self._rawSource)
    mapJob2PID = {}
    (pAdded, pMissing, _) = self._diffParams(psource_old, psource_new, mapJob2PID, redoNewPNum, disableNewPNum)
    self._source = self._getResyncSource(psource_old, psource_new, mapJob2PID, pAdded, pMissing, disableNewPNum)
    self._mapJob2PID = mapJob2PID # Update Job2PID map
    redoNewPNum = redoNewPNum.difference(disableNewPNum)
    if redoNewPNum or disableNewPNum:
        mapPID2Job = dict(ismap(utils.swap, self._mapJob2PID.items()))
        translate = lambda pNum: mapPID2Job.get(pNum, pNum)
        self._resyncState = (set(imap(translate, redoNewPNum)), set(imap(translate, disableNewPNum)), sizeChange)
    elif sizeChange:
        self._resyncState = (set(), set(), sizeChange)
    # Write resynced state
    self._writeJob2PID(self._pathJob2PID + '.tmp')
    ParameterSource.getClass('GCDumpParameterSource').write(self._pathParams + '.tmp', self)
    os.rename(self._pathJob2PID + '.tmp', self._pathJob2PID)
    os.rename(self._pathParams + '.tmp', self._pathParams)
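The translation step inverts the job-to-PID map so parameter numbers reported by the source can be mapped back to job numbers, falling back to the identity for unknown entries. A plain-Python equivalent of the dict(ismap(utils.swap, ...)) line (ismap and utils.swap are assumed to act like starmap with argument swapping; the map contents are hypothetical):

    mapJob2PID = {1: 10, 2: 11}  # hypothetical job -> PID assignments
    mapPID2Job = dict((pid, job) for (job, pid) in mapJob2PID.items())
    translate = lambda pNum: mapPID2Job.get(pNum, pNum)  # identity fallback
    print(set(imap(translate, set([10, 99]))))  # {1, 99}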
Example 7: _resync_adapter
def _resync_adapter(self, pa_old, pa_new, result_redo, result_disable, size_change):
    (map_jobnum2pnum, pspi_list_added, pspi_list_missing) = _diff_pspi_list(pa_old, pa_new,
        result_redo, result_disable)
    # Reorder and reconstruct parameter space with the following layout:
    # NNNNNNNNNNNNN OOOOOOOOO | source: NEW (==self) and OLD (==from file)
    # <same><added> <missing> | same: both in NEW and OLD, added: only in NEW, missing: only in OLD
    if pspi_list_added:
        _extend_map_jobnum2pnum(map_jobnum2pnum, pa_old.get_job_len(), pspi_list_added)
    if pspi_list_missing:
        # extend the parameter source by placeholders for the missing parameter space points
        psrc_missing = _create_placeholder_psrc(pa_old, pa_new,
            map_jobnum2pnum, pspi_list_missing, result_disable)
        self._psrc = ParameterSource.create_instance('ChainParameterSource',
            self._psrc_raw, psrc_missing)
    self._map_jobnum2pnum = map_jobnum2pnum  # Update Job2PID map
    # Write resynced state
    self._write_jobnum2pnum(self._path_jobnum2pnum + '.tmp')
    ParameterSource.get_class('GCDumpParameterSource').write(self._path_params + '.tmp',
        self.get_job_len(), self.get_job_metadata(), self.iter_jobs())
    os.rename(self._path_jobnum2pnum + '.tmp', self._path_jobnum2pnum)
    os.rename(self._path_params + '.tmp', self._path_params)
    result_redo = result_redo.difference(result_disable)
    if result_redo or result_disable:
        map_pnum2jobnum = reverse_dict(self._map_jobnum2pnum)
        def _translate_pnum(pnum):
            return map_pnum2jobnum.get(pnum, pnum)
        result_redo = set(imap(_translate_pnum, result_redo))
        result_disable = set(imap(_translate_pnum, result_disable))
        return (result_redo, result_disable, size_change)
    return (set(), set(), size_change)
Example 8: split_brackets
def split_brackets(tokens, brackets = None, exType = Exception):
    if brackets is None:
        brackets = ['()', '{}', '[]']
    buffer = ''
    stack_bracket = []
    map_close_to_open = dict(imap(lambda x: (x[1], x[0]), brackets))
    position = 0
    for token in tokens:
        position += len(token) # store position for proper error messages
        if token in map_close_to_open.values():
            stack_bracket.append((token, position))
        if token in map_close_to_open.keys():
            if not stack_bracket:
                raise exType('Closing bracket %r at position %d is without opening bracket' % (token, position))
            elif stack_bracket[-1][0] == map_close_to_open[token]:
                stack_bracket.pop()
                if not stack_bracket:
                    buffer += token
                    yield buffer
                    buffer = ''
                    continue
            else:
                raise exType('Closing bracket %r at position %d does not match bracket %r at position %d' % (token, position, stack_bracket[-1][0], stack_bracket[-1][1]))
        if stack_bracket:
            buffer += token
        else:
            yield token
    if stack_bracket:
        raise exType('Unclosed brackets %s' % str.join(', ', imap(lambda b_pos: '%r at position %d' % b_pos, stack_bracket)))
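Apart from imap, split_brackets is self-contained, so it can be exercised directly: tokens outside brackets are yielded one by one, while bracketed runs are buffered into a single string. A usage sketch with a hypothetical token stream:

    tokens = ['a', '(', 'b', '+', 'c', ')', 'd']
    print(list(split_brackets(tokens)))  # ['a', '(b+c)', 'd']
    # list(split_brackets(['(', 'x'])) would raise: Unclosed brackets '(' at position 1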
Example 9: get_job_dict
def get_job_dict(self, jobnum):
    # Get job dependent environment variables
    job_env_dict = SCRAMTask.get_job_dict(self, jobnum)
    if not self._has_dataset:
        job_env_dict['MAX_EVENTS'] = self._events_per_job
    job_env_dict.update(dict(self._cmssw_search_dict))
    if self._do_gzip_std_output:
        job_env_dict['GZIP_OUT'] = 'yes'
    if self._project_area_tarball_on_se:
        job_env_dict['SE_RUNTIME'] = 'yes'
    if self._project_area:
        job_env_dict['HAS_RUNTIME'] = 'yes'
    job_env_dict['CMSSW_EXEC'] = 'cmsRun'
    job_env_dict['CMSSW_CONFIG'] = str.join(' ', imap(os.path.basename, self._config_fn_list))
    job_env_dict['CMSSW_OLD_RELEASETOP'] = self._old_release_top
    if self.prolog.is_active():
        job_env_dict['CMSSW_PROLOG_EXEC'] = self.prolog.get_command()
        job_env_dict['CMSSW_PROLOG_SB_IN_FILES'] = str.join(' ',
            imap(lambda x: x.path_rel, self.prolog.get_sb_in_fpi_list()))
        job_env_dict['CMSSW_PROLOG_ARGS'] = self.prolog.get_arguments()
    if self.epilog.is_active():
        job_env_dict['CMSSW_EPILOG_EXEC'] = self.epilog.get_command()
        job_env_dict['CMSSW_EPILOG_SB_IN_FILES'] = str.join(' ',
            imap(lambda x: x.path_rel, self.epilog.get_sb_in_fpi_list()))
        job_env_dict['CMSSW_EPILOG_ARGS'] = self.epilog.get_arguments()
    return job_env_dict
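The CMSSW_CONFIG line reduces full config paths to their basenames before joining them into a single space-separated environment value; that step in isolation (hypothetical paths):

    import os

    config_fn_list = ['/work/cfg/skim_A.py', '/work/cfg/skim_B.py']  # hypothetical
    print(str.join(' ', imap(os.path.basename, config_fn_list)))  # skim_A.py skim_B.py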
Example 10: collapse_psp_list
def collapse_psp_list(psp_list, tracked_list, opts):
    psp_dict = {}
    psp_dict_nicks = {}
    header_list = [('COLLATE_JOBS', '# of jobs')]
    if 'DATASETSPLIT' in tracked_list:
        tracked_list.remove('DATASETSPLIT')
        if opts.collapse == 1:
            tracked_list.append('DATASETNICK')
            header_list.append(('DATASETNICK', 'DATASETNICK'))
        elif opts.collapse == 2:
            header_list.append(('COLLATE_NICK', '# of nicks'))
    for pset in psp_list:
        if ('DATASETSPLIT' in pset) and (opts.collapse == 1):
            pset.pop('DATASETSPLIT')
        nickname = None
        if ('DATASETNICK' in pset) and (opts.collapse == 2):
            nickname = pset.pop('DATASETNICK')
        hash_str = md5_hex(repr(lmap(lambda key: pset.get(str(key)), tracked_list)))
        psp_dict.setdefault(hash_str, []).append(pset)
        psp_dict_nicks.setdefault(hash_str, set()).add(nickname)

    def _do_collate(hash_str):
        psp = psp_dict[hash_str][0]
        psp['COLLATE_JOBS'] = len(psp_dict[hash_str])
        psp['COLLATE_NICK'] = len(psp_dict_nicks[hash_str])
        return psp
    psp_list = sorted(imap(_do_collate, psp_dict), key=lambda x: tuple(imap(str, x.values())))
    return (header_list, psp_list)
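Grouping works by hashing the repr of each parameter set's tracked values, so identical sets land in the same bucket. A plain-hashlib equivalent of the md5_hex call (md5_hex is assumed to be a thin hexdigest wrapper; the parameter set is hypothetical):

    import hashlib

    pset = {'SEED': '1', 'NICK': 'data_A'}  # hypothetical parameter space point
    tracked_list = ['SEED', 'NICK']
    hash_str = hashlib.md5(repr([pset.get(str(key)) for key in tracked_list]).encode()).hexdigest()
    print(hash_str[:8])  # identical psets produce identical keys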
Example 11: __init__
def __init__(self, head, data, delimeter='|'):
    ConsoleTable.__init__(self)
    head = list(head)
    self._delimeter = delimeter
    self._write_line(str.join(self._delimeter, imap(lambda x: x[1], head)))
    for entry in data:
        if isinstance(entry, dict):
            self._write_line(str.join(self._delimeter, imap(lambda x: str(entry.get(x[0], '')), head)))
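Each head entry is a (key, title) pair: the titles form the header row, the keys look up the cell values. The two imap calls in isolation (hypothetical rows):

    head = [('num', 'Job'), ('state', 'Status')]
    entry = {'num': 42, 'state': 'RUNNING'}
    print(str.join('|', imap(lambda x: x[1], head)))                       # Job|Status
    print(str.join('|', imap(lambda x: str(entry.get(x[0], '')), head)))  # 42|RUNNING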
Example 12: _parse_status
def _parse_status(self, value, default):
    if any(imap(lambda x: x in value, ['E', 'e'])):
        return Job.UNKNOWN
    if any(imap(lambda x: x in value, ['h', 's', 'S', 'T', 'w'])):
        return Job.QUEUED
    if any(imap(lambda x: x in value, ['r', 't'])):
        return Job.RUNNING
    return Job.READY
Example 13: _parse_status
def _parse_status(self, value, default):
    if any(imap(value.__contains__, ['E', 'e'])):
        return Job.UNKNOWN
    if any(imap(value.__contains__, ['h', 's', 'S', 'T', 'w'])):
        return Job.QUEUED
    if any(imap(value.__contains__, ['r', 't'])):
        return Job.RUNNING
    return Job.READY
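Both variants test whether any flag character occurs in the raw status string; value.__contains__ simply avoids the lambda. The check in isolation (a hypothetical 'qw' status string, as a batch system might report):

    value = 'qw'  # hypothetical raw status
    print(any(imap(value.__contains__, ['h', 's', 'S', 'T', 'w'])))  # True -> Job.QUEUED
    print(any(imap(value.__contains__, ['r', 't'])))                 # False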
Example 14: process
def process(self, pNum, splitInfo, result):
    if not self._lumi_filter.empty():
        lumi_filter = self._lumi_filter.lookup(splitInfo[DataSplitter.Nickname], is_selector = False)
        if lumi_filter:
            idxRuns = splitInfo[DataSplitter.MetadataHeader].index("Runs")
            iterRuns = ichain(imap(lambda m: m[idxRuns], splitInfo[DataSplitter.Metadata]))
            short_lumi_filter = filterLumiFilter(list(iterRuns), lumi_filter)
            result['LUMI_RANGE'] = str.join(',', imap(lambda lr: '"%s"' % lr, formatLumi(short_lumi_filter)))
Example 15: process
def process(self, pnum, partition, result):
    if self.enabled():
        lumi_filter = self._lumi_filter.lookup(partition[DataSplitter.Nickname], is_selector=False)
        if lumi_filter:
            idx_runs = partition[DataSplitter.MetadataHeader].index('Runs')
            iter_run = ichain(imap(lambda m: m[idx_runs], partition[DataSplitter.Metadata]))
            short_lumi_filter = filter_lumi_filter(list(iter_run), lumi_filter)
            iter_lumi_range_str = imap(lambda lr: '"%s"' % lr, format_lumi(short_lumi_filter))
            result['LUMI_RANGE'] = str.join(',', iter_lumi_range_str)
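The final two lines wrap every formatted lumi range in double quotes before joining them with commas. That quoting step in isolation (hypothetical range strings; the exact format_lumi output format is an assumption):

    lumi_range_list = ['1:1-1:20', '2:5-2:9']  # hypothetical formatted lumi ranges
    print(str.join(',', imap(lambda lr: '"%s"' % lr, lumi_range_list)))  # "1:1-1:20","2:5-2:9"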