本文整理汇总了Python中python_compat.lmap函数的典型用法代码示例。如果您正苦于以下问题:Python lmap函数的具体用法?Python lmap怎么用?Python lmap使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了lmap函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: deploy_task
def deploy_task(self, task, transfer_se, transfer_sb):
	"""Deploy *task*: record output targets, register SE input files and pack the sandbox.

	transfer_se: if true, transfer the task's SE input files right away
	transfer_sb: if true, re-create the sandbox tarball even if it already exists
	"""
	# HACK
	# Record the target names (third tuple entry) of all output transfers
	self._output_fn_list = lmap(lambda d_s_t: d_s_t[2], self._get_out_transfer_info_list(task))
	task.validate_variables()
	# add task SE files to SM
	self._sm_se_in.add_file_list(lmap(lambda d_s_t: d_s_t[2], task.get_se_in_fn_list()))
	# Transfer common SE files
	if transfer_se:
		self._sm_se_in.do_transfer(task.get_se_in_fn_list())
	def _convert(fn_list):
		# Normalize sandbox entries to (source, name-in-archive) pairs;
		# entries are either path strings or file-like objects with a .name attribute
		for fn in fn_list:
			if isinstance(fn, str):
				yield (fn, os.path.basename(fn))
			else:
				yield (fn, os.path.basename(fn.name))
	# Package sandbox tar file
	self._log.log(logging.INFO1, 'Packing sandbox')
	sandbox = self._get_sandbox_name(task)
	ensure_dir_exists(os.path.dirname(sandbox), 'sandbox directory')
	# Only (re)pack when the tarball is missing or repacking was requested
	if not os.path.exists(sandbox) or transfer_sb:
		sandbox_file_list = self._get_sandbox_file_list(task, [self._sm_se_in, self._sm_se_out])
		create_tarball(_convert(sandbox_file_list), name=sandbox)
示例2: __init__
def __init__(self, fn, format = 'sniffed'):
	"""Read job parameter tuples from the csv file *fn*.

	The csv dialect is sniffed from the first line by default; pass the name
	of a registered csv dialect via *format* to override it.
	"""
	(self._fn, self._format) = (fn, format)
	fp = open(fn)
	try:
		# Let the csv module guess the dialect from the header line
		header = fp.readline()
		sniffed_dialect = csv.Sniffer().sniff(header)
		csv.register_dialect('sniffed', sniffed_dialect)
		fieldname_list = header.strip().split(sniffed_dialect.delimiter) + [None]
		row_list = list(csv.DictReader(fp, fieldname_list, dialect = format))
	finally:
		fp.close()
	for row in row_list:
		row.pop(None, None)  # drop overflow columns collected under the None key
		if None in row.values():
			raise Exception('Malformed entry in csv file %r: %r' % (fn, row))
	def _cleanup_row(row):
		# strip whitespace from all keys and values ...
		stripped_item_list = [lmap(str.strip, item) for item in row.items()]
		# ... and drop entries whose key is empty
		return lfilter(lambda key_value: key_value[0] != '', stripped_item_list)
	keys = []
	if row_list:
		keys = lmap(ParameterMetadata, row_list[0].keys())
	values = [dict(_cleanup_row(row)) for row in row_list]
	InternalParameterSource.__init__(self, values, keys)
示例3: makeJDL
def makeJDL(self, jobNum, module):
	"""Assemble the JDL attribute dictionary for job *jobNum* of *module*."""
	cfgPath = os.path.join(self._jobPath, 'job_%d.var' % jobNum)
	sbIn = lmap(lambda d_s_t: d_s_t[1], self._getSandboxFilesIn(module))
	sbOut = lmap(lambda d_s_t: d_s_t[2], self._getSandboxFilesOut(module))
	# Output files containing wildcards get collected into GC_WC.tar.gz by the job wrapper
	wildcard_list = [fn for fn in sbOut if '*' in fn]
	if wildcard_list:
		self._writeJobConfig(cfgPath, jobNum, module, {'GC_WC': str.join(' ', wildcard_list)})
		sandboxOutJDL = [fn for fn in sbOut if fn not in wildcard_list] + ['GC_WC.tar.gz']
	else:
		self._writeJobConfig(cfgPath, jobNum, module, {})
		sandboxOutJDL = sbOut
	# Ask before submitting an unusually large input sandbox (can cause WMS issues)
	sb_size_list = [os.path.getsize(fn) for fn in sbIn]
	total_size = sum(sb_size_list)
	if sb_size_list and (self._warnSBSize > 0) and (total_size > self._warnSBSize * 1024 * 1024):
		if not utils.getUserBool('Sandbox is very large (%d bytes) and can cause issues with the WMS! Do you want to continue?' % total_size, False):
			sys.exit(os.EX_OK)
		self._warnSBSize = 0  # only warn once per session
	reqs = self.brokerSite.brokerAdd(module.getRequirements(jobNum), WMS.SITES)
	def _format_str_list(str_list):
		# Render a python list as a JDL string list: { "a", "b" }
		return '{ %s }' % str.join(', ', ['"%s"' % entry for entry in str_list])
	contents = {
		'Executable': '"gc-run.sh"',
		'Arguments': '"%d"' % jobNum,
		'StdOutput': '"gc.stdout"',
		'StdError': '"gc.stderr"',
		'InputSandbox': _format_str_list(sbIn + [cfgPath]),
		'OutputSandbox': _format_str_list(sandboxOutJDL),
		'VirtualOrganisation': '"%s"' % self.vo,
		'Rank': '-other.GlueCEStateEstimatedResponseTime',
		'RetryCount': 2
	}
	return self._jdl_writer.format(reqs, contents)
示例4: cancel
def cancel(self, wms, jobs, interactive = False, showJobs = True):
	"""Cancel the job numbers in *jobs* via *wms* and update the job database.

	Optionally displays the affected jobs and asks for confirmation first;
	jobs the WMS failed to cancel can still be marked as cancelled manually.
	"""
	if not jobs:
		return
	if showJobs:
		self._reportClass(self.jobDB, self._task, jobs).display()
	if interactive and not utils.getUserBool('Do you really want to cancel these jobs?', True):
		return
	def mark_cancelled(jobNum):
		jobObj = self.jobDB.get(jobNum)
		if jobObj is None:
			return
		self._update(jobObj, jobNum, Job.CANCELLED)
		self._eventhandler.onJobUpdate(wms, jobObj, jobNum, {'reason': 'cancelled'})
	jobs.reverse()
	for (jobNum, wmsId) in wms.cancelJobs(self._wmsArgs(jobs)):
		# Successfully cancelled job: drop it from the todo list and record the new state
		assert(self.jobDB.get(jobNum).wmsId == wmsId)
		jobs.remove(jobNum)
		mark_cancelled(jobNum)
	if jobs:  # leftover jobs the backend did not report back as cancelled
		self._log_user.warning('There was a problem with cancelling the following jobs:')
		self._reportClass(self.jobDB, self._task, jobs).display()
		# Without user interaction the leftover jobs are marked unconditionally
		if (not interactive) or utils.getUserBool('Do you want to mark them as cancelled?', True):
			for jobNum in jobs:
				mark_cancelled(jobNum)
	if interactive:
		utils.wait(2)
示例5: _get_sandbox_file_list
def _get_sandbox_file_list(self, task, sm_list):
	"""Collect all files that go into the job sandbox for *task*.

	Combines the monitoring handler's files, dependency setup scripts, the
	task's own input files (wildcards resolved) and two generated files:
	the task configuration ('_config.sh') and the variable alias map
	('_varmap.dat').
	"""
	# Prepare all input files
	dep_list = set(ichain(imap(lambda x: x.get_dependency_list(), [task] + sm_list)))
	# Locate the 'env.<dep>.sh' setup script of each dependency among the installed packages
	dep_fn_list = lmap(lambda dep: resolve_path('env.%s.sh' % dep,
		lmap(lambda pkg: get_path_share('', pkg=pkg), os.listdir(get_path_pkg()))), dep_list)
	# Merge environment dicts of the monitoring handler, the task and the storage managers
	task_config_dict = dict_union(self._remote_event_handler.get_mon_env_dict(),
		*imap(lambda x: x.get_task_dict(), [task] + sm_list))
	task_config_dict.update({'GC_DEPFILES': str.join(' ', dep_list),
		'GC_USERNAME': self._token.get_user_name(), 'GC_WMS_NAME': self._name})
	task_config_str_list = DictFormat(escape_strings=True).format(
		task_config_dict, format='export %s%s%s\n')
	# Monitoring variables map onto themselves; task variables use their own alias map
	vn_alias_dict = dict(izip(self._remote_event_handler.get_mon_env_dict().keys(),
		self._remote_event_handler.get_mon_env_dict().keys()))
	vn_alias_dict.update(task.get_var_alias_map())
	vn_alias_str_list = DictFormat(delimeter=' ').format(vn_alias_dict, format='%s%s%s\n')
	# Resolve wildcards in task input files
	def _get_task_fn_list():
		# Yield every glob match per input file; keep the pattern itself if nothing matches
		for fpi in task.get_sb_in_fpi_list():
			matched = glob.glob(fpi.path_abs)
			if matched != []:
				for match in matched:
					yield match
			else:
				yield fpi.path_abs
	return lchain([self._remote_event_handler.get_file_list(), dep_fn_list, _get_task_fn_list(), [
		VirtualFile('_config.sh', sorted(task_config_str_list)),
		VirtualFile('_varmap.dat', sorted(vn_alias_str_list))]])
示例6: _cancel
def _cancel(self, task, wms, jobnum_list, interactive, show_jobs):
	"""Cancel the jobs in *jobnum_list* via *wms* and mark them in the job database."""
	if not jobnum_list:
		return
	if show_jobs:
		self._abort_report.show_report(self.job_db, jobnum_list)
	if interactive and not self._uii.prompt_bool('Do you really want to cancel these jobs?', True):
		return
	def _mark_cancelled(jobnum):
		job_obj = self.job_db.get_job(jobnum)
		if job_obj is None:
			return
		self._update(task, job_obj, jobnum, Job.CANCELLED)
		self._local_event_handler.on_job_update(task, wms, job_obj, jobnum, {'reason': 'cancelled'})
	jobnum_list.reverse()
	map_gc_id2jobnum = self._get_map_gc_id_jobnum(jobnum_list)
	# Process jobs in order of decreasing job number
	gc_id_list = sorted(map_gc_id2jobnum, key=lambda gc_id: -map_gc_id2jobnum[gc_id])
	for (gc_id,) in wms.cancel_jobs(gc_id_list):
		# Remove the cancelled job from the todo map and mark it as cancelled
		_mark_cancelled(map_gc_id2jobnum.pop(gc_id))
	if map_gc_id2jobnum:  # jobs the backend did not report back as cancelled
		jobnum_list = list(map_gc_id2jobnum.values())
		self._log.warning('There was a problem with cancelling the following jobs:')
		self._abort_report.show_report(self.job_db, jobnum_list)
		# Without user interaction the leftover jobs are marked unconditionally
		if (not interactive) or self._uii.prompt_bool('Do you want to mark them as cancelled?', True):
			for jobnum in jobnum_list:
				_mark_cancelled(jobnum)
	if interactive:
		wait(2)
示例7: join_config_locations
def join_config_locations(opt_first, *opt_list):
	"""Return all space-joined combinations of the given option fragments.

	Each argument is either a string or a list/tuple of strings; a list
	argument contributes one result entry per element.  Every entry in the
	result is whitespace-stripped.  Example:
	join_config_locations(['a', 'b'], 'opt') -> ['a opt', 'b opt']
	"""
	# Use plain builtins/comprehensions instead of the python_compat shims
	# (lmap/imap/lchain) - no intermediate lists, same results
	if isinstance(opt_first, (list, tuple)):  # first option is a list - expand element-wise
		if not opt_list:  # nothing left to combine -> just clean up
			return list(map(str.strip, opt_first))
		result = []
		for opt in opt_first:
			result.extend(join_config_locations(opt.strip(), *opt_list))
		return result
	if not opt_list:  # single scalar option -> clean up and wrap
		return [opt_first.strip()]
	# Prefix each combination of the remaining options with the first fragment
	return [(opt_first + ' ' + opt).strip() for opt in join_config_locations(*opt_list)]
示例8: process
def process(self, pNum, splitInfo, result):
	"""Replace every file name in the partition's file list by its lookup result."""
	fl = splitInfo[DataSplitter.FileList]
	locations = splitInfo.get(DataSplitter.Locations)
	if not locations:
		# No location information - look up without a location context
		splitInfo[DataSplitter.FileList] = lmap(lambda fn: self._lookup(fn, None), fl)
	else:
		# NOTE(review): each iteration overwrites the previous assignment, so
		# effectively only the last location takes effect - confirm this is intended
		for location in locations:
			splitInfo[DataSplitter.FileList] = lmap(lambda fn: self._lookup(fn, location), fl)
示例9: process
def process(self, pnum, partition_info, result):
	"""Run every file URL of the partition through the lookup table."""
	url_list = partition_info[DataSplitter.FileList]
	location_list = partition_info.get(DataSplitter.Locations)
	if not location_list:
		# No location information - look up without a location context
		partition_info[DataSplitter.FileList] = [self._lookup(url, None) for url in url_list]
	else:
		# Translate the url list once per location (later locations overwrite earlier ones)
		for location in location_list:
			translated_url_list = [self._lookup(url, location) for url in url_list]
			partition_info[DataSplitter.FileList] = translated_url_list
示例10: wait
def wait(timeout):
	"""Sleep for *timeout* seconds in steps, aborting early when requested.

	Sleeps in 5 second steps first and in 1 second steps for the final
	stretch.  Returns False if the global abort condition was triggered,
	True otherwise.
	"""
	coarse_steps = [(elapsed, 5) for elapsed in irange(0, timeout - 5, 5)]
	fine_steps = [(elapsed, 1) for elapsed in irange(max(timeout - 5, 0), timeout)]
	for (elapsed, step) in coarse_steps + fine_steps:
		if abort():
			return False
		log = ActivityLog('waiting for %d seconds' % (timeout - elapsed))
		time.sleep(step)
		del log  # deleting the activity log object removes the displayed message
	return True
示例11: __init__
def __init__(self, arg, **kwargs):
	"""Parse the job selector expression *arg* into a matcher matrix.

	Space separated parts are combined with OR, '+' separated parts with
	AND; a leading '~' negates a term.
	"""
	def parseTerm(term):
		expected = (term[0] != '~')  # a leading '~' inverts the selector result
		term = term.lstrip('~')
		if term[0].isdigit():
			selectorType = 'id'  # purely numeric terms select by job number
		else:
			selectorType = 'state'
		if ':' in term:
			selectorType = term.split(':', 1)[0]  # explicit '<type>:<value>' term
		selector = JobSelector.createInstance(selectorType, term.split(':', 1)[-1], **kwargs)
		return lambda jobNum, jobObj: selector.__call__(jobNum, jobObj) == expected
	orTerms = str.join('+', imap(str.strip, arg.split('+'))).split()
	self.js = [[parseTerm(andTerm) for andTerm in orTerm.split('+')] for orTerm in orTerms]
示例12: _getPartition
def _getPartition(self, key):
	"""Extract the information dictionary of partition number *key*.

	Partitions are stored in bundles of 100 inside nested tar files; the
	sub-tar holding the most recently requested key is cached to speed up
	sequential access.
	"""
	# Use integer division: 'key / 100' yields a float on Python 3, which would
	# leak into the cache key and the '%03d' file name ('//' is identical on py2 ints)
	if not self._cacheKey == key // 100:
		self._cacheKey = key // 100
		subTarFileObj = self._tar.extractfile('%03dXX.tgz' % (key // 100))
		subTarFileObj = BytesBuffer(gzip.GzipFile(fileobj = subTarFileObj).read()) # 3-4x speedup for sequential access
		self._cacheTar = tarfile.open(mode = 'r', fileobj = subTarFileObj)
	data = self._fmt.parse(self._cacheTar.extractfile('%05d/info' % key).readlines(),
		keyParser = {None: int}, valueParser = self._parserMap)
	fileList = lmap(bytes2str, self._cacheTar.extractfile('%05d/list' % key).readlines())
	if DataSplitter.CommonPrefix in data:
		# Re-attach the common path prefix that was stripped when the partition was written
		fileList = imap(lambda x: '%s/%s' % (data[DataSplitter.CommonPrefix], x), fileList)
	data[DataSplitter.FileList] = lmap(str.strip, fileList)
	return data
示例13: finaliseJobSplitting
def finaliseJobSplitting(self, block, splitInfo, files = None):
	"""Fill *splitInfo* with information inherited from *block* (and *files* if given)."""
	# Copy infos from block
	for prop in ['Dataset', 'BlockName', 'Nickname', 'Locations']:
		block_key = getattr(DataProvider, prop)
		if block_key in block:
			splitInfo[getattr(DataSplitter, prop)] = block[block_key]
	if DataProvider.Metadata in block:
		splitInfo[DataSplitter.MetadataHeader] = block[DataProvider.Metadata]
	# Helper for very simple splitter
	if files:
		splitInfo[DataSplitter.FileList] = [fi[DataProvider.URL] for fi in files]
		splitInfo[DataSplitter.NEntries] = sum(fi[DataProvider.NEntries] for fi in files)
		if DataProvider.Metadata in block:
			splitInfo[DataSplitter.Metadata] = [fi[DataProvider.Metadata] for fi in files]
	return splitInfo
示例14: _finish_partition
def _finish_partition(self, block, partition, fi_list=None):
	"""Complete *partition* with properties copied from *block* (and *fi_list* if given)."""
	# Copy infos from block
	for (dp_prop, ds_prop) in self._dp_ds_prop_list:
		if dp_prop in block:
			partition[ds_prop] = block[dp_prop]
	if DataProvider.Metadata in block:
		partition[DataSplitter.MetadataHeader] = block[DataProvider.Metadata]
	# Helper for very simple splitter
	if fi_list:
		partition[DataSplitter.FileList] = [fi[DataProvider.URL] for fi in fi_list]
		partition[DataSplitter.NEntries] = sum(fi[DataProvider.NEntries] for fi in fi_list)
		if DataProvider.Metadata in block:
			partition[DataSplitter.Metadata] = [fi[DataProvider.Metadata] for fi in fi_list]
	return partition
示例15: __init__
def __init__(self, arg, **kwargs):
	"""Parse the selector expression *arg* into a list of AND-term lists (OR of ANDs).

	Space separated parts are combined with OR, '+' separated parts with
	AND; a leading '~' negates a term.
	"""
	def _parse_term(term):
		negate = term.startswith('~')
		term = term.lstrip('~')
		# Determine the selector type: numeric terms select by job id,
		# '<type>:<value>' terms select explicitly, everything else by job state
		if term[0].isdigit():
			selector_type = 'id'
		elif ':' in term:
			selector_type = term.split(':', 1)[0]
		else:
			selector_type = 'state'
		selector = JobSelector.create_instance(selector_type, term.split(':', 1)[-1], **kwargs)
		if negate:
			return lambda jobnum, job_obj: not selector.__call__(jobnum, job_obj)
		return selector
	(self._arg, or_term_list) = (arg, str.join('+', imap(str.strip, arg.split('+'))).split())
	self._js = [[_parse_term(and_term) for and_term in or_term.split('+')] for or_term in or_term_list]