本文整理汇总了Python中python_compat.irange函数的典型用法代码示例。如果您正苦于以下问题:Python irange函数的具体用法?Python irange怎么用?Python irange使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了irange函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: wait
def wait(timeout):
	"""Sleep for a total of `timeout` seconds, polling abort() between naps.

	Sleeps in 5-second chunks while more than 5 seconds remain, then in
	1-second steps so the final stretch stays responsive to abort().
	Returns False as soon as abort() signals, True after the full wait.

	Fix: the original built the 5-second-chunk range and the 1-second-tail
	range independently, so for timeouts that are not a multiple of 5 the
	two ranges overlapped and the total sleep exceeded `timeout`
	(e.g. timeout=12 slept 15 seconds). This version tracks elapsed time
	and never over-sleeps.
	"""
	waited = 0
	while waited < timeout:
		if abort():
			return False
		remaining = timeout - waited
		# coarse 5s steps until the last 5 seconds, then 1s steps;
		# min() ensures the final nap never overshoots the deadline
		step = min(remaining, 5 if remaining > 5 else 1)
		log = ActivityLog('waiting for %d seconds' % remaining)
		time.sleep(step)
		waited += step
		del log
	return True
示例2: _create_job_obj
def _create_job_obj(self, name, data):
	"""Reconstruct a Job object from a persisted key/value dict.

	name: source identifier (e.g. file name) - used only in error messages
	data: dict of persisted job properties; consumed destructively (pop)
	Returns the populated Job instance.
	Raises JobError if any part of the dict cannot be parsed.
	"""
	try:
		job = Job()
		# unknown status strings fall back to Job.UNKNOWN
		job.state = Job.str2enum(data.pop('status'), Job.UNKNOWN)
		if 'id' in data:
			gc_id = data.pop('id')
			if not gc_id.startswith('WMSID'):  # Legacy support
				# keep the original id around and convert to WMSID.<wms>.<id> form
				data['legacy_gc_id'] = gc_id
				if gc_id.startswith('https'):
					gc_id = 'WMSID.GLITEWMS.%s' % gc_id
				else:
					wms_id, wms_name = tuple(gc_id.split('.', 1))
					gc_id = 'WMSID.%s.%s' % (wms_name, wms_id)
			job.gc_id = gc_id
		for key in ['attempt', 'submitted', 'changed']:
			if key in data:
				setattr(job, key, data[key])
		if 'runtime' not in data:
			# derive runtime from the submit timestamp when a valid one exists
			if 'submitted' in data and (job.submitted > 0):
				data['runtime'] = time.time() - float(job.submitted)
			else:
				data['runtime'] = 0
		# collect per-attempt history entries ('history_1', 'history_2', ...)
		for key in irange(1, job.attempt + 1):
			if ('history_' + str(key)).strip() in data:
				job.history[key] = data['history_' + str(key)]
		# remaining entries become the job's generic property dict
		job.set_dict(data)
	except Exception:
		raise JobError('Unable to parse data in %s:\n%r' % (name, data))
	return job
示例3: run_command
def run_command(cmd, args, fd_map, env):  # run command by replacing the current process
	"""Replace the current process image with `cmd` via os.execve.

	cmd: path of the executable to run
	args: argument list for the new process
	fd_map: {target_fd: source_fd} pairs dup2'ed before exec (wires std{in,out,err})
	env: environment dict for the new process
	Only continues past the execve call if the exec failed; in that case an
	error report is written to stderr and the process exits with os.EX_OSERR.
	"""
	def _safe_close(file_descriptor):
		# best-effort close - the fd may already be closed or never opened
		try:
			os.close(file_descriptor)
		except Exception:
			pass
	for fd_target, fd_source in fd_map.items():
		os.dup2(fd_source, fd_target)  # set stdin/stdout/stderr
	try:
		fd_max = os.sysconf('SC_OPEN_MAX')
	except Exception:
		fd_max = 256  # conservative fallback if the fd limit cannot be queried
	for fd_open in irange(3, fd_max):  # close inherited file descriptors except for std{in/out/err}
		_safe_close(fd_open)
	try:
		os.execve(cmd, args, env)  # replace process - this command DOES NOT RETURN if successful!
	except Exception:
		pass
	# exec failed - report as much context as possible on stderr
	# NOTE(review): on Python 3, sys.exc_info() is cleared when the except
	# block above is left, so the 'exception' field may be empty there - confirm
	error_msg_list = [
		'== grid-control process error ==',
		' pid: %s' % os.getpid(),
		' fd map: %s' % repr(fd_map),
		'environment: %s' % repr(env),
		' command: %s' % repr(cmd),
		' arguments: %s' % repr(args),
		' exception: %s' % repr(sys.exc_info()[1]),
	]
	sys.stderr.write(str.join('\n', error_msg_list))
	# close std fds so buffered data cannot be flushed twice by the fork parent
	for fd_std in [0, 1, 2]:
		_safe_close(fd_std)
	exit_without_cleanup(os.EX_OSERR)  # exit forked process with OS error
示例4: partition_check
def partition_check(splitter):
	"""Cross-check splitter partitions against the job_<N>.var files on disk.

	For every partition, compares the event count, skip count and file list
	stored in opts.checkSplitting/jobs/job_<N>.var with the splitter's
	values, logging a warning per mismatch and a summary of failed jobs.

	Fixes: the job .var file handle is now closed deterministically (the
	original leaked it via open(...).readlines()), and the comparison
	helper is defined once instead of on every loop iteration.
	"""
	fail = utils.set()

	def _compare(jobNum, curJ, curS, msg):
		# warn and record the job if the job-file value differs from the splitter
		if curJ != curS:
			logging.warning('%s in job %d (j:%s != s:%s)', msg, jobNum, curJ, curS)
			fail.add(jobNum)

	for jobNum in irange(splitter.getMaxJobs()):
		splitInfo = splitter.getSplitInfo(jobNum)
		try:
			(events, skip, files) = (0, 0, [])
			fp = open(os.path.join(opts.checkSplitting, 'jobs', 'job_%d.var' % jobNum))
			try:
				lines = fp.readlines()
			finally:
				fp.close()  # close even if reading raises
			for line in lines:
				if 'MAX_EVENTS' in line:
					events = int(line.split('MAX_EVENTS', 1)[1].replace('=', ''))
				if 'SKIP_EVENTS' in line:
					skip = int(line.split('SKIP_EVENTS', 1)[1].replace('=', ''))
				if 'FILE_NAMES' in line:
					files = line.split('FILE_NAMES', 1)[1].replace('=', '').replace('\"', '').replace('\\', '')
					files = lmap(lambda x: x.strip().strip(','), files.split())
			_compare(jobNum, events, splitInfo[DataSplitter.NEntries], 'Inconsistent number of events')
			_compare(jobNum, skip, splitInfo[DataSplitter.Skipped], 'Inconsistent number of skipped events')
			_compare(jobNum, files, splitInfo[DataSplitter.FileList], 'Inconsistent list of files')
		except Exception:
			# missing / unreadable .var file means the job never started
			logging.warning('Job %d was never initialized!', jobNum)
	if fail:
		logging.warning('Failed: ' + str.join('\n', imap(str, fail)))
示例5: loadData
def loadData(cls, name, data):
	"""Reconstruct a Job object from a persisted key/value dict (legacy format).

	name: source identifier - used only in error messages
	data: dict of persisted job properties; internal keys are stripped from it
	Returns the populated Job instance.
	Raises JobError if any part of the dict cannot be parsed.
	"""
	try:
		job = Job()
		# unknown status strings fall back to Job.FAILED
		job.state = Job.str2enum(data.get('status'), Job.FAILED)
		if 'id' in data:
			if not data['id'].startswith('WMSID'):  # Legacy support
				# keep the original id and convert to WMSID.<backend>.<id> form
				data['legacy'] = data['id']
				if data['id'].startswith('https'):
					data['id'] = 'WMSID.GLITEWMS.%s' % data['id']
				else:
					wmsId, backend = tuple(data['id'].split('.', 1))
					data['id'] = 'WMSID.%s.%s' % (backend, wmsId)
			job.wmsId = data['id']
		for key in ['attempt', 'submitted', 'changed']:
			if key in data:
				setattr(job, key, data[key])
		if 'runtime' not in data:
			# NOTE(review): unlike newer variants there is no job.submitted > 0
			# guard here, so a stored 0 timestamp yields a huge runtime - confirm
			if 'submitted' in data:
				data['runtime'] = time.time() - float(job.submitted)
			else:
				data['runtime'] = 0
		# collect per-attempt history entries ('history_1', 'history_2', ...)
		for key in irange(1, job.attempt + 1):
			if ('history_' + str(key)).strip() in data:
				job.history[key] = data['history_' + str(key)]
		# strip internal bookkeeping keys before storing the remaining dict
		for i in cls.__internals:
			try:
				del data[i]
			except Exception:
				pass
		job.dict = data
	except Exception:
		raise JobError('Unable to parse data in %s:\n%r' % (name, data))
	return job
示例6: cancelJobs
def cancelJobs(self, allIds):
	"""Cancel the given jobs and yield (jobNum, wmsId) for each cancelled job.

	allIds: list of (wmsId, jobNum) pairs
	Jobs are cancelled in batches of 5 with a 5 second pause between batches;
	the loop stops early if the pause is interrupted (utils.wait returns False).

	Fix: a generator must terminate with a plain 'return' - the original
	'raise StopIteration' is converted into RuntimeError on Python 3.7+
	(PEP 479), crashing any caller that iterates an empty id list.
	"""
	if len(allIds) == 0:
		return
	waitFlag = False
	for ids in imap(lambda x: allIds[x:x + 5], irange(0, len(allIds), 5)):
		# Delete jobs in groups of 5 - with 5 seconds between groups
		if waitFlag and not utils.wait(5):
			break
		waitFlag = True
		jobNumMap = dict(ids)
		jobs = self.writeWMSIds(ids)
		activity = utils.ActivityLog('cancelling jobs')
		proc = LocalProcess(self._cancelExec, '--noint', '--logfile', '/dev/stderr', '-i', jobs)
		retCode = proc.status(timeout = 60, terminate = True)
		del activity
		# lines starting with '- ' list the WMS ids that were actually cancelled
		for deletedWMSId in ifilter(lambda x: x.startswith('- '), proc.stdout.iter()):
			deletedWMSId = self._createId(deletedWMSId.strip('- \n'))
			yield (jobNumMap.get(deletedWMSId), deletedWMSId)
		if retCode != 0:
			# log process details unless the error is recognized and explained
			if self.explainError(proc, retCode):
				pass
			else:
				self._log.log_process(proc, files = {'jobs': utils.safeRead(jobs)})
		utils.removeFiles([jobs])
示例7: logging_create_handlers
def logging_create_handlers(config, logger_name):
	"""Configure level, propagation and output handlers of one logger from
	the '<logger_name> ...' options of the given config object."""
	level_names = lmap(lambda level: logging.getLevelName(level).upper(), irange(51))
	LogLevelEnum = makeEnum(level_names)
	logger = logging.getLogger(logger_name.lower())
	# Set logging level
	logger.setLevel(config.getEnum(logger_name + ' level', LogLevelEnum, logger.level, onChange = None))
	# Set propagate status
	logger.propagate = config.getBool(logger_name + ' propagate', bool(logger.propagate), onChange = None)
	# Setup handlers - only when the user configured any explicitly
	if logger_name + ' handler' not in config.getOptions():
		return
	# remove any standard handlers:
	for old_handler in list(logger.handlers):
		logger.removeHandler(old_handler)
	handler_list = config.getList(logger_name + ' handler', [], onChange = None)
	for handler_str in set(handler_list):  # add only unique output handlers
		if handler_str == 'stdout':
			new_handler = StdoutStreamHandler()
		elif handler_str == 'stderr':
			new_handler = StderrStreamHandler()
		elif handler_str == 'file':
			new_handler = logging.FileHandler(config.get(logger_name + ' file', onChange = None), 'w')
		elif handler_str == 'debug_file':
			new_handler = GCLogHandler(config.get(logger_name + ' debug file', onChange = None), 'w')
		else:
			raise Exception('Unknown handler %s for logger %s' % (handler_str, logger_name))
		logger.addHandler(logging_configure_handler(config, logger_name, handler_str, new_handler))
示例8: __init__
def __init__(self, config):
	"""Collect parameter sources from the config: random seed variables,
	constants from the [constants] section and from '<Module>' options,
	plus global repeat/requirement settings; delegates parameter
	combination to the configured 'parameter factory' plugin."""
	ParameterFactory.__init__(self, config)
	self._psrc_list = []
	# Random number variables
	jobs_config = config.change_view(add_sections=['jobs'])
	self._random_variables = jobs_config.get_list('random variables', ['JOB_RANDOM'], on_change=None)
	nseeds = jobs_config.get_int('nseeds', 10)
	# freshly drawn seeds are only the default - the persistent 'seeds'
	# option wins on later runs, keeping parameter values reproducible
	seeds_new = lmap(lambda x: str(random.randint(0, 10000000)), irange(nseeds))
	self._random_seeds = jobs_config.get_list('seeds', seeds_new, persistent=True)
	# Get constants from [constants <tags...>]
	constants_config = config.change_view(view_class='TaggedConfigView',
		set_classes=None, set_sections=['constants'], set_names=None)
	constants_pconfig = ParameterConfig(constants_config)
	# options without spaces are plain variable names - mark them verbatim
	for vn_const in ifilter(lambda opt: ' ' not in opt, constants_config.get_option_list()):
		constants_config.set('%s type' % vn_const, 'verbatim', '?=')
		self._register_psrc(constants_pconfig, vn_const.upper())
	param_config = config.change_view(view_class='TaggedConfigView',
		set_classes=None, add_sections=['parameters'], inherit_sections=True)
	# Get constants from [<Module>] constants
	task_pconfig = ParameterConfig(param_config)
	for vn_const in param_config.get_list('constants', []):
		config.set('%s type' % vn_const, 'verbatim', '?=')
		self._register_psrc(task_pconfig, vn_const)
	# Get global repeat value from 'parameters' section
	self._repeat = param_config.get_int('repeat', -1, on_change=None)
	self._req = param_config.get_bool('translate requirements', True, on_change=None)
	self._pfactory = param_config.get_plugin('parameter factory', 'SimpleParameterFactory',
		cls=ParameterFactory)
示例9: draw_pie
def draw_pie(ax, breakdown, pos, size, piecolor = None):
	"""Draw a pie-chart marker at position `pos` on axis `ax`.

	`breakdown` lists the relative slice weights; `size` is the scatter
	marker size; slice colors cycle through `piecolor` (falls back to a
	fixed five-color palette when empty or None)."""
	if not piecolor:
		piecolor = ['red', 'orange', 'green', 'blue', 'purple']
	# cumulative fractions [0, f1, ..., 1] delimiting the wedge boundaries
	fractions = [0] + list(numpy.cumsum(breakdown) * 1.0 / sum(breakdown))
	for idx in irange(len(fractions) - 1):
		angles = numpy.linspace(2 * math.pi * fractions[idx], 2 * math.pi * fractions[idx + 1], 20)
		# wedge outline: center point followed by the arc samples
		wedge_x = [0] + numpy.cos(angles).tolist()
		wedge_y = [0] + numpy.sin(angles).tolist()
		ax.scatter(pos[0], pos[1], marker=(lzip(wedge_x, wedge_y), 0),
			s = size, facecolor = piecolor[idx % len(piecolor)])
示例10: clearOPStack
def clearOPStack(opList, opStack, tokStack):
	"""Pop operators from `opStack` while their token is in `opList`,
	folding the matching operands from `tokStack` into (op, operands)
	nodes that are pushed back onto `tokStack`. Mutates both stacks."""
	while opStack and (opStack[-1][0] in opList):
		operator = opStack.pop()
		# an operator entry of length k consumes k + 1 operands
		operand_count = len(operator) + 1
		operands = [tokStack.pop() for dummy in irange(operand_count)]
		# pops come off in reverse - restore the original operand order
		operands.reverse()
		tokStack.append((operator[0], operands))
示例11: execute
def execute(self, wmsIDs, *args, **kwargs):
	"""Run the wrapped executor over `wmsIDs` in chunks of
	self._chunk_size, pausing self._chunk_time seconds between chunks;
	yields every result and stops early if the pause is interrupted."""
	first_chunk = True
	for chunk_start in irange(0, len(wmsIDs), self._chunk_size):
		# no pause before the very first chunk
		if not first_chunk and not utils.wait(self._chunk_time):
			break
		first_chunk = False
		chunk = wmsIDs[chunk_start:chunk_start + self._chunk_size]
		for result in self._executor.execute(chunk, *args, **kwargs):
			yield result
示例12: __init__
def __init__(self, config, datasource_name):
	"""Read the 'ignore task vars' option - task variables that should not
	be treated as dataset-relevant parameters by this scanner."""
	InfoScanner.__init__(self, config, datasource_name)
	# SEED_0..SEED_9 plus the fixed bookkeeping variables
	seed_vars = ['SEED_%d' % seed_idx for seed_idx in irange(10)]
	ignore_list_default = seed_vars + ['DOBREAK', 'FILE_NAMES',
		'GC_DEPFILES', 'GC_JOBID', 'GC_JOBNUM', 'GC_JOB_ID', 'GC_PARAM', 'GC_RUNTIME', 'GC_VERSION',
		'JOB_RANDOM', 'JOBID', 'LANDINGZONE_LL', 'LANDINGZONE_UL', 'MY_JOB', 'MY_JOBID', 'MY_RUNTIME',
		'SB_INPUT_FILES', 'SB_OUTPUT_FILES', 'SCRATCH_LL', 'SCRATCH_UL', 'SEEDS',
		'SE_INPUT_FILES', 'SE_INPUT_PATH', 'SE_INPUT_PATTERN', 'SE_MINFILESIZE',
		'SE_OUTPUT_FILES', 'SE_OUTPUT_PATH', 'SE_OUTPUT_PATTERN', 'SUBST_FILES']
	self._ignore_vars = config.get_list('ignore task vars', ignore_list_default)
示例13: execute
def execute(self, wms_id_list, *args, **kwargs):
	"""Run the wrapped executor over `wms_id_list` in chunks of
	self._chunk_size with self._chunk_interval seconds between chunks;
	yields all results and stops early if the pause is interrupted."""
	is_first = True
	for offset in irange(0, len(wms_id_list), self._chunk_size):
		# no pause before the very first chunk
		if not is_first and not wait(self._chunk_interval):
			break
		is_first = False
		current_chunk = wms_id_list[offset:offset + self._chunk_size]
		for result in self._executor.execute(current_chunk, *args, **kwargs):
			yield result
示例14: __init__
def __init__(self, config):
	"""Read the 'ignore task vars' option listing task variables that are
	not dataset-relevant parameters (legacy scanner variant)."""
	InfoScanner.__init__(self, config)
	# SEED_0..SEED_9 plus the fixed bookkeeping variables
	seed_names = ['SEED_%d' % pos for pos in irange(10)]
	ignoreDef = seed_names + ['FILE_NAMES',
		'SB_INPUT_FILES', 'SE_INPUT_FILES', 'SE_INPUT_PATH', 'SE_INPUT_PATTERN',
		'SB_OUTPUT_FILES', 'SE_OUTPUT_FILES', 'SE_OUTPUT_PATH', 'SE_OUTPUT_PATTERN',
		'SE_MINFILESIZE', 'DOBREAK', 'MY_RUNTIME', 'GC_RUNTIME', 'MY_JOBID', 'GC_JOB_ID',
		'GC_VERSION', 'GC_DEPFILES', 'SUBST_FILES', 'SEEDS',
		'SCRATCH_LL', 'SCRATCH_UL', 'LANDINGZONE_LL', 'LANDINGZONE_UL']
	self._ignoreVars = config.getList('ignore task vars', ignoreDef)
示例15: _get_default_config_fn_iter
def _get_default_config_fn_iter():
	"""Yield candidate default config file locations, most specific first:
	host/domain specific files, the system-wide file, the per-user file,
	the installation default and finally the GC_CONFIG environment file."""
	if hostname:  # host / domain specific
		# walk from the full hostname down to the bare top-level domain
		num_parts = hostname.count('.') + 1
		for part_idx in irange(num_parts, -1, -1):
			domain_suffix = hostname.split('.', part_idx)[-1]
			yield get_path_pkg('../config/%s.conf' % domain_suffix)
	yield '/etc/grid-control.conf'  # system specific
	yield '~/.grid-control.conf'  # user specific
	yield get_path_pkg('../config/default.conf')  # installation specific
	if os.environ.get('GC_CONFIG'):
		yield '$GC_CONFIG'  # environment specific