This article collects typical usage examples of the Python class RESTInteractions.HTTPRequests. If you have been asking yourself what the HTTPRequests class does, how to use it, or where to find examples of it in use, the curated class code samples below should help.
Fifteen code examples of the HTTPRequests class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
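Before diving in, note the pattern shared by all of the examples: HTTPRequests is constructed with a server host plus an X.509 certificate and key (often the same proxy file for both), and its get/post/delete methods return a (dictresult, status, reason) triple. Here is a minimal sketch of that pattern; the host and proxy path are hypothetical stand-ins:

    from RESTInteractions import HTTPRequests

    # Host and proxy path are made up for illustration.
    server = HTTPRequests('cmsweb.cern.ch',       # REST server host
                          '/tmp/x509up_u12345',   # certificate (here a proxy file)
                          '/tmp/x509up_u12345',   # key (same proxy file)
                          retry=2)

    # Each HTTP verb returns a (dictresult, status, reason) triple.
    dictresult, status, reason = server.get('/crabserver/prod/task',
                                            data={'subresource': 'backendurls'})
    if status == 200:
        print(dictresult['result'][0])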
Example 1: get_backendurls
def get_backendurls(self):
    self.logger.info("Querying server %s for HTCondor schedds and pool names." % self.resturi)
    server = HTTPRequests(self.resthost, self.config.TaskWorker.cmscert, self.config.TaskWorker.cmskey, retry=2)
    result = server.get(self.resturi, data={'subresource': 'backendurls'})[0]['result'][0]
    self.pool = str(result['htcondorPool'])
    self.schedds = [str(i) for i in result['htcondorSchedds']]
    self.logger.info("Resulting pool %s; schedds %s" % (self.pool, ",".join(self.schedds)))
Example 2: __call__
def __call__(self):
    if self.options.task is None:
        return CommandResult(2001, 'ERROR: Task option is required')

    server = HTTPRequests(self.cachedinfo['Server'] + ':' + str(self.cachedinfo['Port']))
    self.logger.debug('Looking up detailed postmortem of task %s' % self.cachedinfo['RequestName'])
    dictresult, postmortem, reason = server.get(self.uri + self.cachedinfo['RequestName'])

    if postmortem != 200:
        msg = "Problem retrieving postmortem:\ninput:%s\noutput:%s\nreason:%s" % (str(self.cachedinfo['RequestName']), str(dictresult), str(reason))
        return CommandResult(1, msg)

    for workflow in dictresult['errors']:
        self.logger.info("#%i %s" % (workflow['subOrder'], workflow['request']))
        if self.options.verbose or self.options.outputfile:
            self.printVerbose(workflow['details'], self.options.outputfile, os.path.join(self.requestarea, 'results', 'jobFailures.log'))
        else:
            self.logger.debug(" Aggregating job failures")
            groupederrs = self.aggregateFailures(workflow['details'])
            if not groupederrs:
                self.logger.info(" No failures")
                continue
            self.logger.info(" List of failures and jobs for each failure (one job can have more than one failure, one per step):")
            for hkey in groupederrs:
                ## remove duplicates and sort
                joberrs = list(set(groupederrs[hkey]['jobs']))
                joberrs.sort()
                self.logger.info(' %s jobs failed with error "%s"' % (len(joberrs), groupederrs[hkey]['error']))
                self.logger.info(' (%s)' % (', '.join([str(jobid[0]) for jobid in joberrs])))
    return CommandResult(0, None)
Example 3: mark_failed
def mark_failed(ids, failures_reasons):
    """
    Mark the list of files as failed
    :param ids: list of Oracle file ids to update
    :param failures_reasons: list of strings with transfer failure messages
    :return: 0 success, 1 failure
    """
    try:
        oracleDB = HTTPRequests(rest_filetransfers,
                                proxy,
                                proxy)

        data = dict()
        data['asoworker'] = 'asoless'
        data['subresource'] = 'updateTransfers'
        data['list_of_ids'] = ids
        data['list_of_transfer_state'] = ["FAILED" for _ in ids]
        data['list_of_failure_reason'] = failures_reasons
        data['list_of_retry_value'] = [0 for _ in ids]

        oracleDB.post('/filetransfers',
                      data=encodeRequest(data))
        logging.debug("Marked failed %s", ids)
    except Exception:
        logging.exception("Error updating documents")
        return 1
    return 0
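A minimal call sketch for mark_failed, assuming the module-level rest_filetransfers endpoint and proxy path referenced inside the function are already configured; the ids and reasons below are made up:

    # Hypothetical Oracle file ids and failure reasons, for illustration only.
    ids = ['1', '2']
    reasons = ['Stageout failure', 'Stageout failure']
    if mark_failed(ids, reasons) == 1:
        logging.error("Could not mark transfers as FAILED")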
Example 4: kill
def kill(self, job):
    """Kill all the jobs on the task."""
    """
    if not os.path.exists(job.inputdata.ui_working_dir):
        raise CRABServerError('Workdir "%s" not found.' %
                              job.inputdata.ui_working_dir)

    if not job.master:
        cmd = 'crab -kill all -c %s' % job.inputdata.ui_working_dir
    else:
        cmd = 'crab -kill %d -c %s' % (int(job.id) + 1,
                                       job.inputdata.ui_working_dir)

    self._send_with_retry(cmd, 'kill', job.backend.crab_env)
    return True
    """
    try:
        server = HTTPRequests(job.backend.server_name, job.backend.userproxy)
        resource = job.backend.apiresource + 'workflow'
        dictresult, status, reason = server.delete(resource, data=urllib.urlencode({'workflow': job.backend.taskname}))
        logger.info("Kill answer: %s" % status)
        logger.info("Kill dictresult: %s" % dictresult)
        return True
    except HTTPException as e:
        logger.error(type(e))
        logger.error(e.req_headers)
        logger.error(e.req_data)
        logger.error(e.reason)
        logger.error(e.message)
        logger.error(e.headers)
        logger.error(e.result)
        logger.error(e.status)
        logger.error(e.url)
        logger.error(e.args)
        raise e
Example 5: __call__
def __call__(self):
    server = HTTPRequests(self.serverurl, self.proxyfilename, self.proxyfilename, version=__version__)

    self.logger.debug('Looking up detailed status of task %s' % self.cachedinfo['RequestName'])
    user = self.cachedinfo['RequestName'].split("_")[2].split(":")[-1]
    verbose = int(self.summary or self.long or self.json)
    if self.idle:
        verbose = 2
    dictresult, status, reason = server.get(self.uri, data={'workflow': self.cachedinfo['RequestName'], 'verbose': verbose})
    # check the HTTP status before extracting the payload
    if status != 200:
        msg = "Problem retrieving status:\ninput:%s\noutput:%s\nreason:%s" % (str(self.cachedinfo['RequestName']), str(dictresult), str(reason))
        raise RESTCommunicationException(msg)
    dictresult = dictresult['result'][0]  # take just the significant part

    self.printShort(dictresult, user)
    self.printPublication(dictresult)

    if 'jobs' not in dictresult:
        self.logger.info("\nNo jobs created yet!")
    else:
        # Note: several options can be combined
        if self.summary:
            self.printSummary(dictresult)
        if self.long:
            self.printLong(dictresult)
        if self.idle:
            self.printIdle(dictresult, user)
        if self.json:
            self.logger.info(dictresult['jobs'])
Example 6: mark_transferred
def mark_transferred(ids):
    """
    Mark the list of files as transferred
    :param ids: list of Oracle file ids to update
    :return: 0 success, 1 failure
    """
    try:
        oracleDB = HTTPRequests(rest_filetransfers,
                                proxy,
                                proxy)
        logging.debug("Marking done %s", ids)

        data = dict()
        data['asoworker'] = 'asoless'
        data['subresource'] = 'updateTransfers'
        data['list_of_ids'] = ids
        data['list_of_transfer_state'] = ["DONE" for _ in ids]

        oracleDB.post('/filetransfers',
                      data=encodeRequest(data))
        logging.debug("Marked good %s", ids)
    except Exception:
        logging.exception("Error updating documents")
        return 1
    return 0
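As with mark_failed in Example 3, a minimal call sketch with hypothetical ids; note that both helpers route the Oracle update through the REST /filetransfers resource rather than talking to the database directly:

    if mark_transferred(['1', '2']) == 1:
        logging.error("Could not mark transfers as DONE")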
Example 7: uploadWarning
def uploadWarning(self, warning, userProxy, taskname):
    try:
        userServer = HTTPRequests(self.server["host"], userProxy, userProxy, retry=2)
        configreq = {"subresource": "addwarning", "workflow": taskname, "warning": b64encode(warning)}
        userServer.post(self.restURInoAPI + "/task", data=urllib.urlencode(configreq))
    except HTTPException as hte:
        self.logger.error(hte.headers)
        self.logger.warning("Cannot add a warning to REST interface. Warning message: %s" % warning)
Example 8: server_info
def server_info(subresource, server, proxyfilename, baseurl):
    """
    Get relevant information about the server
    """
    server = HTTPRequests(server, proxyfilename, proxyfilename, version=__version__)

    dictresult, status, reason = server.get(baseurl, {'subresource': subresource})

    return dictresult['result'][0]
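A call sketch for server_info; the host, proxy path, and baseurl below are hypothetical stand-ins (the examples on this page use paths such as /crabserver/prod/task and the backendurls subresource from Example 1):

    # All values are made up for illustration.
    info = server_info(subresource='backendurls',
                       server='cmsweb.cern.ch',
                       proxyfilename='/tmp/x509up_u12345',
                       baseurl='/crabserver/prod/task')
    print(info)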
Example 9: getCountTasksByStatusAbs
def getCountTasksByStatusAbs(self):
    try:
        resturi = "/crabserver/prod/task"
        configreq = {'minutes': "1000000000", 'subresource': "counttasksbystatus"}
        server = HTTPRequests(self.resthost, "/data/certs/servicecert.pem", "/data/certs/servicekey.pem", retry=2)
        result = server.get(resturi, data=configreq)
        return dict(result[0]['result'])
    except Exception as e:
        self.logger.debug("Error in getCountTasksByStatusAbs: %s" % str(e))
        return []
Example 10: __call__
def __call__(self):
    proxyfile = self.options.proxyfile if self.options.proxyfile else self.proxyfilename
    server = HTTPRequests(self.serverurl, proxyfile, proxyfile, version=__version__)
    self.logger.debug('Looking up type of task %s' % self.cachedinfo['RequestName'])
    dictresult, status, reason = server.get(self.uri, data={'workflow': self.cachedinfo['RequestName'], 'subresource': 'type'})
    self.logger.debug('Task type %s' % dictresult['result'][0])
    return dictresult['result'][0]
Example 11: deleteWarnings
def deleteWarnings(self, userProxy, taskname):
    userServer = HTTPRequests(self.server['host'], userProxy, userProxy, retry=2,
                              logger=self.logger)
    configreq = {'subresource': 'deletewarnings',
                 'workflow': taskname}
    try:
        userServer.post(self.restURInoAPI + '/task', data=urllib.urlencode(configreq))
    except HTTPException as hte:
        self.logger.error("Error deleting warnings: %s", str(hte))
        self.logger.warning("Cannot delete warnings from REST interface.")
Example 12: execute
def execute(self, *args, **kwargs):
    wmwork = Workflow(name=kwargs['task']['tm_taskname'])

    wmsubs = Subscription(fileset=args[0], workflow=wmwork,
                          split_algo=kwargs['task']['tm_split_algo'],
                          type=self.jobtypeMapper[kwargs['task']['tm_job_type']])
    splitter = SplitterFactory()
    jobfactory = splitter(subscription=wmsubs)
    splitparam = kwargs['task']['tm_split_args']
    splitparam['algorithm'] = kwargs['task']['tm_split_algo']
    if kwargs['task']['tm_job_type'] == 'Analysis':
        if kwargs['task']['tm_split_algo'] == 'FileBased':
            splitparam['total_files'] = kwargs['task']['tm_totalunits']
        elif kwargs['task']['tm_split_algo'] == 'LumiBased':
            splitparam['total_lumis'] = kwargs['task']['tm_totalunits']
        elif kwargs['task']['tm_split_algo'] == 'EventAwareLumiBased':
            splitparam['total_events'] = kwargs['task']['tm_totalunits']
    elif kwargs['task']['tm_job_type'] == 'PrivateMC':
        if 'tm_events_per_lumi' in kwargs['task'] and kwargs['task']['tm_events_per_lumi']:
            splitparam['events_per_lumi'] = kwargs['task']['tm_events_per_lumi']
        if 'tm_generator' in kwargs['task'] and kwargs['task']['tm_generator'] == 'lhe':
            splitparam['lheInputFiles'] = True
    splitparam['applyLumiCorrection'] = True
    factory = jobfactory(**splitparam)
    numJobs = sum([len(jobgroup.getJobs()) for jobgroup in factory])
    maxJobs = getattr(self.config.TaskWorker, 'maxJobsPerTask', 10000)
    if numJobs == 0:
        msg = "The CRAB3 server backend could not submit any job to the Grid scheduler:"
        msg += " splitting task %s" % (kwargs['task']['tm_taskname'])
        if kwargs['task']['tm_input_dataset']:
            msg += " on dataset %s" % (kwargs['task']['tm_input_dataset'])
        msg += " with the %s method does not generate any job" % (kwargs['task']['tm_split_algo'])
        raise TaskWorkerException(msg)
    elif numJobs > maxJobs:
        raise TaskWorkerException("The splitting of your task generated %s jobs. The maximum number of jobs in each task is %s" %
                                  (numJobs, maxJobs))
    # print duplicated lumis, if any
    lumiChecker = getattr(jobfactory, 'lumiChecker', None)
    if lumiChecker and lumiChecker.splitLumiFiles:
        self.logger.warning("The input dataset contains the following duplicated lumis: %s" % lumiChecker.splitLumiFiles.keys())
        # TODO: use self.uploadWarning
        try:
            userServer = HTTPRequests(self.server['host'], kwargs['task']['user_proxy'], kwargs['task']['user_proxy'], retry=2,
                                      logger=self.logger)
            configreq = {'subresource': 'addwarning',
                         'workflow': kwargs['task']['tm_taskname'],
                         'warning': b64encode('The CRAB3 server backend detected lumis split across files in the input dataset.'
                                              ' Will apply the necessary corrections in the splitting algorithms. You can ignore this message.')}
            userServer.post(self.restURInoAPI + '/task', data=urllib.urlencode(configreq))
        except HTTPException as hte:
            self.logger.error(hte.headers)
            self.logger.warning("Cannot add warning to REST after finding duplicates")
    return Result(task=kwargs['task'], result=factory)
Example 13: uploadWarning
def uploadWarning(self, warning, userProxy, taskname):
    try:
        userServer = HTTPRequests(self.server['host'], userProxy, userProxy, retry=2,
                                  logger=self.logger)
        configreq = {'subresource': 'addwarning',
                     'workflow': taskname,
                     'warning': b64encode(warning)}
        userServer.post(self.restURInoAPI + '/task', data=urllib.urlencode(configreq))
    except HTTPException as hte:
        self.logger.error(hte.headers)
        self.logger.warning("Cannot add a warning to REST interface. Warning message: %s" % warning)
Example 14: getOutput
def getOutput(self, job):
    """Retrieve the output of the job."""
    """
    if not os.path.exists(job.inputdata.ui_working_dir):
        raise CRABServerError('Workdir "%s" not found.' %
                              job.inputdata.ui_working_dir)

    cmd = 'crab -getoutput %d -c %s' % (int(job.id) + 1,
                                        job.inputdata.ui_working_dir)

    self._send_with_retry(cmd, 'getoutput', job.backend.crab_env)

    # Make output files coming from the WMS readable.
    for root, _, files in os.walk(os.path.join(job.inputdata.ui_working_dir,
                                               'res')):  # Just 'res'.
        for f in files:
            os.chmod(os.path.join(root, f), 0644)
    """
    logger.info('getting output for job %s:%s' % (job.backend.taskname, job.backend.crabid))

    inputlist = [('workflow', job.backend.taskname)]
    inputlist.extend([('subresource', 'logs')])
    inputlist.extend([('jobids', job.backend.crabid)])

    server = HTTPRequests(job.backend.server_name, job.backend.userproxy)
    resource = job.backend.apiresource + 'workflow'

    try:
        dictresult, status, reason = server.get(resource, data=inputlist)
        input = dictresult['result']
        rcopy = remoteCopy(input, job.outputdir, logger)
        rcopy()
        logger.info("Task: %s - subjob: %s output copied" % (job.backend.taskname, job.backend.crabid))
        tfile = tarfile.open(os.path.join(job.outputdir, "cmsRun_%s.log.tar.gz" % job.backend.crabid))
        tfile.extractall(job.outputdir)
    except HTTPException as e:
        # Build one readable message from the exception attributes; the original
        # concatenated strings to a type object and a list, which raises TypeError.
        details = [type(e), dir(e), e.req_headers, e.req_data, e.reason, e.message,
                   e.headers, e.result, e.status, e.url, e.args]
        logger.error(" ".join(str(item) for item in details))
Example 15: _execute
def _execute(self, resthost, resturi, config, task):
    self.logger.info('Cleaning filemetadata older than 30 days..')
    server = HTTPRequests(resthost, config.TaskWorker.cmscert, config.TaskWorker.cmskey, retry=2)
    ONE_MONTH = 24 * 30  # expressed in hours
    try:
        instance = resturi.split('/')[2]
        server.delete('/crabserver/%s/filemetadata' % instance, data=urllib.urlencode({'hours': ONE_MONTH}))
        # TODO: have the server return a value (e.g. ["ok"]) so callers can check that everything went fine
        # result = server.delete('/crabserver/dev/filemetadata', data=urllib.urlencode({'hours': ONE_MONTH}))[0]['result'][0]
        # self.logger.info('FMDCleaner, got %s' % result)
    except HTTPException as hte:
        self.logger.error(hte.headers)