本文整理汇总了Python中userinterface.Client类的典型用法代码示例。如果您正苦于以下问题:Python Client类的具体用法?Python Client怎么用?Python Client使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Client类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: getPandaStatus
def getPandaStatus(self):
    """Poll the PanDA server for per-site job statistics and cloud status.

    For every configured country/group pair, fetch per-site job counts via
    Client.getJobStatisticsPerSite and cache them under
    self.config.sites[country][group]['siteStatus']; every queue attached
    to a site then gets its 'pandaStatus' entry refreshed. Sites unknown
    to PanDA get an all-zero status dict so they can still bootstrap.
    Finally the cloud status is fetched via Client.getCloudSpecs.

    Raises:
        PandaStatusFailure: when either PanDA client call returns a
            non-zero error code.
    """
    for country in self.config.sites.keys():
        for group in self.config.sites[country].keys():
            # country/group = None is equivalent to not specifying anything
            self.factoryMessages.info('Polling panda status for country=%s, group=%s' % (country, group,))
            error,self.config.sites[country][group]['siteStatus'] = Client.getJobStatisticsPerSite(countryGroup=country,workingGroup=group)
            if error != 0:
                raise PandaStatusFailure, 'Client.getJobStatisticsPerSite(countryGroup=%s,workingGroup=%s) error: %s' % (country, group, error)
            for siteid, queues in self.config.sites[country][group].iteritems():
                # 'siteStatus' is bookkeeping stored alongside the real
                # site ids in the same dict - skip it here.
                if siteid == 'siteStatus':
                    continue
                if siteid in self.config.sites[country][group]['siteStatus']:
                    self.factoryMessages.debug('Panda status: %s (country=%s, group=%s) %s' % (siteid, country, group, self.config.sites[country][group]['siteStatus'][siteid]))
                    for queue in queues:
                        self.config.queues[queue]['pandaStatus'] = self.config.sites[country][group]['siteStatus'][siteid]
                else:
                    # If panda knows nothing, then we assume all zeros (site may be inactive)
                    self.factoryMessages.debug('Panda status for siteid %s (country=%s, group=%s) not found - setting zeros in status to allow bootstraping of site.' % (siteid, country, group))
                    for queue in queues:
                        self.config.queues[queue]['pandaStatus'] = {'transferring': 0, 'activated': 0, 'running': 0, 'assigned': 0, 'failed': 0, 'finished': 0}
    # Now poll site and cloud status to suppress pilots if a site is offline
    # Take site status out - better to use individual queue status from schedconfig
    #self.factoryMessages.info('Polling panda for site status')
    #error,self.pandaSiteStatus = Client.getSiteSpecs(siteType='all')
    #if error != 0:
    #    raise PandaStatusFailure, '''Client.getSiteSpecs(siteType='all') error: %s''' % (error)
    self.factoryMessages.info('Polling panda for cloud status')
    error,self.pandaCloudStatus = Client.getCloudSpecs()
    if error != 0:
        raise PandaStatusFailure, 'Client.getCloudSpecs() error: %s' % (error)
示例2: killJobs
def killJobs(self, ids, code=None, verbose=False):
    """Kill jobs. Normal users can kill only their own jobs.
    People with production VOMS role can kill any jobs.
    Running jobs are killed when next heartbeat comes from the pilot.
    Set code=9 if running jobs need to be killed immediately.
    args:
        ids: the list of PandaIDs
        code: specify why the jobs are killed
            2: expire
            3: aborted
            4: expire in waiting
            7: retry by server
            8: rebrokerage
            9: force kill
            50: kill by JEDI
            91: kill user jobs with prod role
        verbose: set True to see what's going on
    returns:
        status code
            0: communication succeeded to the panda server
            255: communication failure
        the server response
    """
    # Imported locally so the PanDA client is only required when used.
    import userinterface.Client as Client
    # Fix: the original dropped the Client.killJobs result even though the
    # docstring documents a return value; propagate it to the caller.
    s, o = Client.killJobs(ids, code=code, verbose=verbose)
    return s, o
示例3: eraseDispDatasets
def eraseDispDatasets(ids):
print "eraseDispDatasets"
datasets = []
# get jobs
status,jobs = Client.getJobStatus(ids)
if status != 0:
return
# gather dispDBlcoks
for job in jobs:
# dispatchDS is not a DQ2 dataset in US
if job.cloud == 'US':
continue
# erase disp datasets for production jobs only
if job.prodSourceLabel != 'managed':
continue
for file in job.Files:
if file.dispatchDBlock == 'NULL':
continue
if (not file.dispatchDBlock in datasets) and \
re.search('_dis\d+$',file.dispatchDBlock) != None:
datasets.append(file.dispatchDBlock)
# erase
for dataset in datasets:
print 'erase %s' % dataset
status,out = ddm.DQ2.main('eraseDataset',dataset)
print out
示例4: uploadLog
def uploadLog(self):
    """Upload the accumulated log for this task to the PanDA server.

    Returns a human-readable string: an error message when the upload
    cannot be performed or fails, an HTML anchor when the server returns
    a URL, or the raw server response otherwise.
    """
    # Fix: compare against None with 'is None' instead of '== None'.
    if self.jediTaskID is None:
        return 'cannot find jediTaskID'
    strMsg = self.logger.dumpToString()
    s, o = Client.uploadLog(strMsg, self.jediTaskID)
    if s != 0:
        return "failed to upload log with {0}.".format(s)
    # the server hands back a URL on success
    if o.startswith('http'):
        return '<a href="{0}">log</a>'.format(o)
    return o
示例5: getJobStatus
def getJobStatus(self, ids):
    """Return a {PandaID: jobStatus} mapping for the given PandaIDs.

    On a non-zero response code from the server the error is logged and
    an empty dict is returned.
    """
    import userinterface.Client as Client
    status, jobSpecs = Client.getJobStatus(ids)
    if status != 0:
        _logger.error('Error response code: %s %s' % (str(status), str(jobSpecs)))
        return {}
    return {spec.PandaID: spec.jobStatus for spec in jobSpecs}
示例6: killJobs
def killJobs(jobList):
print 'Kill jobs'
_logger.debug('Kill jobs')
_logger.debug(str(jobList))
s,o = Client.killJobs(jobList) # Code 3 eqs. aborted status
_logger.debug(o)
_logger.debug(s)
_logger.debug("---------------------")
return o
示例7: submitJobs
def submitJobs(jobList):
print 'Submit jobs'
_logger.debug('Submit jobs')
_logger.debug(str(jobList))
s,o = Client.submitJobs(jobList)
_logger.debug(o)
_logger.debug(s)
_logger.debug("---------------------")
for x in o:
_logger.debug("PandaID=%s" % x[0])
return o
示例8: getStatus
def getStatus(self, expectedStates):
idList = [job['jobID'] for job in self.__jobList]
print idList
status, jobInfoList = Client.getJobStatus(idList)
print jobInfoList
assert status == 0, "Retrieval of job state finished with status: %s" %status
for job in jobInfoList:
assert job.jobStatus in expectedStates, "Recently defined job was not in states %s (PandaID: %s jobStatus: %s)" %(expectedStates, job.PandaID, job.jobStatus)
return jobInfoList
示例9: eraseDispDatasets
def eraseDispDatasets(ids):
    """Erase the dispatch datasets referenced by the given PandaIDs.

    Looks the jobs up on the PanDA server, collects the distinct
    dispatchDBlock names from their files, and erases each dataset
    through DQ2.
    """
    datasets = []
    # get jobs
    status, jobs = Client.getJobStatus(ids)
    if status != 0:
        return
    # gather distinct dispatch dblocks
    for job in jobs:
        for jobFile in job.Files:
            if jobFile.dispatchDBlock not in datasets:
                datasets.append(jobFile.dispatchDBlock)
    # erase each dataset individually
    for dataset in datasets:
        # Fix: the original called ddm.DQ2.main(['eraseDataset',datasets]),
        # passing the whole list as one argument and ignoring the loop
        # variable; erase one dataset at a time with the same calling
        # convention used by the other eraseDispDatasets in this file.
        ddm.DQ2.main('eraseDataset', dataset)
示例10: generateJobs
def generateJobs(self):
    """Define the configured number of evgen jobs, submit them in one
    batch, and record the PandaID assigned to each entry of the job list.
    """
    for idx in range(self.__nJobs):
        newJob = self.defineEvgen16Job(idx)
        self.__jobList.append({'jobSpec': newJob, 'jobID': None})
    # Return from submitJobs: ret.append((job.PandaID,job.jobDefinitionID,{'jobsetID':job.jobsetID}))
    status, output = Client.submitJobs([entry['jobSpec'] for entry in self.__jobList])
    assert status == 0, "Submission of jobs finished with status: %s" %status
    assert len(self.__jobList) == len(output), "Not all jobs seem to have been submitted properly"
    for entry, returnedIds in zip(self.__jobList, output):
        entry['jobID'] = returnedIds[0]
        print("Generated job PandaID = %s" % returnedIds[0])
    return
示例11: update_status
def update_status():
    """Sync local job records with their PanDA job status.

    Selects all user jobs that have a PandaID and are not yet in a final
    state, fetches their current status from the PanDA server, and saves
    any changed attempt number or status (stamping modification_time on a
    status change).

    Returns the list of local (DB) ids of the jobs that were considered.
    """
    # jobs that still need syncing: have a pandaid, not in a final state
    jobs = Job.query.filter(Job.pandaid.isnot(None))\
        .filter(~Job.status.in_(['finished', 'failed', 'cancelled']))\
        .all()
    localids = [job.id for job in jobs]
    ids = [job.pandaid for job in jobs]
    # get status update
    if len(ids) > 0:
        _logger.debug('getJobStatus: ' + str(ids))
        s, o = Client.getJobStatus(ids)
        _logger.debug(o)
        _logger.debug(s)
        _logger.debug("---------------------")
        # Fix: index the server response once instead of scanning the whole
        # list for every local job (was O(n*m)); also skip None entries the
        # server may return for unknown IDs.
        statusByPandaID = {obj.PandaID: obj for obj in o if obj is not None}
        for job in jobs:
            obj = statusByPandaID.get(job.pandaid)
            if obj is None:
                continue
            # Update attemptNr if changed
            if job.attemptnr != obj.attemptNr:
                job.attemptnr = obj.attemptNr
                jobs_.save(job)
            # Update status if changed
            if job.status != obj.jobStatus:
                job.status = obj.jobStatus
                job.modification_time = datetime.utcnow()
                jobs_.save(job)
    return localids
示例12: int
# Parse the command line (optP is the option parser built earlier in
# this script).
options,args = optP.parse_args()
aSrvID = None
codeV = None
useMailAsIDV = False
# Map the option flags onto a PanDA kill code (9 = force kill,
# 91 = kill user jobs with prod role); otherwise fall back to the
# numeric --code value when one was supplied.
if options.forceKill:
    codeV = 9
elif options.killUserJobs:
    codeV = 91
else:
    try:
        codeV = int(options.codeV)
    except Exception:
        # no/invalid numeric code given - kill with the default code
        pass
if options.killOwnProdJobs:
    useMailAsIDV = True
# One positional argument kills a single PandaID; two arguments kill
# the inclusive ID range [startID, endID].
if len(args) == 1:
    Client.killJobs([args[0]], code=codeV, useMailAsID=useMailAsIDV, keepUnmerged=options.keepUnmerged, jobSubStatus=options.jobSubStatus)
else:
    startID = int(args[0])
    endID = int(args[1])
    if startID > endID:
        print '%d is less than %d' % (endID,startID)
        sys.exit(1)
    Client.killJobs(range(startID,endID+1),code=codeV,useMailAsID=useMailAsIDV,keepUnmerged=options.keepUnmerged, jobSubStatus=options.jobSubStatus)
示例13: FileSpec
# Attach the canned test input dataset to the job (job, FileSpec,
# commands and Client are set up earlier in this script).
job.prodDBlock = 'pandatest.000003.dd.input'
job.destinationDBlock = 'panda.destDB.%s' % commands.getoutput('/usr/bin/uuidgen')
job.destinationSE = 'BNL_SE'
# LFN -> GUID map for the three test input files.
ids = {'pandatest.000003.dd.input._00028.junk':'6c19e1fc-ee8c-4bae-bd4c-c9e5c73aca27',
       'pandatest.000003.dd.input._00033.junk':'98f79ba1-1793-4253-aac7-bdf90a51d1ee',
       'pandatest.000003.dd.input._00039.junk':'33660dd5-7cef-422a-a7fc-6c24cb10deb1'}
for lfn in ids.keys():
    file = FileSpec()
    file.lfn = lfn
    file.GUID = ids[file.lfn]
    file.dataset = 'pandatest.000003.dd.input'
    file.type = 'input'
    job.addFile(file)
# Submit the job and dump the server response.
s,o = Client.submitJobs([job])
print "---------------------"
print s
print o
print "---------------------"
# Query the status of some fixed PandaIDs and list their files.
s,o = Client.getJobStatus([4934, 4766, 4767, 4768, 4769])
print s
if s == 0:
    for job in o:
        # entries are None for IDs the server does not know
        if job == None:
            continue
        print job.PandaID
        for file in job.Files:
            print file.lfn,file.type
print "---------------------"
s,o = Client.queryPandaIDs([0])
示例14: putFile
def putFile(req,file):
if not Protocol.isSecure(req):
return False
if '/CN=limited proxy' in req.subprocess_env['SSL_CLIENT_S_DN']:
return False
_logger.debug("putFile : start %s %s" % (req.subprocess_env['SSL_CLIENT_S_DN'],file.filename))
# size check
fullSizeLimit = 768*1024*1024
if not file.filename.startswith('sources.'):
noBuild = True
sizeLimit = 100*1024*1024
else:
noBuild = False
sizeLimit = fullSizeLimit
# get file size
contentLength = 0
try:
contentLength = long(req.headers_in["content-length"])
except:
if req.headers_in.has_key("content-length"):
_logger.error("cannot get CL : %s" % req.headers_in["content-length"])
else:
_logger.error("no CL")
_logger.debug("size %s" % contentLength)
if contentLength > sizeLimit:
errStr = "ERROR : Upload failure. Exceeded size limit %s>%s." % (contentLength,sizeLimit)
if noBuild:
errStr += " Please submit the job without --noBuild/--libDS since those options impose a tighter size limit"
else:
errStr += " Please remove redundant files from your workarea"
_logger.error(errStr)
_logger.debug("putFile : end")
return errStr
try:
fileFullPath = '%s/%s' % (panda_config.cache_dir,file.filename.split('/')[-1])
# avoid overwriting
if os.path.exists(fileFullPath):
# touch
os.utime(fileFullPath,None)
# send error message
errStr = "ERROR : Cannot overwrite file"
_logger.debug('putFile : cannot overwrite file %s' % file.filename)
_logger.debug("putFile : end")
return errStr
# write
fo = open(fileFullPath,'wb')
fileContent = file.file.read()
fo.write(fileContent)
fo.close()
except:
errStr = "ERROR : Cannot write file"
_logger.error(errStr)
_logger.debug("putFile : end")
return errStr
# checksum
try:
# decode Footer
footer = fileContent[-8:]
checkSum,isize = struct.unpack("II",footer)
_logger.debug("CRC from gzip Footer %s" % checkSum)
except:
# calculate on the fly
"""
import zlib
checkSum = zlib.adler32(fileContent) & 0xFFFFFFFF
"""
# use None to avoid delay for now
checkSum = None
_logger.debug("CRC calculated %s" % checkSum)
# file size
fileSize = len(fileContent)
# user name
username = cleanUserID(req.subprocess_env['SSL_CLIENT_S_DN'])
_logger.debug("putFile : written dn=%s file=%s size=%s crc=%s" % \
(username,file.filename,fileSize,checkSum))
# put file info to DB
statClient,outClient = Client.insertSandboxFileInfo(username,file.filename,
fileSize,checkSum)
if statClient != 0 or outClient.startswith("ERROR"):
_logger.error("putFile : failed to put sandbox to DB with %s %s" % (statClient,outClient))
#_logger.debug("putFile : end")
#return "ERROR : Cannot insert sandbox to DB"
else:
_logger.debug("putFile : inserted sandbox to DB with %s" % outClient)
# store to cassandra
if hasattr(panda_config,'cacheUseCassandra') and panda_config.cacheUseCassandra == True:
try:
# time-stamp
timeNow = datetime.datetime.utcnow()
creationTime = timeNow.strftime('%Y-%m-%d %H:%M:%S')
# user name
username = req.subprocess_env['SSL_CLIENT_S_DN']
username = username.replace('/CN=proxy','')
username = username.replace('/CN=limited proxy','')
# file size
fileSize = len(fileContent)
# key
fileKeyName = file.filename.split('/')[-1]
sizeCheckSum = '%s:%s' % (fileSize,checkSum)
# insert to cassandra
#.........这里部分代码省略.........
示例15: FileSpec
# Fill in the LSST test job definition (job, FileSpec, Client, site,
# datasetName and aSrvID are defined earlier in this script).
job.transformation = 'http://pandawms.org/pandawms-jobcache/lsst-trf-phosim332.sh'
job.destinationDBlock = datasetName
#job.destinationSE = destName
job.destinationSE = 'local'
job.currentPriority = 1000
# prodSourceLabel variants tried during development, kept for reference:
#job.prodSourceLabel = 'ptest'
#job.prodSourceLabel = 'panda'
#job.prodSourceLabel = 'ptest'
#job.prodSourceLabel = 'test'
#job.prodSourceLabel = 'ptest'
### 2014-01-27
#job.prodSourceLabel = 'user'
job.prodSourceLabel = 'panda'
job.computingSite = site
job.jobParameters = ""
job.VO = "lsst"
# Attach a log-file FileSpec so the pilot ships the job log back.
fileOL = FileSpec()
fileOL.lfn = "%s.job.log.tgz" % job.jobName
fileOL.destinationDBlock = job.destinationDBlock
fileOL.destinationSE = job.destinationSE
fileOL.dataset = job.destinationDBlock
fileOL.type = 'log'
job.addFile(fileOL)
# Submit and print the assigned PandaIDs.
s,o = Client.submitJobs([job],srvID=aSrvID)
print s
for x in o:
    print "PandaID=%s" % x[0]