This article collects typical usage examples of the workflowInfo function from the Python module utils. If you have been wondering what workflowInfo actually does and how to use it, the curated code examples below should help.
Fifteen code examples of workflowInfo are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
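All examples follow the same basic pattern: build a workflowInfo object from a ReqMgr host and a request name, then read the cached request dictionary or call one of its helper methods. Here is a minimal sketch of that pattern, assuming only the attribute and method names visible in the examples below (the workflow name is a hypothetical placeholder):

from utils import workflowInfo

url = 'cmsweb.cern.ch'
## 'some_workflow_name' is a placeholder, not a real request
wfi = workflowInfo(url, 'some_workflow_name')
print wfi.request['RequestStatus']   ## the cached ReqMgr request dictionary
print wfi.request['OutputDatasets']  ## output datasets of the request
print wfi.getSchema()                ## full request schema, used for cloning in the examples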
Example 1: rejector
def rejector(url, specific, options=None):
up = componentInfo()
if specific.startswith('/'):
pass
else:
wfo = session.query(Workflow).filter(Workflow.name == specific).first()
if not wfo:
print "cannot reject",spec
return
results=[]
wfi = workflowInfo(url, wfo.name)
reqMgrClient.invalidateWorkflow(url, wfo.name, current_status=wfi.request['RequestStatus'])
#if wfi.request['RequestStatus'] in ['assignment-approved','new','completed']:
# #results.append( reqMgrClient.rejectWorkflow(url, wfo.name))
# reqMgrClient.rejectWorkflow(url, wfo.name)
#else:
# #results.append( reqMgrClient.abortWorkflow(url, wfo.name))
# reqMgrClient.abortWorkflow(url, wfo.name)
datasets = wfi.request['OutputDatasets']
for dataset in datasets:
if options.keep:
print "keeping",dataset,"in its current status"
else:
results.append( setDatasetStatus(dataset, 'INVALID') )
if all(map(lambda result : result in ['None',None,True],results)):
wfo.status = 'forget'
session.commit()
print wfo.name,"and",datasets,"are rejected"
if options and options.clone:
schema = wfi.getSchema()
schema['Requestor'] = os.getenv('USER')
schema['Group'] = 'DATAOPS'
schema['OriginalRequestName'] = wfo.name
if 'ProcessingVersion' in schema:
schema['ProcessingVersion']+=1
else:
schema['ProcessingVersion']=2
##schema.pop('RequestDate') ## ok then, let's not reset the time stamp
if options.Memory:
schema['Memory'] = options.Memory
response = reqMgrClient.submitWorkflow(url, schema)
m = re.search("details\/(.*)\'",response)
if not m:
print "error in cloning",wfo.name
print response
return
newWorkflow = m.group(1)
data = reqMgrClient.setWorkflowApproved(url, newWorkflow)
print data
wfo.status = 'trouble'
session.commit()
else:
print "error in rejecting",wfo.name,results
Example 2: invalidator
def invalidator(url, invalid_status='INVALID'):
mcm = McMClient(dev=False)
invalids = mcm.getA('invalidations',query='status=new')
print len(invalids),"Object to be invalidated"
for invalid in invalids:
acknowledge= False
if invalid['type'] == 'request':
wfn = invalid['object']
print "need to invalidate the workflow",wfn
wfo = session.query(Workflow).filter(Workflow.name == wfn).first()
if wfo:
## set that workflow to 'forget' (although checkor will recover from it)
wfo.status = 'forget'
session.commit()
wfi = workflowInfo(url, wfn)
success = "not rejected"
if wfi.request['RequestStatus'] in ['assignment-approved','new','completed']:
success = reqMgrClient.rejectWorkflow(url, wfn)
pass
else:
success = reqMgrClient.abortWorkflow(url, wfn)
pass
print success
acknowledge= True
elif invalid['type'] == 'dataset':
dataset = invalid['object']
if 'None-' in dataset: continue
if 'FAKE-' in dataset: continue
print "setting",dataset,"to",invalid_status
success = "not invalidated"
success = setDatasetStatusDBS3.setStatusDBS3('https://cmsweb.cern.ch/dbs/prod/global/DBSWriter', dataset, invalid_status, None)
print success
## make a delete request from everywhere we can find ?
acknowledge= True
else:
print "\t\t",invalid['type']," type not recognized"
if acknowledge:
## acknowledge invalidation in mcm, provided we can have the api
print "No acknowledgment api yet available"
Example 3: releasor
def releasor(url):
if duplicateLock() : return
SI = siteInfo()
CI = campaignInfo()
LI = lockInfo()
tiers_no_custodial = ['MINIAODSIM']
wfs = []
for fetch in ['done','forget']:
wfs.extend( session.query(Workflow).filter(Workflow.status==fetch).all() )
for wfo in wfs:
wfi = workflowInfo(url, wfo.name )
announced_log = filter(lambda change : change["Status"] in ["closed-out","normal-archived","announced"],wfi.request['RequestTransition'])
if not announced_log:
print "Cannot figure out when",wfo.name,"was finished"
continue
now = time.mktime(time.gmtime()) / (60*60*24.)
then = announced_log[-1]['UpdateTime'] / (60.*60.*24.)
if (now-then) <2:
print "workflow",wfo.name, "finished",now-then,"days ago. Too fresh to clean"
continue
else:
print "workflow",wfo.name,"has finished",now-then,"days ago."
(_,primaries,_,secondaries) = wfi.getIO()
outputs = wfi.request['OutputDatasets']
datasets_to_check = list(primaries)+list(secondaries)+outputs
for dataset in datasets_to_check:
(_,_,_,tier) = dataset.split('/')
## check custodial if required
if tier not in tiers_no_custodial:
## check not used anymore by anything
## unlock output and input everywhere if so
pass
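The age test above works in fractional days since the epoch on both sides of the comparison; the same arithmetic, isolated, with an illustrative transition timestamp:

import time

now  = time.mktime(time.gmtime()) / (60*60*24.)   ## current time, in days since the epoch
then = 1430000000 / (60.*60.*24.)                 ## an illustrative 'UpdateTime' value (seconds)
print "finished %.1f days ago"%( now-then )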
Example 4: getPriority
def getPriority(url, workflow):
return workflowInfo( url,workflow).getPriority()
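Since this is a one-line wrapper around workflowInfo.getPriority, usage is direct (the workflow name is a placeholder):

print getPriority('cmsweb.cern.ch', 'some_workflow_name')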
Example 5: singleClone
def singleClone(url, wfname, actions, comment, do=False):
wfi = workflowInfo(url, wfname)
payload = wfi.getSchema()
initial = wfi.request
payload['Requestor'] = os.getenv('USER')
payload['Group'] = 'DATAOPS'
payload['OriginalRequestName'] = initial['RequestName']
payload['RequestPriority'] = initial['RequestPriority']
if 'ProcessingVersion' in initial:
payload['ProcessingVersion'] = int(initial['ProcessingVersion']) +1
else:
payload['ProcessingVersion'] = 2
## drop parameters on the way to reqmgr2
paramBlacklist = ['BlockCloseMaxEvents', 'BlockCloseMaxFiles', 'BlockCloseMaxSize', 'BlockCloseMaxWaitTime',
'CouchWorkloadDBName', 'CustodialGroup', 'CustodialSubType', 'Dashboard',
'GracePeriod', 'HardTimeout', 'InitialPriority', 'inputMode', 'MaxMergeEvents', 'MaxMergeSize',
'MaxRSS', 'MaxVSize', 'MinMergeSize', 'NonCustodialGroup', 'NonCustodialSubType',
'OutputDatasets', 'ReqMgr2Only', 'RequestDate', 'RequestorDN', 'RequestName', 'RequestStatus',
'RequestTransition', 'RequestWorkflow', 'SiteWhitelist', 'SoftTimeout', 'SoftwareVersions',
'SubscriptionPriority', 'Team', 'timeStamp', 'TrustSitelists', 'TrustPUSitelists',
'TotalEstimatedJobs', 'TotalInputEvents', 'TotalInputLumis', 'TotalInputFiles','checkbox',
'DN', 'AutoApproveSubscriptionSites', 'NonCustodialSites', 'CustodialSites', 'OriginalRequestName', 'Teams', 'OutputModulesLFNBases',
'SiteBlacklist', 'AllowOpportunistic', '_id']
for p in paramBlacklist:
if p in payload:
payload.pop( p )
pass
if actions:
for action in actions:
if action.startswith('mem') and actions[action] != "" and actions[action] != 'Same':
if 'TaskChain' in payload:
print "Setting memory for clone of task chain"
it=1
while True:
t = 'Task%d'%it
it+=1
if t in payload:
payload[t]['Memory'] = actions[action]
print "Memory set for Task%d"%it
else:
break
else:
print "Setting memory for non-taskchain workflow"
payload['Memory'] = actions[action]
print "Memory set to " + actions[action]
#This line doesn't work for some reason
# wfi.sendLog('actor','Memory of clone set to %d'%actions[action])
print "Clone payload"
# print json.dumps( payload , indent=2)
print actions
#Create clone
clone = reqMgrClient.submitWorkflow(url, payload)
if not clone:
print "Error in making clone for",initial["RequestName"]
clone = reqMgrClient.submitWorkflow(url, payload)
if not clone:
print "Error twice in making clone for",initial["RequestName"]
sendLog('actor','Failed to make a clone twice for %s!'%initial["RequestName"],level='critical')
wfi.sendLog('actor','Failed to make a clone twice for %s!'%initial["RequestName"])
return None
if actions:
for action in actions:
if action.startswith('split'):
cloneinfo = workflowInfo(url, clone)
splittings = cloneinfo.getSplittings()
if actions[action] != 'Same' and actions[action] != 'max' and actions[action] != '':
factor = int(actions[action][0:-1]) if 'x' in actions[action] else 2
for split in splittings:
for act in ['avg_events_per_job','events_per_job','lumis_per_job']:
if act in split:
wfi.sendLog('actor','Changing %s (%d) by a factor %d'%( act, split[act], factor))
print "Changing %s (%d) by a factor %d"%( act, split[act], factor),
split[act] /= factor
print "to",split[act]
break
split['requestName'] = clone
print "changing the splitting of",clone
print json.dumps( split, indent=2 )
print reqMgrClient.setWorkflowSplitting(url, clone, split )
elif 'max' in actions[action]:
for split in splittings:
for act in ['avg_events_per_job','events_per_job','lumis_per_job']:
if act in split:
wfi.sendLog('actor','Max splitting set for %s (%d)'%( act, split[act]))
print "Changing %s (%d) "%( act, split[act]),
split[act] = 1
print "to max splitting ",split[act]
break
split['requestName'] = clone
print "changing the splitting of",clone
print json.dumps( split, indent=2 )
#......... remaining code omitted .........
Example 6: defaultdict
wfs_no_location_in_GQ = defaultdict(list)
si = siteInfo()
#bad_blocks = defaultdict( set )
unprocessable = set()
not_runable_acdc=set()
agents_down = defaultdict(set)
failed_workflow = set()
files_locations = {}
stuck_all_done = set()
heavy_duty = {}
for wf in wfs:
if spec and not spec in wf['RequestName']: continue
wfi = workflowInfo(url, wf['RequestName'], request=wf)
sitewhitelist = wfi.request['SiteWhitelist']
wqs = wfi.getWorkQueue()
stats = wfi.getWMStats()
if not 'AgentJobInfo' in stats: stats['AgentJobInfo'] = {}
## skip wfs that unified does not know about, letting acdc (Resubmission) through
wfo = session.query(Workflow).filter(Workflow.name == wf['RequestName']).first()
if not (wfo or wf['RequestType']=='Resubmission'):
print "not knonw or not acdc : %s"%(wf['RequestName'])
continue
## test the heaviness
if 'TotalInputLumis' in wf and 'TotalEstimatedJobs' in wf and wf['TotalEstimatedJobs']:
heavy = (wf['TotalInputLumis'] / float(wf['TotalEstimatedJobs']))
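The heaviness figure is just the average number of input lumis per estimated job; a worked illustration with made-up numbers:

## made-up numbers for illustration
wf = { 'TotalInputLumis' : 60000, 'TotalEstimatedJobs' : 200 }
heavy = wf['TotalInputLumis'] / float(wf['TotalEstimatedJobs'])
print heavy   ## 300.0 input lumis per estimated job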
Example 7: outcleanor
def outcleanor(url, options):
if options.approve:
for user in ['*Vlimant']:#,'*Cremonesi']:
deletes = listDelete( url , user = user)
for (site,who,tid) in deletes:
if 'MSS' in site: continue### ever
print site,who,tid
print "approving deletion"
print approveSubscription(url, tid, nodes = [site], comments = 'Production cleaning by data ops')
return
sites_and_datasets = defaultdict(list)
our_copies = defaultdict(list)
wf_cleaned = {}
wfs = []
for fetch in options.fetch.split(','):
wfs.extend(session.query(Workflow).filter(Workflow.status==fetch).all())
random.shuffle( wfs )
last_answer = None
for wfo in wfs :
if options.number and len(wf_cleaned)>= options.number:
print "Reached",options.number,"cleaned"
break
print '-'*100
wfi = workflowInfo(url, wfo.name)
goes = {} # boolean per output
for dataset in wfi.request['OutputDatasets']:
goes[dataset] = False
keep_one_out = True
status = getDatasetStatus( dataset )
print "\n\tLooking at",dataset,status,"\n"
vetoes = None
if status == 'INVALID':
vetoes = ['Export','Buffer'] ## can take themselves out
keep_one_out = False # just wipe clean
elif status == None:
print dataset,"actually does not exist. skip"
goes[dataset] = True
continue
elif status in ['PRODUCTION','VALID'] and wfo.status in ['forget','trouble']:
print dataset,"should probably be invalidated. (",wfo.status,") skip"
keep_one_out = False # just wipe clean
continue ## you are not sure. just skip it for the time being
elif status == 'PRODUCTION' and wfo.status in ['clean']:
print dataset,"should probably be set valid .skip"
continue ## you are not sure. just skip it for the time being
if status == 'VALID' and dataset.startswith('/MinBias'):
print "This is a /MinBias. skip"
continue
if '/DQM' in dataset:
keep_one_out = False
total_size = getDatasetSize( dataset )
our_presence = getDatasetPresence(url, dataset, complete=None, group="DataOps", vetoes=vetoes)
also_our_presence = getDatasetPresence(url, dataset, complete=None, group="", vetoes=vetoes)
## merge in one unique dict
for site in also_our_presence:
if site in our_presence:
there,frac = our_presence[site]
other,ofrac = also_our_presence[site]
our_presence[site] = (max(there,other),max(frac,ofrac))
else:
our_presence[site] = also_our_presence[site]
if our_presence: print our_presence
## analysis ops copies need to be taken into account
anaops_presence = getDatasetPresence(url, dataset, complete=None, group="AnalysisOps")
own_by_anaops = anaops_presence.keys()
## all our copies
to_be_cleaned = our_presence.keys()
if not len(to_be_cleaned):
print "nowhere to be found of ours,",len(own_by_anaops),"in analysi ops pool"
goes[dataset] = True
continue
print "Where we own bits of dataset"
print to_be_cleaned
if len(own_by_anaops):
## remove site with the anaops copies
to_be_cleaned = list(set(to_be_cleaned) - set(own_by_anaops))
keep_one_out = False ## in that case, just remove our copies
print "Own by anaops (therefore not keep a copy of ours)"
print own_by_anaops
else:
#......... remaining code omitted .........
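The merge in outcleanor assumes getDatasetPresence returns a mapping from site to a (completion, fraction) pair; the same merge, isolated, on made-up data:

## made-up presence maps: site -> (complete?, fraction present)
our_presence = { 'T1_US_FNAL_Disk' : (True, 100.0) }
also_our_presence = { 'T1_US_FNAL_Disk' : (False, 60.0), 'T2_CH_CERN' : (True, 100.0) }
for site in also_our_presence:
    if site in our_presence:
        there,frac = our_presence[site]
        other,ofrac = also_our_presence[site]
        our_presence[site] = ( max(there,other), max(frac,ofrac) )
    else:
        our_presence[site] = also_our_presence[site]
print our_presence   ## per site, the best completion flag and the highest fraction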
Example 8: recoveror
def recoveror(url,specific,options=None):
if userLock('recoveror'): return
up = componentInfo()
CI = campaignInfo()
UC = unifiedConfiguration()
def make_int_keys( d ):
    for code in list(d): ## iterate over a copy, since keys are replaced while looping
        d[int(code)] = d.pop(code)
error_codes_to_recover = UC.get('error_codes_to_recover')
error_codes_to_block = UC.get('error_codes_to_block')
error_codes_to_notify = UC.get('error_codes_to_notify')
make_int_keys( error_codes_to_recover )
make_int_keys( error_codes_to_block )
make_int_keys( error_codes_to_notify )
wfs = session.query(Workflow).filter(Workflow.status == 'assistance-recovery').all()
if specific:
wfs.extend( session.query(Workflow).filter(Workflow.status == 'assistance-manual').all() )
for wfo in wfs:
if specific and not specific in wfo.name:continue
if not specific and 'manual' in wfo.status: continue
wfi = workflowInfo(url, wfo.name, deprecated=True) ## need deprecated info for mergedlfnbase
## need a way to verify that this is the first round of ACDC, since the second round will have to be on the ACDC themselves
all_errors = None
try:
wfi.getSummary()
all_errors = wfi.summary['errors']
except:
pass
print '-'*100
print "Looking at",wfo.name,"for recovery options"
if not all_errors:
print "\tno error for",wfo.name
task_to_recover = defaultdict(list)
message_to_ops = ""
message_to_user = ""
recover=True
if 'LheInputFiles' in wfi.request and wfi.request['LheInputFiles']:
## we do not try to recover pLHE
recover = False
if 'Campaign' in wfi.request:
c = wfi.request['Campaign']
if c in CI.campaigns and 'recover' in CI.campaigns[c]:
recover=CI.campaigns[c]['recover']
for task,errors in all_errors.items():
print "\tTask",task
## collect all error codes and #jobs regardless of step at which it occurred
all_codes = []
for name, codes in errors.items():
if type(codes)==int: continue
all_codes.extend( [(int(code),info['jobs'],name,list(set([e['type'] for e in info['errors']])),list(set([e['details'] for e in info['errors']])) ) for code,info in codes.items()] )
all_codes.sort(key=lambda i:i[1], reverse=True)
sum_failed = sum([l[1] for l in all_codes])
for errorCode,njobs,name,types,details in all_codes:
rate = 100*njobs/float(sum_failed)
#print ("\t\t %10d (%6s%%) failures with error code %10d (%"+str(max_legend)+"s) at stage %s")%(njobs, "%4.2f"%rate, errorCode, legend, name)
print ("\t\t %10d (%6s%%) failures with error code %10d (%30s) at stage %s")%(njobs, "%4.2f"%rate, errorCode, ','.join(types), name)
added_in_recover=False
#if options.go:
# force the recovery of any task with error ?
if errorCode in error_codes_to_recover:
## the error code is registered
for case in error_codes_to_recover[errorCode]:
match = case['details']
matched= (match==None)
if not matched:
matched=False
for detail in details:
if match in detail:
print "[recover] Could find keyword",match,"in"
print 50*"#"
print detail
print 50*"#"
matched = True
break
if matched and rate > case['rate']:
print "\t\t => we should be able to recover that", case['legend']
task_to_recover[task].append( (errorCode,case) )
added_in_recover=True
message_to_user = ""
#......... remaining code omitted .........
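The case matching in recoveror implies that each unifiedConfiguration entry maps an error code to a list of cases with 'details', 'rate' and 'legend' fields; an illustrative (not actual) configuration fragment under that assumption:

## illustrative fragment; the key names are inferred from the matching code above
error_codes_to_recover = {
    8021 : [ { 'details' : 'Fatal Exception',  ## substring to look for in the error details, or None for any
               'rate'    : 20,                 ## minimum failure rate (%) before recovery is attempted
               'legend'  : 'fatal exception' } ]
    }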
Example 9: main
def main():
#Create option parser
usage = "usage: %prog (-w workflow|-f filelist) (-t TASK|--all) [--tesbed]"
parser = OptionParser(usage=usage)
parser.add_option("-f","--file", dest="file", default=None,
help="Text file with a list of workflows")
parser.add_option("-w","--workflow", default=None,
help="Coma separated list of wf to handle")
parser.add_option("-t","--task", default=None,
help="Coma separated task to be recovered")
parser.add_option("-p","--path", default=None,
help="Coma separated list of paths to recover")
parser.add_option("-a","--all",
help="Make acdc for all tasks to be recovered",default=False, action='store_true')
parser.add_option("-m","--memory", dest="memory", default=None, type=int,
help="Memory to override the original request memory")
parser.add_option("-c","--mcore", dest="mcore", default=None,
help="Multicore to override the original request multicore")
parser.add_option("--testbed", default=False, action="store_true")
(options, args) = parser.parse_args()
global url
url = testbed_url if options.testbed else prod_url
if options.all : options.task = 'all'
if not options.task:
parser.error("Provide the -t Task Name or --all")
sys.exit(1)
if not ((options.workflow) or (options.path) or (options.file)):
parser.error("Provide the -w Workflow Name or the -p path or the -f workflow filelist")
sys.exit(1)
wfs = None
wf_and_task = defaultdict(set)
if options.file:
wfs = [l.strip() for l in open(options.file) if l.strip()]
elif options.workflow:
wfs = options.workflow.split(',')
elif options.path:
## self contained
paths = options.path.split(',')
for p in paths:
_,wf,t = p.split('/',2)
wf_and_task[wf].add('/%s/%s'%(wf,t))
else:
parser.error("Either provide a -f filelist or a -w workflow or -p path")
sys.exit(1)
if not wf_and_task:
if options.task == 'all':
for wfname in wfs:
wf_and_task[wfname] = None
else:
for wfname in wfs:
wf_and_task[wfname].update( [('/%s/%s'%(wfname,task)).replace('//','/') for task in options.task.split(',')] )
if not wf_and_task:
parser.error("Provide the -w Workflow Name and the -t Task Name or --all")
sys.exit(1)
for wfname,tasks in wf_and_task.items():
wfi = workflowInfo(url, wfname)
if tasks == None:
where,how_much,how_much_where = wfi.getRecoveryInfo()
tasks = sorted(how_much.keys())
else:
tasks = sorted(tasks)
created = {}
print "Workflow:",wfname
print "Tasks:",tasks
for task in tasks:
r = makeACDC(url=url, wfi=wfi, task=task,
memory = options.memory,
mcore = options.mcore)
if not r:
print "Error in creating ACDC for",task,"on",wfname
break
created[task] = r
if len(created)!=len(tasks):
print "Error in creating all required ACDCs"
sys.exit(1)
print "Created:"
for task in created:
print created[task],"for",task
Example 10: assignor
def assignor(url ,specific = None, talk=True, options=None):
if userLock(): return
if duplicateLock(): return
if not componentInfo().check(): return
UC = unifiedConfiguration()
CI = campaignInfo()
#SI = siteInfo()
SI = global_SI()
#NLI = newLockInfo()
#if not NLI.free() and not options.go: return
LI = lockInfo()
if not LI.free() and not options.go: return
n_assigned = 0
n_stalled = 0
wfos=[]
fetch_from = []
if specific or options.early:
fetch_from.extend(['considered','staging'])
if specific:
fetch_from.extend(['considered-tried'])
fetch_from.extend(['staged'])
if options.from_status:
fetch_from = options.from_status.split(',')
print "Overriding to read from",fetch_from
for status in fetch_from:
wfos.extend(session.query(Workflow).filter(Workflow.status==status).all())
## in case of partial, go for fetching a list from json ?
#if options.partial and not specific:
# pass
dataset_endpoints = json.loads(open('%s/dataset_endpoints.json'%monitor_dir).read())
aaa_mapping = json.loads(open('%s/equalizor.json'%monitor_pub_dir).read())['mapping']
all_stuck = set()
all_stuck.update( json.loads( open('%s/stuck_transfers.json'%monitor_dir).read() ))
all_stuck.update( getAllStuckDataset())
max_per_round = UC.get('max_per_round').get('assignor',None)
max_cpuh_block = UC.get('max_cpuh_block')
random.shuffle( wfos )
for wfo in wfos:
if options.limit and (n_stalled+n_assigned)>options.limit:
break
if max_per_round and (n_stalled+n_assigned)>max_per_round:
break
if specific:
if not any(map(lambda sp: sp in wfo.name,specific.split(','))): continue
#if not specific in wfo.name: continue
print "\n\n"
wfh = workflowInfo( url, wfo.name)
if options.priority and int(wfh.request['RequestPriority']) < options.priority:
continue
options_text=""
if options.early: options_text+=", early option is ON"
if options.partial:
options_text+=", partial option is ON"
options_text+=", good fraction is %.2f"%options.good_enough
wfh.sendLog('assignor',"%s to be assigned%s"%(wfo.name, options_text))
## the site whitelist takes into account siteInfo, campaignInfo, memory and cores
(lheinput,primary,parent,secondary, sites_allowed) = wfh.getSiteWhiteList()
output_tiers = list(set([o.split('/')[-1] for o in wfh.request['OutputDatasets']]))
is_stuck = (all_stuck & primary)
if is_stuck:
wfh.sendLog('assignor',"%s are stuck input"%(','.join( is_stuck)))
## check if by configuration we gave it a GO
no_go = False
if not wfh.go(log=True) and not options.go:
no_go = True
allowed_secondary = {}
assign_parameters = {}
check_secondary = False
for campaign in wfh.getCampaigns():
if campaign in CI.campaigns:
assign_parameters.update( CI.campaigns[campaign] )
if campaign in CI.campaigns and 'secondaries' in CI.campaigns[campaign]:
if CI.campaigns[campaign]['secondaries']:
allowed_secondary.update( CI.campaigns[campaign]['secondaries'] )
check_secondary = True
if campaign in CI.campaigns and 'banned_tier' in CI.campaigns[campaign]:
#......... remaining code omitted .........
Example 11: invalidator
def invalidator(url, invalid_status='INVALID'):
use_mcm = True
up = componentInfo(mcm=use_mcm)
if not up.check(): return
mcm = McMClient(dev=False)
invalids = mcm.getA('invalidations',query='status=announced')
print len(invalids),"Object to be invalidated"
text_to_batch = defaultdict(str)
text_to_request = defaultdict(str)
for invalid in invalids:
acknowledge= False
pid = invalid['prepid']
batch_lookup = invalid['prepid']
text = ""
if invalid['type'] == 'request':
wfn = invalid['object']
print "need to invalidate the workflow",wfn
wfo = session.query(Workflow).filter(Workflow.name == wfn).first()
if wfo:
## set that workflow to 'forget' (although checkor will recover from it)
print "setting the status of",wfo.status,"to forget"
wfo.status = 'forget'
session.commit()
else:
## do not go on like this, do not acknowledge it
print wfn,"is set to be rejected, but we do not know about it yet"
#continue
wfi = workflowInfo(url, wfn)
success = "not rejected"
## to do, we should find a way to reject the workflow and any related acdc
success = reqMgrClient.invalidateWorkflow(url, wfn, current_status = wfi.request['RequestStatus'])
## need to find the whole family and reject the whole gang
family = getWorkflowById( url, wfi.request['PrepID'] , details=True)
for fwl in family:
## take out all acdc
if fwl['RequestDate'] < wfi.request['RequestDate']:continue
if fwl['RequestType']!='Resubmission': continue
print "rejecting",fwl['RequestName']
success = reqMgrClient.invalidateWorkflow(url, fwl['RequestName'], current_status=fwl['RequestStatus'])
print success
wfi.sendLog('invalidator',"rejection is performed from McM invalidations request")
acknowledge= True
text = "The workflow %s (%s) was rejected due to invalidation in McM" % ( wfn, pid )
batch_lookup = wfn ##so that the batch id is taken as the one containing the workflow name
elif invalid['type'] == 'dataset':
dataset = invalid['object']
if '?' in dataset: continue
if 'None' in dataset: continue
if 'None-' in dataset: continue
if 'FAKE-' in dataset: continue
print "setting",dataset,"to",invalid_status
success = setDatasetStatus(dataset , invalid_status )
if success:
acknowledge= True
text = "The dataset %s (%s) was set INVALID due to invalidation in McM" % ( dataset, pid )
else:
print "invalidation of",dataset,"did not go so well"
else:
print "\t\t",invalid['type']," type not recognized"
if acknowledge:
## acknowledge invalidation in mcm, provided we can have the api
print "acknowledgment to mcm"
mcm.get('/restapi/invalidations/acknowledge/%s'%( invalid['_id'] ))
# prepare the text for batches
batches = []
batches.extend(mcm.getA('batches',query='contains=%s'%batch_lookup))
batches = filter(lambda b : b['status'] in ['announced','done','reset'], batches)
if len(batches):
bid = batches[-1]['prepid']
print "batch nofication to",bid
text_to_batch[bid] += text+"\n\n"
# prepare the text for requests
text_to_request[pid] += text+"\n\n"
for bid,text in text_to_batch.items():
if not text: continue
text += '\n This is an automated message'
mcm.put('/restapi/batches/notify',{ "notes" : text, "prepid" : bid})
pass
for pid,text in text_to_request.items():
if not text: continue
text += '\n This is an automated message'
mcm.put('/restapi/requests/notify',{ "message" : text, "prepids" : [pid]})
Example 12: stagor
def stagor(url,specific =None, options=None):
if not componentInfo().check(): return
SI = siteInfo()
CI = campaignInfo()
UC = unifiedConfiguration()
done_by_wf_id = {}
done_by_input = {}
completion_by_input = {}
good_enough = 100.0
lost = json.loads(open('lost_blocks_datasets.json').read())
still_lost = []
for dataset in lost:
l = findLostBlocks(url ,dataset)
if not l:
print dataset,"is not really lost"
else:
still_lost.append( dataset )
open('lost_blocks_datasets.json','w').write( json.dumps( still_lost, indent=2) )
if options.fast:
print "doing the fast check of staged with threshold:",options.goodavailability
for wfo in session.query(Workflow).filter(Workflow.status == 'staging').all():
if specific and not specific in wfo.name: continue
wfi = workflowInfo(url, wfo.name)
sites_allowed = getSiteWhiteList( wfi.getIO() )
if 'SiteWhitelist' in CI.parameters(wfi.request['Campaign']):
sites_allowed = CI.parameters(wfi.request['Campaign'])['SiteWhitelist']
if 'SiteBlacklist' in CI.parameters(wfi.request['Campaign']):
sites_allowed = list(set(sites_allowed) - set(CI.parameters(wfi.request['Campaign'])['SiteBlacklist']))
_,primaries,_,secondaries = wfi.getIO()
se_allowed = [SI.CE_to_SE(site) for site in sites_allowed]
all_check = True
for dataset in list(primaries):#+list(secondaries) ?
#print se_allowed
available = getDatasetBlocksFraction( url , dataset , sites=se_allowed )
all_check &= (available >= options.goodavailability)
if not all_check: break
if all_check:
print "\t\t",wfo.name,"can go staged"
wfo.status = 'staged'
session.commit()
else:
print "\t",wfo.name,"can wait a bit more"
return
for wfo in session.query(Workflow).filter(Workflow.status == 'staging').all():
wfi = workflowInfo(url, wfo.name)
_,primaries,_,secondaries = wfi.getIO()
for dataset in list(primaries)+list(secondaries):
done_by_input[dataset] = {}
completion_by_input[dataset] = {}
print wfo.name,"needs",dataset
for transfer in session.query(Transfer).all():
if specific and str(transfer.phedexid)!=str(specific): continue
skip=True
for wfid in transfer.workflows_id:
tr_wf = session.query(Workflow).get(wfid)
if tr_wf:
if tr_wf.status == 'staging':
print "\t",transfer.phedexid,"is staging for",tr_wf.name
skip=False
if skip: continue
if transfer.phedexid<0: continue
## check the status of transfers
checks = checkTransferApproval(url, transfer.phedexid)
approved = all(checks.values())
if not approved:
print transfer.phedexid,"is not yet approved"
approveSubscription(url, transfer.phedexid)
continue
## check on transfer completion
checks = checkTransferStatus(url, transfer.phedexid, nocollapse=True)
if not specific:
for dsname in checks:
if not dsname in done_by_input: done_by_input[dsname]={}
if not dsname in completion_by_input: completion_by_input[dsname] = {}
done_by_input[dsname][transfer.phedexid]=all(map(lambda i:i>=good_enough, checks[dsname].values()))
completion_by_input[dsname][transfer.phedexid]=checks[dsname].values()
if checks:
print "Checks for",transfer.phedexid,[node.values() for node in checks.values()]
done = all(map(lambda i:i>=good_enough,list(itertools.chain.from_iterable([node.values() for node in checks.values()]))))
else:
## it is empty, is that a sign that all is done and away ?
print "ERROR with the scubscriptions API of ",transfer.phedexid
print "Most likely something else is overiding the transfer request. Need to work on finding the replacement automatically, if the replacement exists"
done = False
## the thing above is NOT giving the right number
#done = False
#......... remaining code omitted .........
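The 'done' flag in stagor assumes checkTransferStatus returns a nested mapping of dataset -> node -> completion percentage; the same check, isolated, on made-up data:

import itertools

good_enough = 100.0
## made-up transfer status: dataset -> node -> percent complete
checks = { '/SomePrimary/SomeCampaign-v1/MINIAODSIM' : { 'T1_US_FNAL_MSS' : 100.0,
                                                         'T2_CH_CERN_Buffer' : 97.5 } }
done = all(map(lambda i : i>=good_enough,
               list(itertools.chain.from_iterable([node.values() for node in checks.values()]))))
print done   ## False: one node is below threshold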
Example 13: getPileupDataset
def getPileupDataset(url, workflow):
return workflowInfo( url,workflow).getPileupDataset()
Example 14: checkor
#......... preceding code omitted .........
sleep_time = min(max(0.5, total_running_time / len(wfs)), 10)
random.shuffle( wfs )
in_manual = 0
## now you have a record of which files were invalidated globally from TT
TMDB_invalid = dataCache.get('file_invalidation')
#try:
# TMDB_invalid = set([row[3] for row in csv.reader( os.popen('curl -s "https://docs.google.com/spreadsheets/d/11fFsDOTLTtRcI4Q3gXw0GNj4ZS8IoXMoQDC3CbOo_2o/export?format=csv"'))])
# TMDB_invalid = map(lambda e : e.split(':')[-1], TMDB_invalid)
# print len(TMDB_invalid),"globally invalidated files"
#except Exception as e:
# print "TMDB not fetched"
# print str(e)
# TMDB_invalid = []
print len(wfs),"to consider, pausing for",sleep_time
max_per_round = UC.get('max_per_round').get('checkor',None)
if options.limit: max_per_round=options.limit
if max_per_round and not spec: wfs = wfs[:max_per_round]
for wfo in wfs:
if spec and not (spec in wfo.name): continue
time.sleep( sleep_time )
time_point("Starting with %s"% wfo.name)
## get info
wfi = workflowInfo(url, wfo.name)
wfi.sendLog('checkor',"checking on %s %s"%( wfo.name,wfo.status))
## make sure the wm status is up to date.
# and send things back/forward if necessary.
wfo.wm_status = wfi.request['RequestStatus']
if wfo.wm_status == 'closed-out':
## manually closed-out
wfi.sendLog('checkor',"%s is already %s, setting close"%( wfo.name , wfo.wm_status))
wfo.status = 'close'
session.commit()
continue
elif wfo.wm_status in ['failed','aborted','aborted-archived','rejected','rejected-archived','aborted-completed']:
## went into trouble
wfo.status = 'trouble'
wfi.sendLog('checkor',"%s is in trouble %s"%(wfo.name, wfo.wm_status))
session.commit()
continue
elif wfo.wm_status in ['assigned','acquired']:
## not worth checking yet
wfi.sendLog('checkor',"%s is not running yet"%wfo.name)
session.commit()
continue
if '-onhold' in wfo.status:
if wfo.name in holdings and wfo.name not in bypasses:
wfi.sendLog('checkor',"%s is on hold"%wfo.name)
continue
if wfo.wm_status != 'completed': #and not wfo.name in bypasses:
## for sure move on with closeout check if in completed
wfi.sendLog('checkor',"no need to check on %s in status %s"%(wfo.name, wfo.wm_status))
session.commit()
Example 15: workflowInfo
from assignSession import *
import time
from utils import getWorkLoad, checkTransferStatus, workflowInfo
import pprint
import sys
url = 'cmsweb.cern.ch'
#wfi = workflowInfo( url, "pdmvserv_HIG-2019GEMUpg14DR-00116_00086_v0__150330_112405_8526")
wfi = workflowInfo( url, "pdmvserv_task_B2G-RunIIWinter15wmLHE-00001__v1_T_150402_161327_2265")
print wfi.getLumiWhiteList()
#print wfi.acquisitionEra()
#pid = sys.argv[1]
#tr = session.query(Transfer).filter(Transfer.phedexid== pid).first()
#for wfid in tr.workflows_id:
# wf = session.query(Workflow).get(wfid)
# print wf.id,wf.name
# wf.status = 'staging'
#session.commit()
#checks = checkTransferStatus( 'cmsweb.cern.ch',440053 )
#print checks
#tr = session.query(Transfer).filter(Transfer.phedexid== 440100 ).first()
#session.delete( tr )
#session.commit()
#wf = session.query(Workflow).filter(Workflow.name=="pdmvserv_TOP-Summer12DR53X-00302_00379_v0__150331_100923_4420"
# ).first()