本文整理汇总了Python中ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec.outputDatasets方法的典型用法代码示例。如果您正苦于以下问题:Python WorkflowSpec.outputDatasets方法的具体用法?Python WorkflowSpec.outputDatasets怎么用?Python WorkflowSpec.outputDatasets使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec
的用法示例。
在下文中一共展示了WorkflowSpec.outputDatasets方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: JobQueueDB
# 需要导入模块: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as 别名]
# 或者: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import outputDatasets [as 别名]
jobQ = JobQueueDB()
jobQ.removeWorkflow(workflowSpec.workflowName())
# //
# // workflow entities
#//
jobs = WEWorkflow.getJobIDs(workflowSpec.workflowName())
WEAux.removeJob(jobs)
WEAux.removeWorkflow(workflowSpec.workflowName())
# //
# // merge sensor
#//
mergeDB = MergeSensorDB()
mergeDatasets = workflowSpec.outputDatasets()
for d in mergeDatasets:
try:
mergeDB.removeDataset(d.name())
except Exception, ex:
print "Skipping %s: %s" % (d, ex)
mergeDB.commit()
Session.commit_all()
Session.close_all()
示例2: __init__
# 需要导入模块: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as 别名]
# 或者: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import outputDatasets [as 别名]
class ResultsStatus:
"""
_ResultsStatus_
Object to retrieve and compute the overall state of a Results
Workflow
"""
def __init__(self, config, msgSvcRef, **workflowDetails):
    """
    Capture component configuration and message-service handle, then
    load the workflow spec named in *workflowDetails* from disk.
    """
    self.configuration = config
    self.msgSvcRef = msgSvcRef
    details = workflowDetails
    self.workflowDetails = details
    self.workflow = details['id']
    self.workflowFile = details['workflow_spec_file']
    # Deserialize the workflow specification from its spec file.
    spec = WorkflowSpec()
    spec.load(self.workflowFile)
    self.workflowSpec = spec
    # Feature switches: both default to enabled unless configured off.
    self.doMigration = self.configuration.get("MigrateToGlobal", True)
    self.doInjection = self.configuration.get("InjectToPhEDEx", True)
def __call__(self):
    """
    _operator()_

    Evaluate the status of this workflow from the WorkflowEntities
    data and publish any events that are triggered
    """
    # Nothing to do until every merge job has finished.
    if not self.processingComplete():
        return
    logging.info("Processing Complete for %s" % self.workflow)
    for dataset in self.unmergedDatasets():
        if self.doMigration:
            # Ask the DBS interface to migrate this dataset to global DBS.
            logging.debug(
                "Publishing MigrateToGlobal for %s" % dataset)
            self.msgSvcRef.publish(
                "DBSInterface:MigrateDatasetToGlobal",
                dataset)
            self.msgSvcRef.commit()
        if self.doInjection:
            # Schedule a delayed poll (2 minutes) of the migration status.
            logging.debug("Publishing PollMigration for %s" % dataset)
            self.msgSvcRef.publish("StoreResultsAccountant:PollMigration",
                                   self.workflowFile, "00:02:00")
            self.msgSvcRef.commit()
    Session.commit_all()
    # Mark the workflow finished and drop its entity records.
    WEWorkflow.setFinished(self.workflow)
    WEWorkflow.remove(self.workflow)
    Session.commit_all()
    return
def processingComplete(self):
"""
_processingComplete_
look at the processing jobs for the workflow, and return True
if all processing jobs are complete
"""
intermediateDBS = self.workflowSpec.parameters['DBSURL']
outputDataset = self.workflowSpec.outputDatasets()[0].name()
allJobs = WEUtils.jobsForWorkflow(self.workflow, "Merge")
finishedJobs = WEUtils.jobsForWorkflow(self.workflow, "Merge", "finished")
totalProcessing = len(allJobs)
totalComplete = len(finishedJobs)
logging.info("%s: %s/%s jobs complete" %
(self.workflow,totalComplete,totalProcessing))
if totalProcessing == 0: # Protection for non-sensical situation
return False
if totalComplete < totalProcessing:
return False
# Check to make sure local DBS knows about all output files
try:
reader = DBSReader(intermediateDBS)
blockList = reader.getFiles(dataset = outputDataset)
except:
logging.info("Dataset not in DBS yet")
return False
totalRegistered = 0
for block in blockList:
totalRegistered += len(blockList[block]['Files'])
logging.info("%s: %s/%s jobs registered" %
(self.workflow,totalRegistered,totalProcessing))
if totalRegistered < totalProcessing:
return False
return True
#.........这里部分代码省略.........
示例3: WorkflowSpec
# 需要导入模块: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as 别名]
# 或者: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import outputDatasets [as 别名]
# NOTE(review): this snippet's indentation was lost during extraction and
# the final for-loop body is omitted; restore both before running.
# Announce which workflow's database records are being refreshed.
print "Updating DB for workflow: ", workflowFile
# read the WorkflowSpecFile
try:
wfile = WorkflowSpec()
wfile.load(workflowFile)
# wrong dataset file
except Exception, msg:
print "Error loading workflow specifications from %s: %s" \
% (workflowFile, msg)
sys.exit(1)
# get output modules
try:
outputDatasetsList = wfile.outputDatasets()
# Each output dataset record carries the name of the module that wrote it.
outputModules = [outDS['OutputModuleName'] \
for outDS in outputDatasetsList]
# remove duplicates
# (dict used as a set; order of the surviving values is arbitrary)
outputModulesList = {}
for module in outputModules:
outputModulesList[module] = module
outputModulesList = outputModulesList.values()
except (IndexError, KeyError):
# NOTE(review): on this path outputModulesList may be unbound, so the
# loop below would raise NameError — presumably the omitted code exits
# or the error is considered fatal; confirm against the full source.
print "wrong output dataset specification"
# create a dataset instances for each output module
for outputModule in outputModulesList:
示例4: createMergeJobWorkflow
# 需要导入模块: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as 别名]
# 或者: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import outputDatasets [as 别名]
# NOTE(review): indentation was lost during extraction and later code that
# consumes prodJobs/prodFiles/wnInfo is omitted; restore before running.
# Build one merge workflow spec per output dataset of the production spec.
mergeProdSpecs = createMergeJobWorkflow(workflowSpec)
prodFactory = RequestJobFactory(workflowSpec, productionDir, productionEvents)
# Persist each merge spec as an XML file named after its dataset
# (slashes in the dataset path are not filesystem-safe, hence the replace).
for mergeDS, mergeSpec in mergeProdSpecs.items():
mrgSpecFile = "%s/%s.xml" % (mergeProdDir, mergeDS.replace("/", "_"))
mergeSpec.save(mrgSpecFile)
# //
# // make production job definitions
#//
prodJobs = prodFactory()
# Map each production output dataset to its merge spec and an (initially
# empty) set that will accumulate the files produced into that dataset.
prodToMergeDatasets = {}
prodFiles = {}
for prodDataset in workflowSpec.outputDatasets():
dsName = prodDataset.name()
prodFiles[dsName] = set()
prodToMergeDatasets[dsName] = mergeProdSpecs[dsName]
# Fake worker-node identity used when emulating job reports.
emulator2 = EmulatorReportPlugin()
wnInfo = {
"SiteName" : "TN_SITE_CH",
"HostID" : "host" ,
"HostName" : "workernode.element.edu",
"se-name" : "storage.element.edu",
"ce-name" : "compute.element.edu",
}