當前位置: 首頁>>代碼示例>>Python>>正文


Python WorkflowSpec.outputDatasets方法代碼示例

本文整理匯總了Python中ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec.outputDatasets方法的典型用法代碼示例。如果您正苦於以下問題:Python WorkflowSpec.outputDatasets方法的具體用法?Python WorkflowSpec.outputDatasets怎麽用?Python WorkflowSpec.outputDatasets使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec的用法示例。


在下文中一共展示了WorkflowSpec.outputDatasets方法的4個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: JobQueueDB

# 需要導入模塊: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as 別名]
# 或者: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import outputDatasets [as 別名]
# Purge every trace of the workflow from the ProdAgent databases:
# job queue entries, WorkflowEntities records, and MergeSensor datasets.
# NOTE: `workflowSpec` is loaded earlier in the script (not shown here).
jobQ = JobQueueDB()
jobQ.removeWorkflow(workflowSpec.workflowName())


#  //
# // workflow entities
#//
# Remove all jobs registered for this workflow, then the workflow
# record itself.
jobs = WEWorkflow.getJobIDs(workflowSpec.workflowName())
WEAux.removeJob(jobs)
WEAux.removeWorkflow(workflowSpec.workflowName())

#  //
# // merge sensor
#//
# Drop each output dataset of the workflow from the MergeSensor DB.
mergeDB = MergeSensorDB()
mergeDatasets = workflowSpec.outputDatasets()

for d in mergeDatasets:
    try:
        mergeDB.removeDataset(d.name()) 
    except Exception, ex:
        # Best-effort cleanup: a dataset may already have been removed,
        # so report and continue rather than abort the whole purge.
        print "Skipping %s: %s" % (d, ex)
mergeDB.commit()

# Flush and close all open DB sessions used above.
Session.commit_all()
Session.close_all()




開發者ID:giffels,項目名稱:PRODAGENT,代碼行數:28,代碼來源:removeWorkflow.py

示例2: __init__

# 需要導入模塊: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as 別名]
# 或者: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import outputDatasets [as 別名]
class ResultsStatus:
    """
    _ResultsStatus_

    Object to retrieve and compute the overall state of a Results
    Workflow

    """
    def __init__(self, config, msgSvcRef, **workflowDetails):
        """
        Cache the component configuration and message-service handle,
        record the workflow identity, and load its spec file from disk.
        """
        self.configuration = config
        self.msgSvcRef = msgSvcRef
        self.workflowDetails = workflowDetails
        self.workflow = workflowDetails['id']
        self.workflowFile = workflowDetails['workflow_spec_file']

        # Parse the on-disk workflow specification up front so the
        # status checks below can read its parameters directly.
        spec = WorkflowSpec()
        spec.load(self.workflowFile)
        self.workflowSpec = spec

        # Both post-processing steps default to enabled unless the
        # configuration explicitly switches them off.
        self.doMigration = self.configuration.get("MigrateToGlobal", True)
        self.doInjection = self.configuration.get("InjectToPhEDEx", True)

    def __call__(self):
        """
        _operator()_

        Evaluate the status of this workflow from the WorkflowEntities
        data and publish any events that are triggered

        """
        # Nothing to publish until every merge job is finished and
        # its output is registered in DBS.
        if not self.processingComplete():
            return

        logging.info("Processing Complete for %s" % self.workflow)
        for ds in self.unmergedDatasets():
            if self.doMigration:
                logging.debug("Publishing MigrateToGlobal for %s" % ds)
                self.msgSvcRef.publish(
                    "DBSInterface:MigrateDatasetToGlobal", ds)
                self.msgSvcRef.commit()
            if self.doInjection:
                # NOTE(review): the event payload is the workflow spec
                # file, not the dataset -- presumably PollMigration
                # operates per workflow; confirm against the consumer.
                logging.debug("Publishing PollMigration for %s" % ds)
                self.msgSvcRef.publish(
                    "StoreResultsAccountant:PollMigration",
                    self.workflowFile, "00:02:00")
                self.msgSvcRef.commit()

        Session.commit_all()

        # All events are out: mark the workflow finished and remove it
        # from the WorkflowEntities tables.
        WEWorkflow.setFinished(self.workflow)
        WEWorkflow.remove(self.workflow)
        Session.commit_all()
        return


    def processingComplete(self):
        """
        _processingComplete_

        Look at the merge jobs for the workflow and return True only
        when every job has finished AND the local DBS instance knows
        about at least one file per job for the first output dataset.

        Returns False while work is outstanding, while the dataset has
        not yet appeared in DBS, or when no jobs exist at all.
        """
        intermediateDBS = self.workflowSpec.parameters['DBSURL']
        outputDataset   = self.workflowSpec.outputDatasets()[0].name()

        allJobs      = WEUtils.jobsForWorkflow(self.workflow, "Merge")
        finishedJobs = WEUtils.jobsForWorkflow(self.workflow, "Merge", "finished")
        totalProcessing = len(allJobs)
        totalComplete   = len(finishedJobs)

        logging.info("%s: %s/%s jobs complete" %
                      (self.workflow,totalComplete,totalProcessing))

        if totalProcessing == 0: # Protection for non-sensical situation
            return False

        if totalComplete < totalProcessing:
            return False

        # Check to make sure local DBS knows about all output files.
        # Narrowed from a bare ``except:`` so that KeyboardInterrupt /
        # SystemExit still propagate, and the actual failure reason is
        # logged instead of being silently discarded.
        try:
            reader = DBSReader(intermediateDBS)
            blockList = reader.getFiles(dataset = outputDataset)
        except Exception as ex:
            logging.info("Dataset not in DBS yet: %s" % str(ex))
            return False

        # Count the files DBS has registered across all blocks.
        totalRegistered = 0
        for block in blockList:
            totalRegistered += len(blockList[block]['Files'])

        # NOTE(review): this compares registered *files* against the
        # merge *job* count -- assumes one output file per merge job.
        logging.info("%s: %s/%s jobs registered" %
                      (self.workflow,totalRegistered,totalProcessing))
        if totalRegistered < totalProcessing:
            return False

        return True


#.........這裏部分代碼省略.........
開發者ID:giffels,項目名稱:PRODAGENT,代碼行數:103,代碼來源:ResultsStatus.py

示例3: WorkflowSpec

# 需要導入模塊: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as 別名]
# 或者: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import outputDatasets [as 別名]
# Load the workflow spec named by `workflowFile` (set earlier in the
# script) and derive the unique list of output module names from its
# output datasets.  NOTE: this snippet is truncated -- the body of the
# final ``for`` loop is not shown here.
print "Updating DB for workflow: ", workflowFile

# read the WorkflowSpecFile
try:
    wfile = WorkflowSpec()
    wfile.load(workflowFile)

# wrong dataset file
except Exception, msg:
    print "Error loading workflow specifications from %s: %s" \
          % (workflowFile, msg)
    sys.exit(1)

# get output modules
try:
    outputDatasetsList = wfile.outputDatasets()

    # Each output dataset record carries the module that produced it.
    outputModules = [outDS['OutputModuleName'] \
                     for outDS in outputDatasetsList]

    # remove duplicates
    outputModulesList = {}
    for module in outputModules:
        outputModulesList[module] = module
    outputModulesList = outputModulesList.values()

except (IndexError, KeyError):
    # NOTE(review): execution continues after this message; if the
    # exception fired before outputModulesList was assigned, the loop
    # below would raise NameError -- verify against the full script.
    print "wrong output dataset specification"

# create a dataset instances for each output module
for outputModule in outputModulesList:
開發者ID:giffels,項目名稱:PRODAGENT,代碼行數:33,代碼來源:fixDB.py

示例4: createMergeJobWorkflow

# 需要導入模塊: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as 別名]
# 或者: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import outputDatasets [as 別名]
# Build merge workflows for each output dataset of `workflowSpec`
# (defined earlier in the script) and a factory for production jobs.
mergeProdSpecs = createMergeJobWorkflow(workflowSpec)
prodFactory = RequestJobFactory(workflowSpec, productionDir, productionEvents)

# Persist one merge-spec XML per dataset; '/' in the dataset path is
# not filesystem-safe, so it is flattened to '_' for the filename.
for mergeDS, mergeSpec in mergeProdSpecs.items():
    mrgSpecFile = "%s/%s.xml" % (mergeProdDir, mergeDS.replace("/", "_"))
    mergeSpec.save(mrgSpecFile)


#  //
# // make production job definitions
#//
prodJobs = prodFactory()

# Map each production dataset name to its merge spec and prepare an
# (initially empty) set to collect the files it produces.
prodToMergeDatasets = {}
prodFiles = {}
for prodDataset in workflowSpec.outputDatasets():
    dsName = prodDataset.name()
    prodFiles[dsName] = set()
    prodToMergeDatasets[dsName] = mergeProdSpecs[dsName]



emulator2 = EmulatorReportPlugin()

# Synthetic worker-node identity used when emulating job reports.
wnInfo = {
    "SiteName" : "TN_SITE_CH",
    "HostID"   : "host" ,
    "HostName" : "workernode.element.edu",
    "se-name"  : "storage.element.edu",
    "ce-name"  : "compute.element.edu",
}
開發者ID:giffels,項目名稱:PRODAGENT,代碼行數:33,代碼來源:syntheticProduction.py


注:本文中的ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec.outputDatasets方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。