

Python WorkflowSpec.load Method Code Examples

This article collects typical usage examples of the Python method ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec.load. If you are wondering how to use WorkflowSpec.load, or how it is called in practice, the curated examples below may help. You can also explore further usage of its containing class, ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec.


The following presents 15 code examples of the WorkflowSpec.load method, drawn from open-source projects and sorted by popularity by default.
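
Before diving into the examples, here is a minimal sketch of the pattern they all share: instantiate WorkflowSpec, call load() with the path to a workflow spec XML file, and guard against load failures. The file path is a placeholder, and the snippet follows the Python 2 idioms used throughout the examples below.

from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec

specFile = "/path/to/workflow-spec.xml"   # placeholder path

workflowSpec = WorkflowSpec()
try:
    # load() parses the workflow XML and populates the spec object
    workflowSpec.load(specFile)
except Exception, ex:
    print "Failed to load workflow spec %s: %s" % (specFile, str(ex))
else:
    # a field commonly read after a successful load (see the examples below)
    print "Workflow name:", workflowSpec.workflowName()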

Example 1: __init__

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
class JobSpecExpander:

    def __init__(self, jobSpecFile):
        self.jobSpec = JobSpec()
        self.jobSpec.load(jobSpecFile)
        self.taskState = TaskState(os.getcwd())
        self.taskState.loadRunResDB()
        self.workflowSpec = WorkflowSpec()
        self.workflowSpec.load(os.environ["PRODAGENT_WORKFLOW_SPEC"])
        
        self.config = self.taskState.configurationDict()

        finder = NodeFinder(self.taskState.taskName())
        self.jobSpec.payload.operate(finder)
        self.jobSpecNode = finder.result

        wffinder = NodeFinder(self.taskState.taskName())
        self.workflowSpec.payload.operate(wffinder)
        self.workflowNode = wffinder.result

        if self.jobSpecNode.jobType != "Merge":
            if self.config.has_key('Configuration'):
                try:
                    self.createPSet()
                except Exception, ex:
                    msg = "Unable to generate cmsRun Config from JobSpec:\n"
                    msg += str(ex)
                    print msg
                    badfile = open("exit.status", 'w')
                    badfile.write("10040")
                    badfile.close()
        else:
Developer: giffels, Project: PRODAGENT, Lines: 34, Source: RuntimePSetPrep.py

Example 2: getCMSSoft

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
def getCMSSoft(work,reverse=False):
    """
    opens each workflow file and gets the CMSSW version;
    if reverse, returns a map from CMSSW version to the real workflow names
    """

    new_work={}
    workflowSpec = WorkflowSpec()
    for fil in work:
        try:
            workflowSpec.load(fil)
            cmssw=workflowSpec.payload.application['Version']
            name=workflowSpec.parameters['WorkflowName']
            if reverse:
                if not new_work.has_key(cmssw):
                    new_work[cmssw]=[]
                # append outside the guard so every workflow is recorded,
                # not only the first one seen per CMSSW version
                new_work[cmssw].append(name)
            else:
                new_work[name]=cmssw
        except:
            """
            something went wrong

            """
            msg="WorkflowConstraints getCMSSoft: something went wrong while handling file "+fil
            print(msg)

    return new_work
Developer: giffels, Project: PRODAGENT, Lines: 30, Source: WorkflowConstraints.py
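
A short usage sketch for getCMSSoft, assuming the hypothetical spec file list below: forward mode maps each workflow name to its CMSSW version, while reverse=True groups workflow names under each version.

# hypothetical workflow spec files; any iterable of paths works
specFiles = ["alpha-workflow.xml", "beta-workflow.xml"]

byName = getCMSSoft(specFiles)                   # {workflow name: CMSSW version}
byVersion = getCMSSoft(specFiles, reverse=True)  # {CMSSW version: [workflow names]}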

Example 3: add

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
    def add(self, workflowFile):
        """
        _add_
        
        Add a dataset to the list of watched datasets.
        
        Arguments:
            
          workflowFile -- the workflow specification file
        
        Return:
            
          the datasetId

        """

        # read the WorkflowSpecFile
        try:
            wfile = WorkflowSpec()
            wfile.load(workflowFile)

        # wrong dataset file
        except Exception, msg:
            raise InvalidDataset, \
                  "Error loading workflow specifications from %s" % workflowFile
Developer: giffels, Project: PRODAGENT, Lines: 27, Source: WatchedDatasets.py

Example 4: createWorkflow

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
    def createWorkflow(self, runNumber, primaryDataset,
                       processedDataset, dataTier):
        """
        _createWorkflow_

        Create a workflow for a given run and primary dataset.  If the workflow
        has been created previously, load it and use it.
        """
        jobCache = os.path.join(self.args["ComponentDir"], "T0ASTPlugin",
                                "Run" + runNumber)
        if not os.path.exists(jobCache):
            os.makedirs(jobCache)

        workflowSpecFileName = "DQMHarvest-Run%s-%s-workflow.xml" % (runNumber, primaryDataset)
        workflowSpecPath = os.path.join(jobCache, workflowSpecFileName)

        if os.path.exists(workflowSpecPath):
            msg = "Loading existing workflow for dataset: %s\n " % primaryDataset
            msg += " => %s\n" % workflowSpecPath
            logging.info(msg)

            workflowSpec = WorkflowSpec()
            workflowSpec.load(workflowSpecPath)
            return (workflowSpec, workflowSpecPath)
            
        msg = "No workflow found for dataset: %s\n " % primaryDataset
        msg += "Looking up software version and generating workflow..."

        recoConfig = self.t0astWrapper.listRecoConfig(runNumber, primaryDataset)

        if not recoConfig["DO_RECO"]:
            logging.info("RECO disabled for dataset %s" % primaryDataset)
            return (None, None)

        globalTag = self.args.get("OverrideGlobalTag", None)
        if globalTag == None:
            globalTag = recoConfig["GLOBAL_TAG"]
            
        cmsswVersion = self.args.get("OverrideCMSSW", None)
        if cmsswVersion == None:
            cmsswVersion = recoConfig["CMSSW_VERSION"]

        datasetPath = "/%s/%s/%s" % (primaryDataset, processedDataset, dataTier)
        workflowSpec = createHarvestingWorkflow(datasetPath, self.site, 
                                                self.args["CmsPath"],
                                                self.args["ScramArch"],
                                                cmsswVersion, globalTag,
                                                configFile=self.args["ConfigFile"],
                                                DQMServer=self.args['DQMServer'],
                                                proxyLocation=self.args['proxyLocation'],
                                                DQMCopyToCERN=self.args['DQMCopyToCERN'],
                                                doStageOut=self.args['DoStageOut'])
        
        
        workflowSpec.save(workflowSpecPath)
        msg = "Created Harvesting Workflow:\n %s" % workflowSpecPath
        logging.info(msg)
        self.publishWorkflow(workflowSpecPath, workflowSpec.workflowName())
        return (workflowSpec, workflowSpecPath)
Developer: giffels, Project: PRODAGENT, Lines: 61, Source: T0ASTPlugin.py

Example 5: makeJobs

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
    def makeJobs(self, testInstance):
        """
        _makeJobs_

        Create Job Specs for the test instance provided

        """
        logging.info("Creating Jobs for test %s at site %s" % (
            testInstance['Name'],
            testInstance['Site'])
                     )
        testName = testInstance['WorkflowSpecId']
        specInstance = WorkflowSpec()
        specInstance.load(testInstance['WorkflowSpecFile'])

        if testInstance['InputDataset'] == None:
            initialRun = self.jobCounts.get(testInstance['Name'], 1)
            factory = RequestJobFactory(
                specInstance,
                testInstance['WorkingDir'],
                testInstance['TotalEvents'],
                InitialRun = initialRun,
                EventsPerJob = testInstance['EventsPerJob'],
                Sites = [testInstance['Site']])

            jobsList = factory()
            self.jobCounts[testInstance['Name']] += len(jobsList)
        else:

            factory = DatasetJobFactory(
                specInstance,
                testInstance['WorkingDir'],
                specInstance.parameters['DBSURL'],
                )

            jobsList = factory()
            self.jobCounts[testInstance['Name']] += len(jobsList)

        msg = "Created %s jobs:\n" % len(jobsList)

        for job in jobsList:
            jobSpecFile = job['JobSpecFile']
            jobSpecId = job['JobSpecId']
            msg += "  %s\n" % jobSpecId
            testInstance['JobSpecs'][jobSpecId] = jobSpecFile

        logging.info(msg)

        return
Developer: giffels, Project: PRODAGENT, Lines: 56, Source: RelValSpecMgr.py

Example 6: GoodWorkflow

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
def GoodWorkflow(workflow):
   """
   Check if workflow can be loaded
   """
   RequestDir,firstrun = getRequestInjectorConfig()
   workflowCache="%s/WorkflowCache"%RequestDir
   workflowSpec = WorkflowSpec()
   try:
      workflowSpec.load(workflow)
   except:
      return False
   return True
Developer: giffels, Project: PRODAGENT, Lines: 14, Source: InjectTestLCG.py
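
A possible caller, filtering a candidate list down to the specs that actually load; the file names here are hypothetical.

candidates = ["good-workflow.xml", "corrupt-workflow.xml"]   # hypothetical paths
loadable = [wf for wf in candidates if GoodWorkflow(wf)]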

Example 7: __init__

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
class FactoryInterface:
    """
    _FactoryInterface_

    JobSpec Factory Interface defintion & common utils for
    all job spec factory generators

    """
    def __init__(self, workflowSpec):
        # or use isinstance(WorkflowSpec) if need to include sub classes
        if workflowSpec.__class__ is WorkflowSpec:
            self.workflow = workflowSpec
        else:
            self.workflow = WorkflowSpec()
            self.workflow.load(workflowSpec)
Developer: TonyWildish, Project: CSA06-T0-prototype, Lines: 17, Source: FactoryInterface.py
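
Since the constructor accepts either a WorkflowSpec instance or a path to a spec file, both of the following construction styles would work (the path is a placeholder):

# pass an already-loaded spec object...
spec = WorkflowSpec()
spec.load("/path/to/workflow-spec.xml")        # placeholder path
factory = FactoryInterface(spec)

# ...or let the factory load the spec from the file itself
factory = FactoryInterface("/path/to/workflow-spec.xml")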

Example 8: loadWorkflow

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
    def loadWorkflow(self, specFile):
        """
        _loadWorkflow_

        Helper method, since every plugin will have to do
        something with a workflow

        """
        spec = WorkflowSpec()
        try:
            spec.load(specFile)
        except Exception, ex:
            msg = "Unable to read workflow spec file:\n%s\n" % specFile
            msg += str(ex)
            raise RuntimeError, msg
Developer: giffels, Project: PRODAGENT, Lines: 17, Source: PluginInterface.py

Example 9: createJobSpec

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
def createJobSpec(jobSpecId, workflowSpecFile, filename, runNumber, eventCount,
                  firstEvent=None, saveString=False, loadString=True):

    #  //
    # // Load workflow
    #//
    workflowSpec = WorkflowSpec()
    if loadString:
        workflowSpec.loadString(workflowSpecFile)
    else:
        workflowSpec.load(workflowSpecFile)

    #  //
    # // Create JobSpec
    #//
    jobSpec = workflowSpec.createJobSpec()
    jobName = "%s-%s" % (
        workflowSpec.workflowName(),
        runNumber
            )


    #jobSpec.setJobName(jobName)
    jobSpec.setJobName(jobSpecId)
    jobSpec.setJobType("Processing")
    jobSpec.parameters['RunNumber'] = runNumber
    jobSpec.parameters['EventCount'] = eventCount

    jobSpec.payload.operate(DefaultLFNMaker(jobSpec))

    if firstEvent != None:
        jobSpec.parameters['FirstEvent'] = firstEvent

    cfgMaker = ConfigGenerator(jobSpec)
    jobSpec.payload.operate(cfgMaker)

    if saveString:
        return jobSpec.saveString()
    jobSpec.save(filename)
    return
Developer: PerilousApricot, Project: CRAB2, Lines: 43, Source: EventJobSpec.py
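
A sketch of invoking createJobSpec with a workflow spec on disk; because the spec is a file rather than an XML string, loadString is set to False. All argument values here are hypothetical.

createJobSpec(
    jobSpecId="ExampleWorkflow-Run100-Job1",        # hypothetical job spec ID
    workflowSpecFile="/path/to/workflow-spec.xml",  # placeholder path
    filename="/path/to/jobspec-run100.xml",         # where the JobSpec is saved
    runNumber=100,
    eventCount=1000,
    firstEvent=1,
    saveString=False,  # save to `filename` instead of returning a string
    loadString=False)  # treat workflowSpecFile as a path, not XML text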

Example 10: WorkflowSpec

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
import sys, os

from ProdAgentDB.Config import defaultConfig as dbConfig
from ProdCommon.Database import Session
from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec
from ProdAgentCore.Configuration import loadProdAgentConfiguration

from MergeSensor.MergeSensorDB import MergeSensorDB
from JobQueue.JobQueueDB import JobQueueDB
import ProdAgent.WorkflowEntities.Aux as WEAux
import ProdAgent.WorkflowEntities.Workflow as WEWorkflow

workflow = sys.argv[1]
workflowSpec = WorkflowSpec()
workflowSpec.load(workflow)

#  //
# // Clean out job cache
#//
config = loadProdAgentConfiguration()
compCfg = config.getConfig("JobCreator")
creatorCache = os.path.expandvars(compCfg['ComponentDir'])

workflowCache = os.path.join(creatorCache, workflowSpec.workflowName())
if os.path.exists(workflowCache):
    os.system("/bin/rm -rf %s" % workflowCache)



Session.set_database(dbConfig)
Developer: giffels, Project: PRODAGENT, Lines: 32, Source: removeWorkflow.py

Example 11: TaskState

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
import StageOut.Utilities as StageOutUtils
from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec

if __name__ == '__main__':
    msg = "******RuntimeStageOutFailure Invoked*****"

    state = TaskState(os.getcwd())
    state.loadRunResDB()
    config = state.configurationDict()

    #  //
    # // find inputs by locating the task for which we are staging out
    #//  and loading its TaskState

    workflow = WorkflowSpec()
    workflow.load(os.environ['PRODAGENT_WORKFLOW_SPEC'])
    stageOutFor, override, controls = StageOutUtils.getStageOutConfig(
        workflow, state.taskName())

    inputTasks = stageOutFor
    for inputTask in inputTasks:

        inputState = getTaskState(inputTask)
        if inputState == None:
            msg = "Input State: %s Not found, skipping..." % inputTask
            continue

        inputReport = inputState.getJobReport()

        inputReport.status = "Failed"
        if inputReport.exitCode in (0, "0"):
Developer: giffels, Project: PRODAGENT, Lines: 33, Source: RuntimeStageOutFailure.py

Example 12: __init__

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
class RepackerSetup:
    """
    _RepackerSetup_

    Object to manipulate the Configuration files for a repacker job

    - Extract the details of the repacker job entity stored in the
      config

    - Pull in the lumi server information and add it to the config

    """
    def __init__(self, workflowSpec, jobSpec):

        self.workflowSpec = WorkflowSpec()
        self.workflowSpec.load(workflowSpec)

        self.jobSpec = JobSpec()
        self.jobSpec.load(jobSpec)

        taskState = TaskState(os.getcwd())
        taskState.loadRunResDB()

        jobSpecFinder = NodeFinder(taskState.taskName())
        self.jobSpec.payload.operate(jobSpecFinder)
        self.jobSpecNode = jobSpecFinder.result

        workflowFinder = NodeFinder(taskState.taskName())
        self.workflowSpec.payload.operate(workflowFinder)
        self.workflowNode = workflowFinder.result

        self.run = None
        self.lumis = []
        self.streamerFiles = []
        self.activeDatasets = []


    def unpackJobEntity(self):
        """
        _unpackJobEntity_

        Get the StreamerJobEntity from the JobSpec node

        """

        repackJobEntity = self.jobSpecNode.cfgInterface.extensions.get('Streamer', None)
        if repackJobEntity == None:
            msg = "No StreamerJobEntity in JobSpec configuration\n"
            msg += "This is required for repacker jobs\n"
            raise RuntimeError, msg

        # Get run and lumi numbers for this job
        self.run = repackJobEntity.data['runNumber']
        self.lumis = repackJobEntity.data['lumiSections']
        print "Repacker Job Handling Run:%s\n LumiSections: %s\n" % (self.run,self.lumis)

        # Sort streamer input by lumi ID for time ordering
        self.streamerFiles = sortByValue(repackJobEntity.data['streamerFiles'])
        msg = "Streamer Files for this job are:\n"
        for strmr in self.streamerFiles:
            msg += "  %s\n" % strmr
        print msg

        # Get list of active datasets for this job
##        self.activeDatasets = repackJobEntity.data['activeOutputModules']
##        msg = "This Job Will repack datasets:\n"
##        for dataset in self.activeDatasets:
##            msg += "  %s\n" % dataset
##        print msg

        return


    def backupPSet(self,filename,process):
        """
        _backupPSet_
        
        Write a backup copy of the current PSet to disk.
        """
        print "Wrote current configurations as %s" % filename
        handle = open(filename, 'w')
        handle.write("import pickle\n")
        handle.write("pickledCfg=\"\"\"%s\"\"\"\n" % pickle.dumps(process))
        handle.write("process = pickle.loads(pickledCfg)\n")
        handle.close()

        return


    def importAndBackupProcess(self):
        """
        _importAndBackupProcess_
        
        Try to import the process object for the job, which is contained in
        PSet.py and save a backup copy of it.
        """
        try:
            from PSet import process
        except ImportError, ex:
            msg = "Failed to import PSet module containing cmsRun Config\n"
#......... rest of the code omitted .........
Developer: TonyWildish, Project: CSA06-T0-prototype, Lines: 103, Source: RuntimeRepacker.py
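
A sketch of how a runtime script might drive RepackerSetup, based on the constructor and methods shown above; the job spec path is hypothetical, and the workflow spec path comes from the same environment variable used in the other runtime examples.

import os

workflowFile = os.environ["PRODAGENT_WORKFLOW_SPEC"]
jobSpecFile = "/path/to/job-spec.xml"        # hypothetical path

setup = RepackerSetup(workflowFile, jobSpecFile)
setup.unpackJobEntity()             # extract run, lumi sections and streamer files
setup.importAndBackupProcess()      # import PSet.py and back up the process object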

Example 13: MergeSensorDB

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
logHandler.setFormatter(logFormatter)
logging.getLogger().addHandler(logHandler)
logging.getLogger().setLevel(logging.INFO)
Dataset.setLogging(logging)

database = MergeSensorDB()
Dataset.setDatabase(database)

workflowFile = sys.argv[1]

print "Updating DB for workflow: ", workflowFile

# read the WorkflowSpecFile
try:
    wfile = WorkflowSpec()
    wfile.load(workflowFile)

# wrong dataset file
except Exception, msg:
    print "Error loading workflow specifications from %s: %s" \
          % (workflowFile, msg)
    sys.exit(1)

# get output modules
try:
    outputDatasetsList = wfile.outputDatasets()

    outputModules = [outDS['OutputModuleName'] \
                     for outDS in outputDatasetsList]

    # remove duplicates
Developer: giffels, Project: PRODAGENT, Lines: 33, Source: fixDB.py

Example 14: __call__

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
    def __call__(self, collectPayload):
        """
        _operator(collectPayload)_

        Given the dataset in the payload, callout to DBS
        to find the files to be harvested

        """
        msg = "RelValPlugin invoked for %s" % str(collectPayload)
        logging.info(msg)

        if collectPayload.get('Scenario', None) is None:
            msg = "RelValPlugin: Payload should provide a scenario."
            raise RuntimeError, msg    

        site = self.args.get("Site", "srm-cms.cern.ch")

        baseCache = os.path.join(self.args['ComponentDir'],
                                 "RelValPlugin")
        if not os.path.exists(baseCache):
            os.makedirs(baseCache)

        datasetCache = os.path.join(baseCache,
                                    collectPayload['PrimaryDataset'],
                                    collectPayload['ProcessedDataset'],
                                    collectPayload['DataTier'])

        if not os.path.exists(datasetCache):
            os.makedirs(datasetCache)

        workflowFile = os.path.join(
            datasetCache,
            "%s-%s-%s-DQMHarvest-Workflow.xml" % (
            collectPayload['PrimaryDataset'],
            collectPayload['ProcessedDataset'],
            collectPayload['DataTier'])
            )

        if not os.path.exists(workflowFile):
            msg = "No workflow found for dataset: %s\n " % (
                collectPayload.datasetPath())
            msg += "Looking up software version and generating workflow..."
            logging.info(msg)

            # Override Global Tag?
            if self.args.get("OverrideGlobalTag", None) is not None:
                globalTag = self.args['OverrideGlobalTag']
                msg = "Using Overrride for Global: %s" % globalTag
                logging.info(msg)
            # Global Tag provided in the payload?
            elif collectPayload.get('GlobalTag', None) is not None:
                globalTag = collectPayload['GlobalTag']
                msg = "Global tag found in payload: %s" % globalTag
                logging.info(msg)
            # Look up in DBS for Global Tag, use fallback GT as last resort
            else:
                globalTag = findGlobalTagForDataset(
                    self.dbsUrl,
                    collectPayload['PrimaryDataset'],
                    collectPayload['ProcessedDataset'],
                    collectPayload['DataTier'])

            # Override CMSSW Version
            if self.args.get("OverrideCMSSW", None) is not None:
                cmsswVersion = self.args['OverrideCMSSW']
                msg = "Using Override for CMSSW Version %s" % (
                    self.args['OverrideCMSSW'],)
                logging.info(msg)
            # CMSSW Version provided in the payload?
            elif collectPayload.get('CMSSWVersion', None) is not None:
                cmsswVersion = collectPayload['CMSSWVersion']
                msg = "CMSSW Version found in payload: %s" % cmsswVersion
                logging.info(msg)
            else:
                cmsswVersion = findVersionForDataset(
                    self.dbsUrl,
                    collectPayload['PrimaryDataset'],
                    collectPayload['ProcessedDataset'],
                    collectPayload['DataTier'],
                    collectPayload['RunNumber'])
                msg = "CMSSW Version for dataset/run\n"
                msg += " Dataset %s\n" % collectPayload.datasetPath()
                msg += " CMSSW Version = %s\n " % cmsswVersion
                logging.info(msg)

            workflowSpec = createHarvestingWorkflow(
                collectPayload.datasetPath(),
                site,
                self.args['CmsPath'],
                self.args['ScramArch'],
                cmsswVersion,
                globalTag,
                configFile=self.args['ConfigFile'],
                DQMServer=self.args['DQMServer'],
                proxyLocation=self.args['proxyLocation'],
                DQMCopyToCERN=self.args['DQMCopyToCERN'],
                doStageOut=self.args['DoStageOut'])
            
            workflowSpec.save(workflowFile)
            msg = "Created Harvesting Workflow:\n %s" % workflowFile
#......... rest of the code omitted .........
Developer: giffels, Project: PRODAGENT, Lines: 103, Source: RelValPlugin.py

Example 15: __init__

# Required import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Alternatively: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import load [as alias]
class DatasetIterator:
    """
    _DatasetIterator_

    Working from a Generic Workflow template, generate
    concrete jobs from it, keeping in-memory history

    """
    def __init__(self, workflowSpecFile, workingDir):
        self.workflow = workflowSpecFile
        self.workingDir = workingDir
        self.currentJob = None
        self.workflowSpec = WorkflowSpec()
        self.workflowSpec.load(workflowSpecFile)
        self.currentJobDef = None
        self.count = 0
        self.onlyClosedBlocks = False
        if  self.workflowSpec.parameters.has_key("OnlyClosedBlocks"):
            onlyClosed =  str(
                self.workflowSpec.parameters["OnlyClosedBlocks"]).lower()
            if onlyClosed == "true":
                self.onlyClosedBlocks = True
        self.ownedJobSpecs = {}
        self.allowedBlocks = []
        self.allowedSites = []
        self.dbsUrl = getLocalDBSURL()
        self.splitType = \
                self.workflowSpec.parameters.get("SplitType", "file").lower()
        self.splitSize = int(self.workflowSpec.parameters.get("SplitSize", 1))

        self.generators = GeneratorMaker()
        self.generators(self.workflowSpec.payload)

        self.pileupDatasets = {}
        #  //
        # // Does the workflow contain a block restriction??
        #//
        blockRestriction = \
             self.workflowSpec.parameters.get("OnlyBlocks", None)
        if blockRestriction != None:
            #  //
            # // restriction on blocks present, populate allowedBlocks list
            #//
            msg = "Block restriction provided in Workflow Spec:\n"
            msg += "%s\n" % blockRestriction
            blockList = blockRestriction.split(",")
            for block in blockList:
                if len(block.strip() ) > 0:
                    self.allowedBlocks.append(block.strip())

        #  //
        # // Does the workflow contain a site restriction??
        #//
        siteRestriction = \
           self.workflowSpec.parameters.get("OnlySites", None)          
        if siteRestriction != None:
            #  //
            # // restriction on sites present, populate allowedSites list
            #//
            msg = "Site restriction provided in Workflow Spec:\n"
            msg += "%s\n" % siteRestriction
            siteList = siteRestriction.split(",")
            for site in siteList:
                if len(site.strip() ) > 0:
                    self.allowedSites.append(site.strip())

        #  //
        # // Is the DBSURL contact information provided??
        #//

        value = self.workflowSpec.parameters.get("DBSURL", None)
        if value != None:
            self.dbsUrl = value

        if self.dbsUrl == None:
            msg = "Error: No DBSURL available for dataset:\n"
            msg += "Cant get local DBSURL and one not provided with workflow"
            raise RuntimeError, msg
            
        #  //
        # // Cache Area for JobSpecs
        #//
        self.specCache = os.path.join(
            self.workingDir,
            "%s-Cache" %self.workflowSpec.workflowName())
        if not os.path.exists(self.specCache):
            os.makedirs(self.specCache)
        
        
    def __call__(self, jobDef):
        """
        _operator()_

        When called generate a new concrete job payload from the
        generic workflow and return it.
        The JobDef should be a JobDefinition with the input details
        including LFNs and event ranges etc.

        """
        newJobSpec = self.createJobSpec(jobDef)
#.........这里部分代码省略.........
Developer: giffels, Project: PRODAGENT, Lines: 103, Source: DatasetIterator.py


Note: The ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec.load examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. For distribution and use, refer to the corresponding project's license; do not reproduce without permission.