

Python WorkflowSpec.createJobSpec Method Code Examples

This article collects typical usage examples of the Python method ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec.createJobSpec. If you are wondering what WorkflowSpec.createJobSpec does, how to call it, or what real uses of it look like, the curated examples here may help. You can also explore further usage of the containing class, ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec.


Five code examples of the WorkflowSpec.createJobSpec method are shown below, sorted by popularity by default.
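Distilled from the examples below, the common pattern is: load a WorkflowSpec from XML, call createJobSpec() to derive a concrete JobSpec, set its name, type, and parameters, then save it. Here is a minimal sketch of that pattern, using only calls that appear in the examples on this page; the file paths are hypothetical placeholders:

from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec

workflowSpec = WorkflowSpec()
workflowSpec.load("/tmp/Example-Workflow.xml")      # hypothetical input path

jobSpec = workflowSpec.createJobSpec()              # derive a concrete job from the template
jobSpec.setJobName("%s-1" % workflowSpec.workflowName())
jobSpec.setJobType("Processing")
jobSpec.parameters['RunNumber'] = 1                 # extra settings go in this dict

jobSpec.save("/tmp/Example-JobSpec.xml")            # hypothetical output path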

Example 1: createJobSpec

# Module to import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Or: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import createJobSpec [as alias]
# Note: the snippet also uses DefaultLFNMaker and ConfigGenerator, which are imported elsewhere in EventJobSpec.py
def createJobSpec(jobSpecId, workflowSpecFile, filename, runNumber, eventCount,
                  firstEvent=None, saveString=False, loadString=True):

    #  //
    # // Load workflow
    #//
    workflowSpec = WorkflowSpec()
    if loadString:
        workflowSpec.loadString(workflowSpecFile)
    else:
        workflowSpec.load(workflowSpecFile)

    

    #  //
    # // Create JobSpec
    #//
    jobSpec = workflowSpec.createJobSpec()
    jobName = "%s-%s" % (
        workflowSpec.workflowName(),
        runNumber
            )


    #jobSpec.setJobName(jobName)
    jobSpec.setJobName(jobSpecId)
    jobSpec.setJobType("Processing")
    jobSpec.parameters['RunNumber'] = runNumber
    jobSpec.parameters['EventCount'] = eventCount

    jobSpec.payload.operate(DefaultLFNMaker(jobSpec))

    if firstEvent is not None:
        jobSpec.parameters['FirstEvent'] = firstEvent

    cfgMaker = ConfigGenerator(jobSpec)
    jobSpec.payload.operate(cfgMaker)

    if saveString:
        return jobSpec.saveString()
    jobSpec.save(filename)
    return
Developer: PerilousApricot | Project: CRAB2 | Lines of code: 43 | Source file: EventJobSpec.py
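For context, a hedged sketch of how the helper above might be invoked; the spec ID, paths, and counts are illustrative placeholders, not values from the CRAB2 project:

createJobSpec(jobSpecId="ProcJob-00001",                 # hypothetical spec ID
              workflowSpecFile="/tmp/Example-Workflow.xml",
              filename="/tmp/ProcJob-00001-JobSpec.xml",
              runNumber=1,
              eventCount=100,
              firstEvent=1,
              loadString=False)   # load the workflow from a file rather than a string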

Example 2: __call__

# Module to import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Or: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import createJobSpec [as alias]
    def __call__(self, collectPayload):
        """
        _operator(collectPayload)_

        Given the dataset and run in the payload, callout to DBS
        to find the files to be harvested

        """
        msg = "DBSPlugin invoked for %s" % str(collectPayload)
        logging.info(msg)


        site = self.args.get("Site", "srm.cern.ch")

        baseCache = os.path.join(self.args['ComponentDir'],
                                 "DBSPlugin")
        if not os.path.exists(baseCache):
            os.makedirs(baseCache)

        datasetCache = os.path.join(baseCache,
                                    collectPayload['PrimaryDataset'],
                                    collectPayload['ProcessedDataset'],
                                    collectPayload['DataTier'])

        if not os.path.exists(datasetCache):
            os.makedirs(datasetCache)

        workflowFile = os.path.join(
            datasetCache,
            "%s-%s-%s-DQMHarvest-Workflow.xml" % (
            collectPayload['PrimaryDataset'],
            collectPayload['ProcessedDataset'],
            collectPayload['DataTier'])
            )
        if not os.path.exists(workflowFile):
            msg = "No workflow found for dataset: %s\n " % (
                collectPayload.datasetPath(),)
            msg += "Looking up software version and generating workflow..."

            if self.args.get("OverrideGlobalTag", None) == None:
                globalTag = findGlobalTagForDataset(
                    self.dbsUrl,
                    collectPayload['PrimaryDataset'],
                    collectPayload['ProcessedDataset'],
                    collectPayload['DataTier'],
                    collectPayload['RunNumber'])
            else:
                globalTag = self.args['OverrideGlobalTag']


            if self.args.get("OverrideCMSSW", None) != None:
                cmsswVersion = self.args['OverrideCMSSW']
                msg = "Using Override for CMSSW Version %s" % (
                    self.args['OverrideCMSSW'],)
                logging.info(msg)
            else:
                cmsswVersion = findVersionForDataset(
                    self.dbsUrl,
                    collectPayload['PrimaryDataset'],
                    collectPayload['ProcessedDataset'],
                    collectPayload['DataTier'],
                    collectPayload['RunNumber'])
                msg = "Found CMSSW Version for dataset/run\n"
                msg += " Dataset %s Run %s\n" % (collectPayload.datasetPath(),
                                                 collectPayload['RunNumber'])
                msg += " CMSSW Version = %s\n " % cmsswVersion
                logging.info(msg)

            workflowSpec = createHarvestingWorkflow(
                collectPayload.datasetPath(),
                site,
                self.args['CmsPath'],
                self.args['ScramArch'],
                cmsswVersion,
                globalTag,
                configFile=self.args['ConfigFile'],
                DQMServer=self.args['DQMServer'],
                proxyLocation=self.args['proxyLocation'],
                DQMCopyToCERN=self.args['DQMCopyToCERN'],
                doStageOut=self.args['DoStageOut'])

            workflowSpec.save(workflowFile)
            msg = "Created Harvesting Workflow:\n %s" % workflowFile
            logging.info(msg)
            self.publishWorkflow(workflowFile, workflowSpec.workflowName())
        else:
            msg = "Loading existing workflow for dataset: %s\n " % (
                collectPayload.datasetPath(),)
            msg += " => %s\n" % workflowFile
            logging.info(msg)

            workflowSpec = WorkflowSpec()
            workflowSpec.load(workflowFile)

        job = {}
        jobSpec = workflowSpec.createJobSpec()
        jobName = "%s-%s-%s" % (
#......... (part of the code is omitted here) .........
Developer: giffels | Project: PRODAGENT | Lines of code: 103 | Source file: DBSPlugin.py
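The plugin above relies only on dict-style access to collectPayload plus a datasetPath() helper. The real ProdAgent payload class is not shown in this excerpt, so the stand-in below is a hypothetical sketch, and the dataset names are illustrative:

class FakeCollectPayload(dict):
    """Minimal hypothetical stand-in for the harvesting payload object."""
    def datasetPath(self):
        # assumes the usual /Primary/Processed/Tier dataset path convention
        return "/%s/%s/%s" % (self['PrimaryDataset'],
                              self['ProcessedDataset'],
                              self['DataTier'])

payload = FakeCollectPayload(PrimaryDataset='MinimumBias',
                             ProcessedDataset='Example-v1',
                             DataTier='RECO',
                             RunNumber=58620)
# jobs = plugin(payload)   # plugin construction is elided in the excerpt above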

Example 3: __init__

# Module to import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Or: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import createJobSpec [as alias]
class RequestIterator:
    """
    _RequestIterator_

    Working from a Generic Workflow template, generate
    concrete jobs from it, keeping in-memory history

    """
    def __init__(self, workflowSpecFile, workingDir):
        self.workflow = workflowSpecFile
        self.workingDir = workingDir
        self.count = 0
        self.runIncrement = 1
        self.currentJob = None
        self.sitePref = None
        self.pileupDatasets = {}
        self.ownedJobSpecs = {}
        
        #  //
        # // Initially hard coded, should be extracted from Component Config
        #//
        self.eventsPerJob = 10 
        
        self.workflowSpec = WorkflowSpec()
        try:
            self.workflowSpec.load(workflowSpecFile)
        except Exception:
            logging.error("Error loading workflow: %s" % workflowSpecFile)
            return

        if self.workflowSpec.parameters.get("RunIncrement", None) is not None:
            self.runIncrement = int(
                self.workflowSpec.parameters['RunIncrement']
                )

    
        self.generators = GeneratorMaker()
        self.workflowSpec.payload.operate(self.generators)

        #  //
        # // Cache Area for JobSpecs
        #//
        self.specCache = os.path.join(
            self.workingDir,
            "%s-Cache" %self.workflowSpec.workflowName())
        if not os.path.exists(self.specCache):
            os.makedirs(self.specCache)

    def loadPileupDatasets(self):
        """
        _loadPileupDatasets_

        Are we dealing with pileup? If so pull in the file list
        
        """
        puDatasets = self.workflowSpec.pileupDatasets()
        if len(puDatasets) > 0:
            logging.info("Found %s Pileup Datasets for Workflow: %s" % (
                len(puDatasets), self.workflowSpec.workflowName(),
                ))
            self.pileupDatasets = createPileupDatasets(self.workflowSpec)
        return

    def loadPileupSites(self):
        """
        _loadPileupSites_

        Are we dealing with pileup? If so pull in the site list

        """
        sites = []
        puDatasets = self.workflowSpec.pileupDatasets()
        if len(puDatasets) > 0:
            logging.info("Found %s Pileup Datasets for Workflow: %s" % (
                len(puDatasets), self.workflowSpec.workflowName(),
                ))
            sites = getPileupSites(self.workflowSpec)
        return sites

    def __call__(self):
        """
        _operator()_

        When called generate a new concrete job payload from the
        generic workflow and return it.

        """
        newJobSpec = self.createJobSpec()
        self.count += self.runIncrement
        return newJobSpec


    def createJobSpec(self):
#......... (part of the code is omitted here) .........
Developer: giffels | Project: PRODAGENT | Lines of code: 103 | Source file: RequestIterator.py
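A hedged usage sketch of RequestIterator, assuming a valid workflow spec file on disk; the paths are placeholders, and because createJobSpec() is elided above, its return value is treated opaquely here:

iterator = RequestIterator("/tmp/Example-Workflow.xml", "/tmp/working-dir")
iterator.loadPileupDatasets()    # pull in pileup file lists, if the workflow has any

# Each call derives one concrete job from the template and advances the run
# counter by RunIncrement (1 unless the workflow parameters override it).
for _ in range(3):
    newSpec = iterator()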

Example 4: __call__

# Module to import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Or: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import createJobSpec [as alias]

#......... (part of the code is omitted here) .........
                msg = "CMSSW Version found in payload: %s" % cmsswVersion
                logging.info(msg)
            else:
                cmsswVersion = findVersionForDataset(
                    self.dbsUrl,
                    collectPayload['PrimaryDataset'],
                    collectPayload['ProcessedDataset'],
                    collectPayload['DataTier'],
                    collectPayload['RunNumber'])
                msg = "CMSSW Version for dataset/run\n"
                msg += " Dataset %s\n" % collectPayload.datasetPath()
                msg += " CMSSW Version = %s\n " % cmsswVersion
                logging.info(msg)

            workflowSpec = createHarvestingWorkflow(
                collectPayload.datasetPath(),
                site,
                self.args['CmsPath'],
                self.args['ScramArch'],
                cmsswVersion,
                globalTag,
                configFile=self.args['ConfigFile'],
                DQMServer=self.args['DQMServer'],
                proxyLocation=self.args['proxyLocation'],
                DQMCopyToCERN=self.args['DQMCopyToCERN'],
                doStageOut=self.args['DoStageOut'])
            
            workflowSpec.save(workflowFile)
            msg = "Created Harvesting Workflow:\n %s" % workflowFile
            msg += "\nThe following parameters were used:\n"
            msg += "DQMserver     ==> %s\n" % (self.args['DQMServer'])
            msg += "proxyLocation ==> %s\n" % (self.args['proxyLocation'])
            msg += "Stage Out     ==> %s\n" % (self.args['DoStageOut'])
            msg += "DQMCopyToCERN ==> %s\n" % (self.args['DQMCopyToCERN'])
            logging.info(msg)
            self.publishWorkflow(workflowFile, workflowSpec.workflowName())
        else:
            msg = "Loading existing workflow for dataset: %s\n " % (
                collectPayload.datasetPath())
            msg += " => %s\n" % workflowFile
            logging.info(msg)

            workflowSpec = WorkflowSpec()
            workflowSpec.load(workflowFile)

        job = {}
        jobSpec = workflowSpec.createJobSpec()
        jobName = "%s-%s-%s" % (
            workflowSpec.workflowName(),
            collectPayload['RunNumber'],
            time.strftime("%H-%M-%S-%d-%m-%y")
            )

        jobSpec.setJobName(jobName)
        jobSpec.setJobType("Harvesting")

        # Adding specific parameters to the JobSpec
        jobSpec.parameters['RunNumber'] = collectPayload['RunNumber']  # How should we manage the run numbers?
        jobSpec.parameters['Scenario'] = collectPayload['Scenario']
        if collectPayload.get('RefHistKey', None) is not None:
            jobSpec.parameters['RefHistKey'] = collectPayload['RefHistKey']

        jobSpec.addWhitelistSite(site)
        jobSpec.payload.operate(DefaultLFNMaker(jobSpec))
        jobSpec.payload.cfgInterface.inputFiles.extend(
            getLFNForDataset(self.dbsUrl,
                             collectPayload['PrimaryDataset'],
                             collectPayload['ProcessedDataset'],
                             collectPayload['DataTier'],
                             run=collectPayload['RunNumber']))

        specCacheDir =  os.path.join(
            datasetCache, str(int(collectPayload['RunNumber']) // 1000).zfill(4))
        if not os.path.exists(specCacheDir):
            os.makedirs(specCacheDir)
        jobSpecFile = os.path.join(specCacheDir,
                                   "%s-JobSpec.xml" % jobName)

        jobSpec.save(jobSpecFile)

        job["JobSpecId"] = jobName
        job["JobSpecFile"] = jobSpecFile
        job['JobType'] = "Harvesting"
        job["WorkflowSpecId"] = workflowSpec.workflowName(),
        job["WorkflowPriority"] = 10
        job["Sites"] = [site]
        job["Run"] = collectPayload['RunNumber']
        job['WorkflowSpecFile'] = workflowFile

        msg = "Harvesting Job Created for\n"
        msg += " => Run:       %s\n" % collectPayload['RunNumber']
        msg += " => Primary:   %s\n" % collectPayload['PrimaryDataset']
        msg += " => Processed: %s\n" % collectPayload['ProcessedDataset']
        msg += " => Tier:      %s\n" % collectPayload['DataTier']
        msg += " => Workflow:  %s\n" % job['WorkflowSpecId']
        msg += " => Job:       %s\n" % job['JobSpecId']
        msg += " => Site:      %s\n" % job['Sites']
        logging.info(msg)

        return [job]
Developer: giffels | Project: PRODAGENT | Lines of code: 104 | Source file: RelValPlugin.py
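A hedged sketch of how a caller might consume the returned list; the plugin and payload objects are hypothetical (see the stand-in after Example 2), so this only illustrates the shape of the job dictionaries built above:

jobs = plugin(payload)           # hypothetical plugin/payload instances
for job in jobs:
    print("Queue %s (%s) at %s" % (job["JobSpecId"], job["JobType"], job["Sites"]))
    # job["JobSpecFile"] is the path of the JobSpec XML saved above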

Example 5: range

# Module to import: from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec [as alias]
# Or: from ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec import createJobSpec [as alias]
    # if this is needed we should create
    # a JobCreator instance per run
    #workflowSpec.setWorkflowRunNumber(run)

    jobList = []
    for lumi in range(1, lumiperrun + 1):

        jobCreator.setLumi(lumi)
        jobCreator.setEventsPerJob(eventsperjob)
        # first event of lumi N is (N-1)*eventsperjob + 1; the original
        # 1 + lumi*eventsperjob would skip the first lumi's block of events
        jobCreator.setFirstEvent((lumi - 1) * eventsperjob + 1)

        jobName = "%s-%s-%s" % (workflowSpec.workflowName(),
                                run, lumi)

        jobSpec = workflowSpec.createJobSpec()

        jobSpecDir =  os.path.join("/data/hufnagel/parepack/StreamerMCRunning",
                                   str(run // 1000).zfill(4))
        if not os.path.exists(jobSpecDir):
            os.makedirs(jobSpecDir)

        jobSpecFileName = jobName + "-jobspec.xml"
        jobSpecFile = os.path.join(jobSpecDir, jobSpecFileName) 

        jobSpec.setJobName(jobName)

        # used for thresholds
        jobSpec.setJobType("Processing")

        # this sets lumi section !!!
Developer: TonyWildish | Project: CSA06-T0-prototype | Lines of code: 32 | Source file: injectMCStreamerWorkflow.py
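A quick check of the corrected first-event arithmetic used in the loop above, assuming eventsperjob = 100:

eventsperjob = 100
for lumi in range(1, 4):
    first = (lumi - 1) * eventsperjob + 1
    last = first + eventsperjob - 1
    print("lumi %d covers events %d-%d" % (lumi, first, last))
# lumi 1 covers events 1-100, lumi 2 covers 101-200, lumi 3 covers 201-300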


Note: The ProdCommon.MCPayloads.WorkflowSpec.WorkflowSpec.createJobSpec method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; the source code copyright belongs to the original authors. Please refer to the corresponding project's License before distributing or using them; do not reproduce without permission.