

Python DBSBufferUtil.DBSBufferUtil Class Code Examples

This article collects typical usage examples of the Python class WMComponent.DBS3Buffer.DBSBufferUtil.DBSBufferUtil. If you are wondering what the DBSBufferUtil class does, how to use it, or what real-world usage looks like, the curated class examples below may help.


A total of 15 code examples of the DBSBufferUtil class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
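
For orientation before the examples: DBSBufferUtil is a thin query wrapper around the agent's DBSBuffer database tables, so it expects the thread-local database interface that WMAgent components (or the unit-test harness) set up beforehand. A minimal sketch of how it is typically obtained and queried, assuming such an environment is already in place:

from WMComponent.DBS3Buffer.DBSBufferUtil import DBSBufferUtil

# Assumes the current thread already carries a database interface and
# logger (set up by the WMAgent harness, or by TestInit in the unit
# tests below); DBSBufferUtil picks them up internally.
dbsUtil = DBSBufferUtil()

# A few of the query methods exercised throughout the examples:
allDone = dbsUtil.isAllWorkflowCompleted()    # True once every workflow is 'completed'
openBlocks = dbsUtil.findOpenBlocks()         # blocks still open in DBSBuffer
notUploaded = dbsUtil.countFilesByStatus(status="NOTUPLOADED")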

Example 1: DrainStatusAPI

class DrainStatusAPI(object):
    """
    Provides methods for querying dbs and condor for drain statistics
    """
    def __init__(self):

        self.dbsUtil = DBSBufferUtil()
        self.condorAPI = PyCondorAPI()

    def collectDrainInfo(self):
        """
        Call methods to check the drain status
        """
        results = {}
        results['workflows_completed'] = self.checkWorkflows()

        # if workflows are completed, collect additional drain statistics
        if results['workflows_completed']:
            results['upload_status'] = self.checkFileUploadStatus()
            results['condor_status'] = self.checkCondorStates()

        return results

    def checkWorkflows(self):
        """
        Check to see if all workflows have a 'completed' status
        """
        results = self.dbsUtil.isAllWorkflowCompleted()
        return results

    def checkCondorStates(self):
        """
        Check idle and running jobs in Condor
        """
        results = {}
        queries = [["1", "idle"], ["2", "running"]]

        for query in queries:
            jobs = self.condorAPI.getCondorJobs("JobStatus=="+query[0], [])
            # if there is an error, report it instead of the length of an empty list
            if jobs is None:
                results[query[1]] = "unknown (schedd query error)"
            else:
                results[query[1]] = len(jobs)

        return results

    def checkFileUploadStatus(self):
        """
        Check file upload status:
            Blocks open in DBS
            Files not uploaded in DBS
            Files not uploaded to Phedex
        """
        results = {}
        results['dbs_open_blocks'] = self.dbsUtil.countOpenBlocks()
        results['dbs_notuploaded'] = self.dbsUtil.countFilesByStatus(status="NOTUPLOADED")
        results['phedex_notuploaded'] = self.dbsUtil.countPhedexNotUploaded()
        return results
Author: DAMason, Project: WMCore, Lines: 59, Source: DrainStatusAPI.py
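
A hypothetical driver for this class might look like the sketch below; it assumes the surrounding WMAgent runtime (thread database attributes and a reachable condor schedd) is already in place, and the dictionary keys are the ones collectDrainInfo() fills above:

# Hedged usage sketch for the no-argument DrainStatusAPI variant above.
api = DrainStatusAPI()
info = api.collectDrainInfo()

if info['workflows_completed']:
    print(info['upload_status'])    # open blocks and not-yet-uploaded file counts
    print(info['condor_status'])    # idle / running job counts (or a schedd error note)
else:
    print("workflows still running; drain statistics not collected yet")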

Example 2: __init__

    def __init__(self, config):
        # queue url used in WorkQueueManager
        self.thisAgentUrl = "http://" + config.Agent.hostName + ":5984"
        self.globalBackend = WorkQueueBackend(config.WorkloadSummary.couchurl)
        self.localBackend = WorkQueueBackend(config.WorkQueueManager.couchurl)
        self.dbsUtil = DBSBufferUtil()
        self.condorAPI = PyCondorAPI()
Author: dmwm, Project: WMCore, Lines: 7, Source: DrainStatusAPI.py

Example 3: __init__

    def __init__(self, config):
        """
        Initialise class members
        """
        logging.info("Running __init__ for DBS3 Uploader")
        BaseWorkerThread.__init__(self)
        self.config = config

        # This is slightly dangerous, but DBSUpload depends
        # on DBSInterface anyway
        self.dbsUrl = self.config.DBS3Upload.dbsUrl

        self.dbsUtil = DBSBufferUtil()

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMComponent.DBS3Buffer",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)

        self.pool = []
        self.blocksToCheck = []
        self.workInput = None
        self.workResult = None
        self.nProc = getattr(self.config.DBS3Upload, 'nProcesses', 4)
        self.wait = getattr(self.config.DBS3Upload, 'dbsWaitTime', 2)
        self.nTries = getattr(self.config.DBS3Upload, 'dbsNTries', 300)
        self.physicsGroup = getattr(self.config.DBS3Upload, "physicsGroup", "NoGroup")
        self.datasetType = getattr(self.config.DBS3Upload, "datasetType", "PRODUCTION")
        self.primaryDatasetType = getattr(self.config.DBS3Upload, "primaryDatasetType", "mc")
        self.blockCount = 0
        self.dbsApi = DbsApi(url=self.dbsUrl)

        # List of blocks currently in processing
        self.queuedBlocks = []

        # Set up the pool of worker processes
        self.setupPool()

        # Setting up any cache objects
        self.blockCache = {}

        self.filesToUpdate = []

        self.produceCopy = getattr(self.config.DBS3Upload, 'dumpBlock', False)

        self.copyPath = os.path.join(getattr(self.config.DBS3Upload, 'componentDir', '/data/srv/'),
                                     'dbsuploader_block.json')

        self.timeoutWaiver = 1

        return
Author: DAMason, Project: WMCore, Lines: 51, Source: DBSUploadPoller.py
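
Every tuning knob in this __init__ is read with getattr and a default, so only dbsUrl has no fallback. A hedged sketch of the corresponding configuration section, written with WMCore's Configuration API; the URL and path are placeholders, and the attribute names simply mirror the getattr calls above:

from WMCore.Configuration import Configuration

config = Configuration()
config.component_("DBS3Upload")
config.DBS3Upload.dbsUrl = "https://dbs.example.invalid/dbs/prod/global/DBSWriter"  # placeholder, required
config.DBS3Upload.componentDir = "/data/srv/dbs3upload"   # placeholder; block JSON dump lands here
config.DBS3Upload.nProcesses = 4             # size of the upload worker pool (default 4)
config.DBS3Upload.dbsWaitTime = 2            # seconds workers wait between polls (default 2)
config.DBS3Upload.dbsNTries = 300            # upload retry attempts (default 300)
config.DBS3Upload.physicsGroup = "NoGroup"
config.DBS3Upload.datasetType = "PRODUCTION"
config.DBS3Upload.primaryDatasetType = "mc"
config.DBS3Upload.dumpBlock = False          # if True, dump each block to dbsuploader_block.json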

Example 4: __init__

    def __init__(self, config, dbsconfig = None):
        """
        Initialise class members
        """
        logging.info("Running __init__ for DBS3 Uploader")
        BaseWorkerThread.__init__(self)
        self.config     = config

        # This is slightly dangerous, but DBSUpload depends
        # on DBSInterface anyway
        self.dbsUrl           = self.config.DBS3Upload.dbsUrl

        self.dbsUtil = DBSBufferUtil()

        self.pool   = []
        self.blocksToCheck = []
        self.input  = None
        self.result = None
        self.nProc  = getattr(self.config.DBS3Upload, 'nProcesses', 4)
        self.wait   = getattr(self.config.DBS3Upload, 'dbsWaitTime', 2)
        self.nTries = getattr(self.config.DBS3Upload, 'dbsNTries', 300)
        self.dbs3UploadOnly = getattr(self.config.DBS3Upload, "dbs3UploadOnly", False)
        self.physicsGroup   = getattr(self.config.DBS3Upload, "physicsGroup", "NoGroup")
        self.datasetType    = getattr(self.config.DBS3Upload, "datasetType", "PRODUCTION")
        self.primaryDatasetType = getattr(self.config.DBS3Upload, "primaryDatasetType", "mc")
        self.blockCount     = 0
        self.dbsApi = DbsApi(url = self.dbsUrl)

        # List of blocks currently in processing
        self.queuedBlocks = []

        # Set up the pool of worker processes
        self.setupPool()

        # Setting up any cache objects
        self.blockCache = {}
        self.dasCache   = {}

        self.filesToUpdate = []

        self.produceCopy = getattr(self.config.DBS3Upload, 'copyBlock', False)
        self.copyPath    = getattr(self.config.DBS3Upload, 'copyBlockPath',
                                   '/data/mnorman/block.json')
        
        self.timeoutWaiver = 1

        return
Author: ramandeepkumar, Project: WMCore, Lines: 48, Source: DBSUploadPoller.py

Example 5: setup

    def setup(self, parameters):
        """
        set db connection(couchdb, wmbs) to prepare to gather information
        """
        # set the connection to local queue
        if not hasattr(self.config, "Tier0Feeder"):
            self.localQueue = WorkQueueService(self.config.AnalyticsDataCollector.localQueueURL)

        # set the connection for local couchDB call
        self.localCouchDB = LocalCouchDBData(self.config.AnalyticsDataCollector.localCouchURL,
                                             self.config.JobStateMachine.summaryStatsDBName,
                                             self.summaryLevel)

        # interface to WMBS/BossAir db
        myThread = threading.currentThread()
        # set wmagent db data
        self.wmagentDB = WMAgentDBData(self.summaryLevel, myThread.dbi, myThread.logger)
        # set the connection for local couchDB call
        self.localSummaryCouchDB = WMStatsWriter(self.config.AnalyticsDataCollector.localWMStatsURL,
                                                 appName="WMStatsAgent")

        # use local db for tier0
        if hasattr(self.config, "Tier0Feeder"):
            centralRequestCouchDBURL = self.config.AnalyticsDataCollector.localT0RequestDBURL
        else:
            centralRequestCouchDBURL = self.config.AnalyticsDataCollector.centralRequestDBURL

        self.centralRequestCouchDB = RequestDBWriter(centralRequestCouchDBURL,
                                                     couchapp=self.config.AnalyticsDataCollector.RequestCouchApp)
        self.centralWMStatsCouchDB = WMStatsWriter(self.config.General.centralWMStatsURL)

        #TODO: change the config to hold couch url
        self.localCouchServer = CouchMonitor(self.config.JobStateMachine.couchurl)

        self.dbsBufferUtil = DBSBufferUtil()

        if self.pluginName is not None:
            pluginFactory = WMFactory("plugins", "WMComponent.AnalyticsDataCollector.Plugins")
            self.plugin = pluginFactory.loadObject(classname=self.pluginName)
Author: dmwm, Project: WMCore, Lines: 39, Source: AnalyticsPoller.py

Example 6: setUp

    def setUp(self):
        """
        _setUp_

        Set up the database and logging connection.  Create some DBSBuffer tables and fake data for testing.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMComponent.DBS3Buffer", "WMCore.WMBS"],
                                useDefault=False)

        myThread = threading.currentThread()
        self.dbsbufferFactory = DAOFactory(package="WMComponent.DBS3Buffer",
                                           logger=myThread.logger,
                                           dbinterface=myThread.dbi)

        self.wmbsFactory = DAOFactory(package="WMCore.WMBS",
                                      logger=myThread.logger,
                                      dbinterface=myThread.dbi)
        self.dbsUtil = DBSBufferUtil()

        # Create two test dbsbuffer workflows
        insertWorkflow = self.dbsbufferFactory(classname="InsertWorkflow")
        insertWorkflow.execute("Test1", "Task1", 0, 0, 0, 0)
        insertWorkflow.execute("Test2", "Task2", 0, 0, 0, 0)

        # Update one workflow to "completed" state
        updateWorkflow = self.dbsbufferFactory(classname="UpdateWorkflowsToCompleted")
        updateWorkflow.execute(["Test1"])

        # Create a test wmbs workflow
        testWorkflow = Workflow(spec="somespec.xml", owner="Erik", name="Test1", task="Task1")
        testWorkflow.create()

        # Create a test dbsbuffer file
        self.createTestFiles()
Author: alexanderrichards, Project: WMCore, Lines: 37, Source: DBSBufferUtil_t.py
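
The matching tearDown is not shown in this excerpt; in WMCore unit tests it is usually just a schema wipe through the same TestInit instance. A sketch along that common pattern, assuming TestInit's standard clearDatabase helper:

    def tearDown(self):
        """
        _tearDown_

        Drop the schema created in setUp so each test starts clean.
        """
        self.testInit.clearDatabase()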

Example 7: testBulkLoad

    def testBulkLoad(self):
        """
        _testBulkLoad_

        Can we load in bulk?
        """

        addToBuffer = DBSBufferUtil()

        bulkLoad = self.daoFactory(classname = "DBSBufferFiles.LoadBulkFilesByID")

        testFileChildA = DBSBufferFile(lfn = "/this/is/a/child/lfnA", size = 1024,
                                        events = 20)
        testFileChildA.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                    appFam = "RECO", psetHash = "GIBBERISH",
                                    configContent = "MOREGIBBERISH")
        testFileChildA.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")
        testFileChildB = DBSBufferFile(lfn = "/this/is/a/child/lfnB", size = 1024,
                                        events = 20)
        testFileChildB.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                    appFam = "RECO", psetHash = "GIBBERISH",
                                    configContent = "MOREGIBBERISH")
        testFileChildB.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFileChildC = DBSBufferFile(lfn = "/this/is/a/child/lfnC", size = 1024,
                                        events = 20)
        testFileChildC.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                                    appFam = "RECO", psetHash = "GIBBERISH",
                                    configContent = "MOREGIBBERISH")
        testFileChildC.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        
        testFileChildA.create()
        testFileChildB.create()
        testFileChildC.create()

        testFileChildA.setLocation(["se1.fnal.gov", "se1.cern.ch"])
        testFileChildB.setLocation(["se1.fnal.gov", "se1.cern.ch"])
        testFileChildC.setLocation(["se1.fnal.gov", "se1.cern.ch"])

        runSet = set()
        runSet.add(Run( 1, *[45]))
        runSet.add(Run( 2, *[67, 68]))
        testFileChildA.addRunSet(runSet)
        testFileChildB.addRunSet(runSet)
        testFileChildC.addRunSet(runSet)

        testFileChildA.save()
        testFileChildB.save()
        testFileChildC.save()

        setCksumAction = self.daoFactory(classname = "DBSBufferFiles.AddChecksumByLFN")
        binds = [{'lfn': "/this/is/a/child/lfnA", 'cktype': 'adler32', 'cksum': 201},
                 {'lfn': "/this/is/a/child/lfnA", 'cktype': 'cksum', 'cksum': 101},
                 {'lfn': "/this/is/a/child/lfnB", 'cktype': 'adler32', 'cksum': 201},
                 {'lfn': "/this/is/a/child/lfnB", 'cktype': 'cksum', 'cksum': 101},
                 {'lfn': "/this/is/a/child/lfnC", 'cktype': 'adler32', 'cksum': 201},
                 {'lfn': "/this/is/a/child/lfnC", 'cktype': 'cksum', 'cksum': 101}]
        setCksumAction.execute(bulkList = binds)        

        testFile = DBSBufferFile(lfn = "/this/is/a/lfn", size = 1024,
                                 events = 10)
        testFile.setAlgorithm(appName = "cmsRun", appVer = "CMSSW_2_1_8",
                              appFam = "RECO", psetHash = "GIBBERISH",
                              configContent = "MOREGIBBERISH")
        testFile.setDatasetPath("/Cosmics/CRUZET09-PromptReco-v1/RECO")        
        testFile.create()

        testFileChildA.addParents([testFile["lfn"]])
        testFileChildB.addParents([testFile["lfn"]])
        testFileChildC.addParents([testFile["lfn"]])

        binds = [{'id': testFileChildA.exists()},
                 {'id': testFileChildB.exists()},
                 {'id': testFileChildC.exists()}]

        listOfFiles = addToBuffer.loadDBSBufferFilesBulk(fileObjs = binds)

        compareList = ['locations', 'psetHash', 'configContent', 'appName',
                       'appVer', 'appFam', 'events', 'datasetPath', 'runs']

        for f in listOfFiles:
            self.assertTrue(f['lfn'] in ["/this/is/a/child/lfnA", "/this/is/a/child/lfnB",
                                         "/this/is/a/child/lfnC"],
                            "Unknown file in loaded results")
            self.assertEqual(f['checksums'], {'adler32': '201', 'cksum': '101'})
            for parent in f['parents']:
                self.assertEqual(parent['lfn'], testFile['lfn'])
            for key in compareList:
                self.assertEqual(f[key], testFileChildA[key])
Author: zhiwenuil, Project: WMCore, Lines: 96, Source: DBSBufferFile_t.py
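
One detail worth noting in this test: the checksums are inserted as integers (201 and 101), but the assertion compares against the strings '201' and '101', so DBSBuffer evidently hands checksum values back as text; callers should not assume numeric types for loaded checksums.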

Example 8: testCloseSettingsPerWorkflow

    def testCloseSettingsPerWorkflow(self):
        """
        _testCloseSettingsPerWorkflow_

        Test the block closing mechanics in the DBS3 uploader,
        this uses a fake dbs api to avoid reliance on external services.
        """
        # Signal trapExit that we are a friend
        os.environ["DONT_TRAP_EXIT"] = "True"
        try:
            # Monkey patch the imports of DbsApi
            from WMComponent.DBS3Buffer import DBSUploadPoller as MockDBSUploadPoller
            MockDBSUploadPoller.DbsApi = MockDbsApi
    
            # Set the poller and the dbsUtil for verification
            myThread = threading.currentThread()
            (_, dbsFilePath) = mkstemp(dir = self.testDir)
            self.dbsUrl = dbsFilePath
            config = self.getConfig()
            dbsUploader = MockDBSUploadPoller.DBSUploadPoller(config = config)
            dbsUtil = DBSBufferUtil()
    
            # First test is event based limits and timeout with no new files.
            # Set the files and workflow
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName, taskPath,
                                MaxWaitTime = 2, MaxFiles = 100,
                                MaxEvents = 150)
            self.createParentFiles(acqEra, nFiles = 20,
                                   workflowName = workflowName,
                                   taskPath = taskPath)
    
            # The algorithm needs to be run twice.  On the first iteration it will
            # create all the blocks and upload one with less than 150 events.
            # On the second iteration the second block is uploaded.
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 1)
            globalFiles = myThread.dbi.processData("SELECT id FROM dbsbuffer_file WHERE status = 'InDBS'")[0].fetchall()
            notUploadedFiles = myThread.dbi.processData("SELECT id FROM dbsbuffer_file WHERE status = 'NOTUPLOADED'")[0].fetchall()
            self.assertEqual(len(globalFiles), 14)
            self.assertEqual(len(notUploadedFiles), 6)
            # Check the fake DBS for data
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 2)
            for block in fakeDBSInfo:
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['file_count'], 7)
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)
            time.sleep(3)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 0)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 3)
            for block in fakeDBSInfo:
                if block['block']['file_count'] != 6:
                    self.assertEqual(block['block']['file_count'], 7)
                self.assertTrue('block_events' not in block['block'])
                self.assertEqual(block['block']['open_for_writing'], 0)
                self.assertTrue('close_settings' not in block)

            # Now check the limit by size and timeout with new files
            acqEra = "TropicalSeason%s" % (int(time.time()))
            workflowName = 'TestWorkload%s' % (int(time.time()))
            taskPath = '/%s/TestProcessing' % workflowName
            self.injectWorkflow(workflowName, taskPath,
                                MaxWaitTime = 2, MaxFiles = 5,
                                MaxEvents = 200000000)
            self.createParentFiles(acqEra, nFiles = 16,
                                   workflowName = workflowName,
                                   taskPath = taskPath)
            dbsUploader.algorithm()
            dbsUploader.checkBlocks()
            openBlocks = dbsUtil.findOpenBlocks()
            self.assertEqual(len(openBlocks), 1)
            fakeDBS = open(self.dbsUrl, 'r')
            fakeDBSInfo = json.load(fakeDBS)
            fakeDBS.close()
            self.assertEqual(len(fakeDBSInfo), 6)
            for block in fakeDBSInfo:
                if acqEra in block['block']['block_name']:
                    self.assertEqual(block['block']['file_count'], 5)
                self.assertTrue('block_events' not in block['block'])
                self.assertTrue('close_settings' not in block)
                self.assertEqual(block['block']['open_for_writing'], 0)
    
            # Put more files, they will go into the same block and then it will be closed
            # after timeout
            time.sleep(3)
            self.createParentFiles(acqEra, nFiles = 3,
#......... remainder of the code omitted .........
Author: AndrewLevin, Project: WMCore, Lines: 101, Source: DBSUpload_t.py

Example 9: testDualUpload

    def testDualUpload(self):
        """
        _testDualUpload_

        Verify that the dual upload mode works correctly.
        """
        self.dbsApi = DbsApi(url = self.dbsUrl)
        config = self.getConfig(dbs3UploadOnly = True)
        dbsUploader = DBSUploadPoller(config = config)
        dbsUtil = DBSBufferUtil()

        # First test verifies that uploader will poll and then not do anything
        # as the database is empty.
        dbsUploader.algorithm()

        acqEra = "Summer%s" % (int(time.time()))
        parentFiles = self.createParentFiles(acqEra)
        (moreParentFiles, childFiles) = \
                          self.createFilesWithChildren(parentFiles, acqEra)

        allFiles = parentFiles + moreParentFiles
        allBlocks = []
        for i in range(4):
            blockName = parentFiles[0]["datasetPath"] + "#" + makeUUID()
            dbsBlock = DBSBlock(blockName, "malpaquet", 1)
            dbsBlock.status = "Open"                
            dbsUtil.createBlocks([dbsBlock])
            for file in allFiles[i * 5 : (i * 5) + 5]:
                dbsBlock.addFile(file)
                dbsUtil.setBlockFiles({"block": blockName, "filelfn": file["lfn"]})
                if i < 2:
                    dbsBlock.status = "InDBS"
                dbsUtil.updateBlocks([dbsBlock])
            dbsUtil.updateFileStatus([dbsBlock], "InDBS")
            allBlocks.append(dbsBlock)            

        blockName = childFiles[0]["datasetPath"] + "#" + makeUUID()
        dbsBlock = DBSBlock(blockName, "malpaquet", 1)
        dbsBlock.status = "InDBS"
        dbsUtil.createBlocks([dbsBlock])
        for file in childFiles:
            dbsBlock.addFile(file)
            dbsUtil.setBlockFiles({"block": blockName, "filelfn": file["lfn"]})

        dbsUtil.updateFileStatus([dbsBlock], "InDBS")

        dbsUploader.algorithm()
        time.sleep(5)
        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"], parentFiles)

        # Change the status of the rest of the parent blocks so we can upload
        # them and the children.
        for dbsBlock in allBlocks:
            dbsBlock.status = "InDBS"            
            dbsUtil.updateBlocks([dbsBlock])

        dbsUploader.algorithm()
        time.sleep(5)

        self.verifyData(parentFiles[0]["datasetPath"], parentFiles + moreParentFiles)

        # Run the uploader one more time to upload the children.
        dbsUploader.algorithm()
        time.sleep(5)        
    
        self.verifyData(childFiles[0]["datasetPath"], childFiles)
        return
Author: AndrewLevin, Project: WMCore, Lines: 70, Source: DBSUpload_t.py

Example 10: __init__

    def __init__(self):

        self.dbsUtil = DBSBufferUtil()
        self.condorAPI = PyCondorAPI()
Author: DAMason, Project: WMCore, Lines: 4, Source: DrainStatusAPI.py

Example 11: DrainStatusAPI

class DrainStatusAPI(object):
    """
    Provides methods for querying dbs and condor for drain statistics
    """
    def __init__(self, config):
        # queue url used in WorkQueueManager
        self.thisAgentUrl = "http://" + config.Agent.hostName + ":5984"
        self.globalBackend = WorkQueueBackend(config.WorkloadSummary.couchurl)
        self.localBackend = WorkQueueBackend(config.WorkQueueManager.couchurl)
        self.dbsUtil = DBSBufferUtil()
        self.condorAPI = PyCondorAPI()

    def collectDrainInfo(self):
        """
        Call methods to check the drain status
        """
        results = {}
        results['workflows_completed'] = self.checkWorkflows()

        # if workflows are completed, collect additional drain statistics
        if results['workflows_completed']:
            results['upload_status'] = self.checkFileUploadStatus()
            results['condor_status'] = self.checkCondorStates()
            results['local_wq_status'] = self.checkLocalWQStatus(dbname="workqueue")
            results['local_wqinbox_status'] = self.checkLocalWQStatus(dbname="workqueue_inbox")
            results['global_wq_status'] = self.checkGlobalWQStatus()

        return results

    def checkWorkflows(self):
        """
        Check to see if all workflows have a 'completed' status
        """
        results = self.dbsUtil.isAllWorkflowCompleted()
        return results

    def checkCondorStates(self):
        """
        Check idle and running jobs in Condor
        """
        results = {}
        queries = [["1", "idle"], ["2", "running"]]

        for query in queries:
            jobs = self.condorAPI.getCondorJobs("JobStatus=="+query[0], [])
            # if there is an error, report it instead of the length of an empty list
            if jobs is None:
                results[query[1]] = "unknown (schedd query error)"
            else:
                results[query[1]] = len(jobs)

        return results

    def checkFileUploadStatus(self):
        """
        Check file upload status:
            Blocks open in DBS
            Files not uploaded in DBS
            Files not uploaded to Phedex
        """
        results = {}
        results['dbs_open_blocks'] = self.dbsUtil.countOpenBlocks()
        results['dbs_notuploaded'] = self.dbsUtil.countFilesByStatus(status="NOTUPLOADED")
        results['phedex_notuploaded'] = self.dbsUtil.countPhedexNotUploaded()
        return results

    def checkLocalWQStatus(self, dbname):
        """
        Query local WorkQueue workqueue/workqueue_inbox database to see whether
        there are any active elements in this agent.
        """
        results = {}

        for st in ('Available', 'Negotiating', 'Acquired', 'Running'):
            if dbname == "workqueue":
                elements = self.localBackend.getElements(status=st, returnIdOnly=True)
            else:
                elements = self.localBackend.getInboxElements(status=st, returnIdOnly=True)
            results[st] = len(elements)
        return results

    def checkGlobalWQStatus(self):
        """
        Query Global WorkQueue workqueue database to see whether there are
        any active elements set to this agent.
        """
        results = {}

        for st in ("Acquired", "Running"):
            elements = self.globalBackend.getElements(status=st, returnIdOnly=True,
                                                      ChildQueueUrl=self.thisAgentUrl)
            results[st] = len(elements)
        return results
Author: dmwm, Project: WMCore, Lines: 93, Source: DrainStatusAPI.py
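
Compared with example 1, this variant takes the agent configuration so it can also inspect the local and global WorkQueue backends. A hypothetical invocation, with placeholder hostname and CouchDB URLs standing in for a real agent configuration (the WMAgent runtime is still assumed, as in the earlier sketch):

from WMCore.Configuration import Configuration

config = Configuration()
config.section_("Agent").hostName = "vocms0000.cern.ch"                          # placeholder
config.section_("WorkloadSummary").couchurl = "https://cmsweb.example/couchdb"   # placeholder
config.section_("WorkQueueManager").couchurl = "http://localhost:5984"           # placeholder

api = DrainStatusAPI(config)
info = api.collectDrainInfo()
print(info.get('local_wq_status'))
print(info.get('global_wq_status'))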

Example 12: testA_basicFunction

    def testA_basicFunction(self):
        """
        _basicFunction_

        See if I can make the damn thing work.
        """
        myThread = threading.currentThread()

        config = self.getConfig()

        from WMComponent.DBS3Buffer.DBSUploadPoller import DBSUploadPoller
        dbsUploader = DBSUploadPoller(config = config)
        dbsUtil     = DBSBufferUtil()
        from dbs.apis.dbsClient import DbsApi
        dbsApi      = DbsApi(url = config.DBSUpload.dbsUrl)

        # This should do nothing
        # Just making sure we don't crash
        try:
            dbsUploader.algorithm()
        except:
            dbsUploader.close()
            raise

        name = "ThisIsATest%s" % (int(time.time()))
        tier = "RECO"
        nFiles = 12
        name = name.replace('-', '_')
        name = '%s-v0' % name
        files = self.getFiles(name = name, tier = tier, nFiles = nFiles)
        datasetPath = "/Cosmics/%s/%s" % (name, tier)

        try:
            dbsUploader.algorithm()
        except:
            dbsUploader.close()
            raise

        time.sleep(5)

        # Now look in DBS
        try:
            result = dbsApi.listDatasets(dataset = datasetPath, detail = True,
                                         dataset_access_type = 'PRODUCTION')
            self.assertEqual(len(result), 1)
            self.assertEqual(result[0]['data_tier_name'], 'RECO')
            self.assertEqual(result[0]['processing_version'], 0)
            self.assertEqual(result[0]['acquisition_era_name'], name.split('-')[0])

            result = dbsApi.listFiles(dataset=datasetPath)
            self.assertEqual(len(result), 11)
        except:
            dbsUploader.close()
            raise

        # All the blocks except for the last one should
        # now be there
        result = myThread.dbi.processData("SELECT id FROM dbsbuffer_block")[0].fetchall()
        self.assertEqual(len(result), 12)

        # The last block should still be open
        self.assertEqual(len(dbsUtil.findOpenBlocks()), 1)

        try:
            dbsUploader.algorithm()
        except:
            raise
        finally:
            dbsUploader.close()

        # All files should now be available
        result = dbsApi.listFiles(dataset=datasetPath)
        self.assertEqual(len(result), 12)

        # The last block should now be closed
        self.assertEqual(len(dbsUtil.findOpenBlocks()), 0)

        result = myThread.dbi.processData("SELECT status FROM dbsbuffer_block")[0].fetchall()
        for res in result:
            self.assertEqual(res.values()[0], 'InDBS')

        return
Author: ticoann, Project: WMCore, Lines: 83, Source: DBSUpload_t.py

Example 13: DBSUploadPoller

class DBSUploadPoller(BaseWorkerThread):
    """
    Handles poll-based DBSUpload

    """

    def __init__(self, config):
        """
        Initialise class members
        """
        logging.info("Running __init__ for DBS3 Uploader")
        BaseWorkerThread.__init__(self)
        self.config = config

        # This is slightly dangerous, but DBSUpload depends
        # on DBSInterface anyway
        self.dbsUrl = self.config.DBS3Upload.dbsUrl

        self.dbsUtil = DBSBufferUtil()

        myThread = threading.currentThread()
        self.daoFactory = DAOFactory(package="WMComponent.DBS3Buffer",
                                     logger=myThread.logger,
                                     dbinterface=myThread.dbi)

        self.pool = []
        self.blocksToCheck = []
        self.workInput = None
        self.workResult = None
        self.nProc = getattr(self.config.DBS3Upload, 'nProcesses', 4)
        self.wait = getattr(self.config.DBS3Upload, 'dbsWaitTime', 2)
        self.nTries = getattr(self.config.DBS3Upload, 'dbsNTries', 300)
        self.physicsGroup = getattr(self.config.DBS3Upload, "physicsGroup", "NoGroup")
        self.datasetType = getattr(self.config.DBS3Upload, "datasetType", "PRODUCTION")
        self.primaryDatasetType = getattr(self.config.DBS3Upload, "primaryDatasetType", "mc")
        self.blockCount = 0
        self.dbsApi = DbsApi(url=self.dbsUrl)

        # List of blocks currently in processing
        self.queuedBlocks = []

        # Set up the pool of worker processes
        self.setupPool()

        # Setting up any cache objects
        self.blockCache = {}

        self.filesToUpdate = []

        self.produceCopy = getattr(self.config.DBS3Upload, 'dumpBlock', False)

        self.copyPath = os.path.join(getattr(self.config.DBS3Upload, 'componentDir', '/data/srv/'),
                                     'dbsuploader_block.json')

        self.timeoutWaiver = 1

        return

    def setupPool(self):
        """
        _setupPool_

        Set up the processing pool for work
        """
        if len(self.pool) > 0:
            # Then something already exists.  Continue
            return

        self.workInput = multiprocessing.Queue()
        self.workResult = multiprocessing.Queue()

        # Starting up the pool:
        for _ in range(self.nProc):
            p = multiprocessing.Process(target=uploadWorker,
                                        args=(self.workInput,
                                              self.workResult,
                                              self.dbsUrl))
            p.start()
            self.pool.append(p)

        return

    def __del__(self):
        """
        __del__

        Trigger a close of connections if necessary
        """
        self.close()
        return

    def close(self):
        """
        _close_

        Kill all connections and terminate
        """
        terminate = False
        for _ in self.pool:
            try:
#......... remainder of the code omitted .........
Author: DAMason, Project: WMCore, Lines: 101, Source: DBSUploadPoller.py

Example 14: __init__

    def __init__(self, config):
        """
        Initialise class members
        """
        logging.info("Running __init__ for DBS3 Uploader")
        BaseWorkerThread.__init__(self)
        self.config = config

        # This is slightly dangerous, but DBSUpload depends
        # on DBSInterface anyway
        self.dbsUrl = self.config.DBS3Upload.dbsUrl

        # Tier0 Agent don't need this
        if hasattr(self.config, "Tier0Feeder"):
            self.wmstatsServerSvc = None
        else:
            wmstatsSvcURL = self.config.General.centralWMStatsURL.replace("couchdb/wmstats",
                                                                          "wmstatsserver")
            self.wmstatsServerSvc = WMStatsServer(wmstatsSvcURL)

        self.dbsUtil = DBSBufferUtil()

        myThread = threading.currentThread()
        daoFactory = DAOFactory(package="WMComponent.DBS3Buffer",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)
        self.updateBlocksDAO = daoFactory(classname="UpdateBlocks")
        self.updateFilesDAO = daoFactory(classname="UpdateFiles")
        self.createBlocksDAO = daoFactory(classname="CreateBlocks")
        self.setBlockFilesDAO = daoFactory(classname="SetBlockFiles")

        self.pool = []
        self.blocksToCheck = []
        self.workInput = None
        self.workResult = None
        self.nProc = getattr(self.config.DBS3Upload, 'nProcesses', 4)
        self.wait = getattr(self.config.DBS3Upload, 'dbsWaitTime', 2)
        self.nTries = getattr(self.config.DBS3Upload, 'dbsNTries', 300)
        self.physicsGroup = getattr(self.config.DBS3Upload, "physicsGroup", "NoGroup")
        self.datasetType = getattr(self.config.DBS3Upload, "datasetType", "PRODUCTION")
        self.primaryDatasetType = getattr(self.config.DBS3Upload, "primaryDatasetType", "mc")
        self.blockCount = 0
        self.dbsApi = DbsApi(url=self.dbsUrl)

        # List of blocks currently in processing
        self.queuedBlocks = []

        # Set up the pool of worker processes
        self.setupPool()

        # Setting up any cache objects
        self.blockCache = {}

        self.filesToUpdate = []

        self.produceCopy = getattr(self.config.DBS3Upload, 'dumpBlock', False)

        self.copyPath = os.path.join(getattr(self.config.DBS3Upload, 'componentDir', '/data/srv/'),
                                     'dbsuploader_block.json')

        self.timeoutWaiver = 1

        self.datasetParentageCache = {}

        return
Author: dmwm, Project: WMCore, Lines: 65, Source: DBSUploadPoller.py

Example 15: AnalyticsPoller

class AnalyticsPoller(BaseWorkerThread):
    """
    Gather the summary data for each request (workflow) from the local queue,
    local job couchdb, and wmbs/bossair, and populate the summary db for monitoring
    """
    def __init__(self, config):
        """
        initialize properties specified from config
        """
        BaseWorkerThread.__init__(self)
        # set the workqueue service for REST call
        self.config = config
        # need to get campaign, user, owner info
        self.agentInfo = initAgentInfo(self.config)
        self.summaryLevel = (config.AnalyticsDataCollector.summaryLevel).lower()
        self.pluginName = getattr(config.AnalyticsDataCollector, "pluginName", None)
        self.plugin = None

    def setup(self, parameters):
        """
        set db connection(couchdb, wmbs) to prepare to gather information
        """
        # set the connection to local queue
        if not hasattr(self.config, "Tier0Feeder"):
            self.localQueue = WorkQueueService(self.config.AnalyticsDataCollector.localQueueURL)

        # set the connection for local couchDB call
        self.localCouchDB = LocalCouchDBData(self.config.AnalyticsDataCollector.localCouchURL,
                                             self.config.JobStateMachine.summaryStatsDBName,
                                             self.summaryLevel)

        # interface to WMBS/BossAir db
        myThread = threading.currentThread()
        # set wmagent db data
        self.wmagentDB = WMAgentDBData(self.summaryLevel, myThread.dbi, myThread.logger)
        # set the connection for local couchDB call
        self.localSummaryCouchDB = WMStatsWriter(self.config.AnalyticsDataCollector.localWMStatsURL,
                                                 appName="WMStatsAgent")

        # use local db for tier0
        if hasattr(self.config, "Tier0Feeder"):
            centralRequestCouchDBURL = self.config.AnalyticsDataCollector.localT0RequestDBURL
        else:
            centralRequestCouchDBURL = self.config.AnalyticsDataCollector.centralRequestDBURL

        self.centralRequestCouchDB = RequestDBWriter(centralRequestCouchDBURL,
                                                     couchapp=self.config.AnalyticsDataCollector.RequestCouchApp)
        self.centralWMStatsCouchDB = WMStatsWriter(self.config.General.centralWMStatsURL)

        #TODO: change the config to hold couch url
        self.localCouchServer = CouchMonitor(self.config.JobStateMachine.couchurl)

        self.dbsBufferUtil = DBSBufferUtil()

        if self.pluginName is not None:
            pluginFactory = WMFactory("plugins", "WMComponent.AnalyticsDataCollector.Plugins")
            self.plugin = pluginFactory.loadObject(classname=self.pluginName)

    @timeFunction
    def algorithm(self, parameters):
        """
        get information from wmbs, workqueue and local couch
        """
        try:
            # jobs per request info
            logging.info("Getting Job Couch Data ...")
            jobInfoFromCouch = self.localCouchDB.getJobSummaryByWorkflowAndSite()

            # fwjr per request info
            logging.info("Getting FWJRJob Couch Data ...")

            fwjrInfoFromCouch = self.localCouchDB.getJobPerformanceByTaskAndSiteFromSummaryDB()
            skippedInfoFromCouch = self.localCouchDB.getSkippedFilesSummaryByWorkflow()

            logging.info("Getting Batch Job Data ...")
            batchJobInfo = self.wmagentDB.getBatchJobInfo()

            logging.info("Getting Finished Task Data ...")
            finishedTasks = self.wmagentDB.getFinishedSubscriptionByTask()

            logging.info("Getting DBS PhEDEx upload status ...")
            completedWfs = self.dbsBufferUtil.getPhEDExDBSStatusForCompletedWorkflows(summary=True)

            # get the data from local workqueue:
            # request name, input dataset, inWMBS, inQueue
            logging.info("Getting Local Queue Data ...")
            localQInfo = {}
            if not hasattr(self.config, "Tier0Feeder"):
                localQInfo = self.localQueue.getAnalyticsData()
            else:
                logging.debug("Tier-0 instance, not checking WorkQueue")

            # combine all the data from 3 sources
            logging.info("""Combining data from
                                   Job Couch(%s),
                                   FWJR(%s),
                                   WorkflowsWithSkippedFile(%s),
                                   Batch Job(%s),
                                   Finished Tasks(%s),
                                   Local Queue(%s)
#......... remainder of the code omitted .........
Author: dmwm, Project: WMCore, Lines: 101, Source: AnalyticsPoller.py


Note: The WMComponent.DBS3Buffer.DBSBufferUtil.DBSBufferUtil class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. For distribution and use, please refer to the corresponding project's License; reproduction without permission is prohibited.