本文整理汇总了Python中DIRAC.Core.Utilities.ProcessPool.ProcessPool类的典型用法代码示例。如果您正苦于以下问题:Python ProcessPool类的具体用法?Python ProcessPool怎么用?Python ProcessPool使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ProcessPool类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: TaskCallbacksTests
class TaskCallbacksTests(unittest.TestCase):
    """
    .. class:: TaskCallbacksTests

    Test case for ProcessPool task callbacks (ResultCallback / ExceptionCallback).
    """

    def setUp(self):
        """Create a daemonized ProcessPool (min 4, max 8 workers, queue depth 8)."""
        gLogger.showHeaders(True)
        self.log = gLogger.getSubLogger(self.__class__.__name__)
        self.processPool = ProcessPool(4, 8, 8)
        self.processPool.daemonize()

    def _enqueueTasks(self, taskCallable, label, nTasks=10):
        """Enqueue *nTasks* instances of *taskCallable* with task callbacks attached.

        :param taskCallable: class or function handed to createAndQueueTask
        :param label: name used in the log message for each enqueued task
        :param nTasks: number of tasks to enqueue before finalizing the pool

        A random wait of 0 doubles as the "raise an exception" marker so both
        the result and the exception callbacks get exercised.
        """
        i = 0
        while True:
            if self.processPool.getFreeSlots() > 0:
                timeWait = random.randint(0, 5)
                raiseException = not timeWait
                result = self.processPool.createAndQueueTask(taskCallable,
                                                             taskID=i,
                                                             args=(i, timeWait, raiseException),
                                                             callback=ResultCallback,
                                                             exceptionCallback=ExceptionCallback,
                                                             blocking=True)
                if result["OK"]:
                    self.log.always("%s enqueued to task %s" % (label, i))
                    i += 1
                else:
                    continue
            if i == nTasks:
                break
        self.processPool.finalize(2)

    def testCallableClass(self):
        """CallableClass and task callbacks test."""
        self._enqueueTasks(CallableClass, "CallableClass")

    def testCallableFunc(self):
        """CallableFunc and task callbacks test.

        NOTE: the log label used to say "CallableClass" here (copy-paste slip);
        it now correctly reports CallableFunc.
        """
        self._enqueueTasks(CallableFunc, "CallableFunc")
示例2: setUp
def setUp(self):
    """Set up the fixture: command-line parsing, logger, and a daemonized pool."""
    from DIRAC.Core.Base import Script
    Script.parseCommandLine()
    from DIRAC.FrameworkSystem.Client.Logger import gLogger
    gLogger.showHeaders(True)
    self.log = gLogger.getSubLogger(self.__class__.__name__)
    pool = ProcessPool(4, 8, 8)
    pool.daemonize()
    self.processPool = pool
示例3: submitJob
def submitJob(self, executableFile, proxy, **kwargs):
    """Submit a job to the in-process pool.

    :param executableFile: path to the payload wrapper to execute
    :param proxy: proxy the payload runs with
    :param kwargs: submission options (wholeNode, numberOfProcessors, ...)
    :return: S_OK/S_ERROR as returned by ProcessPool.createAndQueueTask
    """
    # the pool is created lazily, sized to the processor count
    if self.pPool is None:
        self.pPool = ProcessPool(minSize=self.processors,
                                 maxSize=self.processors,
                                 poolCallback=self.finalizeJob)
    # drain finished tasks first so the slot accounting is current
    self.pPool.processResults()
    processorsInUse = self.getProcessorsInUse()
    # work out how many processors this job requires
    if kwargs.get('wholeNode'):
        if processorsInUse > 0:
            return S_ERROR('Can not take WholeNode job')  # , %d/%d slots used' % (self.slotsInUse,self.slots) )
        requestedProcessors = self.processors
    elif "numberOfProcessors" in kwargs:
        requestedProcessors = int(kwargs['numberOfProcessors'])
        if requestedProcessors > 0 and (processorsInUse + requestedProcessors) > self.processors:
            return S_ERROR('Not enough slots: requested %d, available %d' % (requestedProcessors,
                                                                             self.processors - processorsInUse))
    else:
        requestedProcessors = 1
    if self.processors - processorsInUse < requestedProcessors:
        return S_ERROR('Not enough slots: requested %d, available %d' % (requestedProcessors,
                                                                         self.processors - processorsInUse))
    ret = getProxyInfo()
    pilotProxy = ret['Value']['path'] if ret['OK'] else None
    self.log.notice('Pilot Proxy:', pilotProxy)
    # kwargs actually handed to the task (the submission kwargs are done with)
    taskKwargs = {'UseSudo': False}
    if self.useSudo:
        # pick the first sudo payload-user slot not already in use
        for nUser in range(MAX_NUMBER_OF_SUDO_UNIX_USERS):
            if nUser not in self.userNumberPerTask.values():
                break
        taskKwargs['NUser'] = nUser
        taskKwargs['PayloadUser'] = os.environ['USER'] + 'p%s' % str(nUser).zfill(2)
        taskKwargs['UseSudo'] = True
    result = self.pPool.createAndQueueTask(executeJob,
                                           args=(executableFile, proxy, self.taskID),
                                           kwargs=taskKwargs,
                                           taskID=self.taskID,
                                           usePoolCallbacks=True)
    self.processorsPerTask[self.taskID] = requestedProcessors
    self.taskID += 1
    self.pPool.processResults()
    return result
示例4: setUp
def setUp(self):
    """Build the fixture: logger plus a daemonized pool with both callbacks.

    :param self: self reference
    """
    gLogger.showHeaders(True)
    self.log = gLogger.getSubLogger(self.__class__.__name__)
    pool = ProcessPool(4, 8, 8,
                       poolCallback=self.poolCallback,
                       poolExceptionCallback=self.poolExceptionCallback)
    pool.daemonize()
    self.processPool = pool
示例5: __init__
def __init__(self, ceUniqueID, cores=0):
    """Standard constructor.

    :param ceUniqueID: unique identifier of this computing element
    :param cores: number of cores to use; a non-positive value means autodetect
    """
    ComputingElement.__init__(self, ceUniqueID)
    self.ceType = "Pool"
    self.submittedJobs = 0
    self.cores = cores if cores > 0 else getNumberOfCores()
    # the pool is sized exactly to the core count
    self.pPool = ProcessPool(self.cores, self.cores, poolCallback=self.finalizeJob)
    self.taskID = 0
    # taskID -> number of cores held by that task
    self.coresPerTask = {}
示例6: processPool
def processPool(self):
    """Facade for the lazily created ProcessPool instance."""
    if self.__processPool:
        return self.__processPool
    # sanitize the configured bounds before building the pool
    minWorkers = max(1, self.__minProcess)
    maxWorkers = max(self.__minProcess, self.__maxProcess)
    queueDepth = abs(self.__queueSize)
    self.log.info("ProcessPool: minProcess = %d maxProcess = %d queueSize = %d" % (minWorkers,
                                                                                   maxWorkers,
                                                                                   queueDepth))
    self.__processPool = ProcessPool(minWorkers,
                                     maxWorkers,
                                     queueDepth,
                                     poolCallback=self.resultCallback,
                                     poolExceptionCallback=self.exceptionCallback)
    self.__processPool.daemonize()
    return self.__processPool
示例7: processPool
def processPool(self):
    """'Live long and prosper, my dear ProcessPool'
      - Mr. Spock

    :param self: self reference
    :return: brand new shiny ProcessPool instance on first call, the same instance
             on subsequent calls
    """
    if not self.__processPool:
        nMin = max(1, self.__minProcess)
        nMax = max(self.__minProcess, self.__maxProcess)
        depth = abs(self.__queueSize)
        self.log.info("ProcessPool: minProcess = %d maxProcess = %d queueSize = %d" % (nMin,
                                                                                       nMax,
                                                                                       depth))
        self.log.info("ProcessPool: tasks will use callbacks attached to ProcessPool")
        self.__processPool = ProcessPool(nMin, nMax, depth,
                                         poolCallback=self.resultCallback,
                                         poolExceptionCallback=self.exceptionCallback)
        self.__processPool.daemonize()
        self.log.info("ProcessPool: daemonized and ready")
    return self.__processPool
示例8: getCEStatus
def getCEStatus(self, jobIDList=None):
    """Return information on running and pending jobs.

    :return: S_OK dict carrying job and processor counters
    """
    # build the pool lazily, as submitJob does
    if self.pPool is None:
        self.pPool = ProcessPool(minSize=self.processors,
                                 maxSize=self.processors,
                                 poolCallback=self.finalizeJob)
    self.pPool.processResults()
    processorsInUse = self.getProcessorsInUse()
    result = S_OK()
    result['SubmittedJobs'] = 0
    # every task holding at least one processor counts as one running job
    result['RunningJobs'] = sum(1 for busy in self.processorsPerTask.itervalues() if busy > 0)
    result['WaitingJobs'] = 0
    result['UsedProcessors'] = processorsInUse
    result['AvailableProcessors'] = self.processors - processorsInUse
    return result
示例9: runTest
def runTest():
    """Fan nClients concurrent test tasks out over a ProcessPool and report stats.

    Configuration comes from module globals (nClients, nQueries, testType, ...);
    results are accumulated into the global resultTest by the callbacks.

    :return: ((averageTime, errorTime), (averageRate, errorRate))
    """
    global nClients, nQueries, testType, resultTest, testDir, lfnListFile
    resultTest = []
    pp = ProcessPool(nClients)
    # HACK: testType names one of the test functions in this module; eval() on
    # it is unsafe if testType can ever come from untrusted input
    testFunction = eval(testType)
    for c in xrange(nClients):
        pp.createAndQueueTask(testFunction, [nQueries],
                              callback=finalize,
                              exceptionCallback=doException)
    # wait (up to an hour) for all tasks, then shut the pool down
    pp.processAllResults(3600)
    pp.finalize(0)
    timeResult = []
    for testTime, success, failure in resultTest:
        # print testTime,success,failure
        # testTime is expected to be a list of per-query durations — TODO confirm
        timeResult += testTime
    averageTime, errorTime = doStats(timeResult)
    rateResult = [nClients / t for t in timeResult]
    averageRate, errorRate = doStats(rateResult)
    if testDir:
        print "\nTest results for clients %d, %s" % (nClients, testDir)
    else:
        print "\nTest results for clients %d, %s" % (nClients, lfnListFile)
    print "Query time: %.2f +/- %.2f" % (averageTime, errorTime)
    print "Query rate: %.2f +/- %.2f" % (averageRate, errorRate)
    return((averageTime, errorTime), (averageRate, errorRate))
示例10: TaskTimeOutTests
class TaskTimeOutTests( unittest.TestCase ):
"""
.. class:: TaskTimeOutTests
test case for ProcessPool
"""
def setUp(self):
    """Build the fixture: parsed command line, logger, and a daemonized pool.

    :param self: self reference
    """
    from DIRAC.Core.Base import Script
    Script.parseCommandLine()
    from DIRAC.FrameworkSystem.Client.Logger import gLogger
    gLogger.showHeaders(True)
    self.log = gLogger.getSubLogger(self.__class__.__name__)
    pool = ProcessPool(2, 4, 8,
                       poolCallback=self.poolCallback,
                       poolExceptionCallback=self.poolExceptionCallback)
    pool.daemonize()
    self.processPool = pool
def poolCallback(self, taskID, taskResult):
    """Pool-level results callback: log whatever the task produced."""
    message = "callback result for %s is %s" % (taskID, taskResult)
    self.log.always(message)
def poolExceptionCallback(self, taskID, taskException):
    """Pool-level exception callback: log the exception the task raised."""
    message = "callback exception for %s is %s" % (taskID, taskException)
    self.log.always(message)
def testCallableClass(self):
    """CallableClass and task time out test."""
    queued = 0
    while queued < 16:
        # busy-wait until the pool has a free slot
        if self.processPool.getFreeSlots() <= 0:
            continue
        timeWait = random.randint(0, 5) * 10
        # a zero wait doubles as the "raise an exception" marker
        raiseException = timeWait == 0
        result = self.processPool.createAndQueueTask(CallableClass,
                                                     taskID=queued,
                                                     args=(queued, timeWait, raiseException),
                                                     timeOut=15,
                                                     usePoolCallbacks=True,
                                                     blocking=True)
        if result["OK"]:
            self.log.always("CallableClass enqueued to task %s timeWait=%s exception=%s" % (queued, timeWait, raiseException))
            queued += 1
    self.processPool.finalize(2)
def testCallableFunc(self):
    """CallableFunc and task timeout test."""
    queued = 0
    while queued < 16:
        # busy-wait until the pool has a free slot
        if self.processPool.getFreeSlots() <= 0:
            continue
        timeWait = random.randint(0, 5) * 5
        # a zero wait doubles as the "raise an exception" marker
        raiseException = timeWait == 0
        result = self.processPool.createAndQueueTask(CallableFunc,
                                                     taskID=queued,
                                                     args=(queued, timeWait, raiseException),
                                                     timeOut=15,
                                                     usePoolCallbacks=True,
                                                     blocking=True)
        if result["OK"]:
            self.log.always("CallableFunc enqueued to task %s timeWait=%s exception=%s" % (queued, timeWait, raiseException))
            queued += 1
    self.processPool.finalize(2)
def testLockedClass( self ):
""" LockedCallableClass and task time out test """
for loop in range(2):
self.log.always( "loop %s" % loop )
i = 0
while i < 16:
if self.processPool.getFreeSlots() > 0:
timeWait = random.randint(0, 5) * 5
raiseException = False
if timeWait == 5:
raiseException = True
klass = CallableClass
if timeWait >= 20:
klass = LockedCallableClass
result = self.processPool.createAndQueueTask( klass,
taskID = i,
args = ( i, timeWait, raiseException ),
timeOut = 15,
usePoolCallbacks = True,
blocking = True )
#.........这里部分代码省略.........
示例11: PoolComputingElement
class PoolComputingElement(ComputingElement):
mandatoryParameters = MandatoryParameters
#############################################################################
def __init__(self, ceUniqueID):
    """ Standard constructor.

    :param ceUniqueID: unique identifier of this computing element
    """
    ComputingElement.__init__(self, ceUniqueID)
    self.ceType = "Pool"
    self.log = gLogger.getSubLogger('Pool')
    self.submittedJobs = 0
    # number of processors this CE may use; refreshed from CS in _reset()
    self.processors = 1
    # ProcessPool is created lazily on first submitJob()/getCEStatus() call
    self.pPool = None
    self.taskID = 0
    # taskID -> number of processors allocated to that task
    self.processorsPerTask = {}
    # taskID -> sudo payload-user number, only filled when useSudo is set
    self.userNumberPerTask = {}
    self.useSudo = False
#############################################################################
def _addCEConfigDefaults(self):
    """Make sure all necessary configuration parameters are defined.

    Delegates to the base class; this CE adds no extra defaults of its own.
    """
    # First assure that any global parameters are loaded
    ComputingElement._addCEConfigDefaults(self)
def _reset(self):
    """Refresh CE parameters from the configuration."""
    self.processors = int(self.ceParameters.get('NumberOfProcessors', self.processors))
    # cap the total number of jobs at the processor count
    self.ceParameters['MaxTotalJobs'] = self.processors
    # NOTE(review): configuration values are often strings; a value like 'False'
    # would be truthy here — confirm SudoExecution arrives as a real boolean
    self.useSudo = self.ceParameters.get('SudoExecution', False)
def getProcessorsInUse(self):
    """Total number of processors currently allocated to submitted tasks."""
    return sum(self.processorsPerTask.values())
#############################################################################
def submitJob(self, executableFile, proxy, **kwargs):
    """ Method to submit job.

    :param executableFile: payload wrapper script to execute
    :param proxy: proxy the payload runs with
    :param kwargs: submission options (wholeNode, numberOfProcessors, ...)
    :return: S_OK/S_ERROR as returned by ProcessPool.createAndQueueTask
    """
    # the pool is created lazily, sized to the processor count
    if self.pPool is None:
        self.pPool = ProcessPool(minSize=self.processors,
                                 maxSize=self.processors,
                                 poolCallback=self.finalizeJob)
    # drain finished tasks first so the slot accounting below is current
    self.pPool.processResults()
    processorsInUse = self.getProcessorsInUse()
    if kwargs.get('wholeNode'):
        # a whole-node job needs the machine to itself
        if processorsInUse > 0:
            return S_ERROR('Can not take WholeNode job')  # , %d/%d slots used' % (self.slotsInUse,self.slots) )
        else:
            requestedProcessors = self.processors
    elif "numberOfProcessors" in kwargs:
        requestedProcessors = int(kwargs['numberOfProcessors'])
        if requestedProcessors > 0:
            if (processorsInUse + requestedProcessors) > self.processors:
                return S_ERROR('Not enough slots: requested %d, available %d' % (requestedProcessors,
                                                                                 self.processors - processorsInUse))
    else:
        requestedProcessors = 1
    if self.processors - processorsInUse < requestedProcessors:
        return S_ERROR('Not enough slots: requested %d, available %d' % (requestedProcessors,
                                                                         self.processors - processorsInUse))
    ret = getProxyInfo()
    if not ret['OK']:
        pilotProxy = None
    else:
        pilotProxy = ret['Value']['path']
    self.log.notice('Pilot Proxy:', pilotProxy)
    # NOTE(review): from here on "kwargs" is rebound to the kwargs passed to
    # the task itself; the original submission kwargs are no longer needed
    kwargs = {'UseSudo': False}
    if self.useSudo:
        # pick the first sudo payload-user slot not already in use
        for nUser in range(MAX_NUMBER_OF_SUDO_UNIX_USERS):
            if nUser not in self.userNumberPerTask.values():
                break
        kwargs['NUser'] = nUser
        kwargs['PayloadUser'] = os.environ['USER'] + 'p%s' % str(nUser).zfill(2)
        kwargs['UseSudo'] = True
    result = self.pPool.createAndQueueTask(executeJob,
                                           args=(executableFile, proxy, self.taskID),
                                           kwargs=kwargs,
                                           taskID=self.taskID,
                                           usePoolCallbacks=True)
    self.processorsPerTask[self.taskID] = requestedProcessors
    self.taskID += 1
    self.pPool.processResults()
    return result
def finalizeJob(self, taskID, result):
""" Finalize the job
#.........这里部分代码省略.........
示例12: ProcessPoolCallbacksTests
class ProcessPoolCallbacksTests(unittest.TestCase):
    """
    .. class:: ProcessPoolCallbacksTests

    Test case exercising ProcessPool-level (pool) callbacks.
    """

    def setUp(self):
        """Build the fixture: parsed command line, logger, and a daemonized pool.

        :param self: self reference
        """
        from DIRAC.Core.Base import Script
        Script.parseCommandLine()
        from DIRAC.FrameworkSystem.Client.Logger import gLogger
        gLogger.showHeaders(True)
        self.log = gLogger.getSubLogger(self.__class__.__name__)
        pool = ProcessPool(4, 8, 8,
                           poolCallback=self.poolCallback,
                           poolExceptionCallback=self.poolExceptionCallback)
        pool.daemonize()
        self.processPool = pool

    def poolCallback(self, taskID, taskResult):
        """Log results delivered through the pool callback."""
        self.log.always("callback for %s result is %s" % (taskID, taskResult))

    def poolExceptionCallback(self, taskID, taskException):
        """Log exceptions delivered through the pool callback."""
        self.log.always("callback for %s exception is %s" % (taskID, taskException))

    def testCallableClass(self):
        """CallableClass and pool callbacks test."""
        queued = 0
        while queued < 10:
            # busy-wait until the pool has a free slot
            if self.processPool.getFreeSlots() <= 0:
                continue
            timeWait = random.randint(0, 5)
            # a zero wait doubles as the "raise an exception" marker
            result = self.processPool.createAndQueueTask(CallableClass,
                                                         taskID=queued,
                                                         args=(queued, timeWait, timeWait == 0),
                                                         usePoolCallbacks=True,
                                                         blocking=True)
            if result["OK"]:
                self.log.always("CallableClass enqueued to task %s" % queued)
                queued += 1
        self.processPool.finalize(2)

    def testCallableFunc(self):
        """CallableFunc and pool callbacks test."""
        queued = 0
        while queued < 10:
            # busy-wait until the pool has a free slot
            if self.processPool.getFreeSlots() <= 0:
                continue
            timeWait = random.randint(0, 5)
            # a zero wait doubles as the "raise an exception" marker
            result = self.processPool.createAndQueueTask(CallableFunc,
                                                         taskID=queued,
                                                         args=(queued, timeWait, timeWait == 0),
                                                         usePoolCallbacks=True,
                                                         blocking=True)
            if result["OK"]:
                self.log.always("CallableFunc enqueued to task %s" % queued)
                queued += 1
        self.processPool.finalize(2)
示例13: ProcessPool
# NOTE(review): this fragment appears scrambled by the page extraction — the
# task-submission loop is shown before the pool/queue setup that it uses, and
# the "Task failed" branch below reads like it belongs to a result callback.
for path in result['Value']['Directories']:
    # spread the load over the (shuffled) LFC mirrors
    random.shuffle(lfcHosts)
    #print pPool.getNumWorkingProcesses(), pPool.hasPendingTasks()
    print "Queueing task for directory %s, lfc %s" % ( path, lfcHosts[0] )
    result = pPool.createAndQueueTask( processDir, [path , writerQueue, False, lfcHosts[0]], callback = finalizeDirectory )
    if not result['OK']:
        print "Failed queueing %s" % path
    else:
        # presumably reached when a queued task reports failure — TODO confirm
        print "Task failed: %s" % result['Message']
        if 'Path' in result:
            # retry the directory on another (shuffled) LFC host
            random.shuffle(lfcHosts)
            print "Requeueing task for directory %s, lfc %s" % ( result['Path'], lfcHosts[0] )
#########################################################################
# pool of 30-40 worker processes with an unbounded (0) queue
pPool = ProcessPool(30,40,0)
manager = Manager()
writerQueue = manager.Queue()
stopFlag = Value( 'i', 0 )
#pPool.daemonize()
# lfcHosts = ['lfc-lhcb-ro.cern.ch',
# 'lfc-lhcb-ro.cr.cnaf.infn.it',
# 'lhcb-lfc-fzk.gridka.de',
# 'lfc-lhcb-ro.in2p3.fr',
# 'lfc-lhcb.grid.sara.nl',
# 'lfclhcb.pic.es',
# 'lhcb-lfc.gridpp.rl.ac.uk']
lfcHosts = ['prod-lfc-lhcb-ro.cern.ch']
示例14: RequestExecutingAgent
class RequestExecutingAgent(AgentModule):
"""
.. class:: RequestExecutingAgent
request processing agent using ProcessPool, Operation handlers and RequestTask
"""
# # process pool
__processPool = None
# # request cache
__requestCache = {}
# # requests/cycle
__requestsPerCycle = 100
# # minimal nb of subprocess running
__minProcess = 2
# # maximal nb of subprocess executed same time
__maxProcess = 4
# # ProcessPool queue size
__queueSize = 20
# # file timeout
__fileTimeout = 300
# # operation timeout
__operationTimeout = 300
# # ProcessTask default timeout in seconds
__taskTimeout = 900
# # ProcessPool finalization timeout
__poolTimeout = 900
# # ProcessPool sleep time
__poolSleep = 5
# # placeholder for RequestClient instance
__requestClient = None
# # Size of the bulk if use of getRequests. If 0, use getRequest
__bulkRequest = 0
def __init__(self, *args, **kwargs):
""" c'tor """
# # call base class ctor
AgentModule.__init__(self, *args, **kwargs)
# # ProcessPool related stuff
self.__requestsPerCycle = self.am_getOption("RequestsPerCycle", self.__requestsPerCycle)
self.log.info("Requests/cycle = %d" % self.__requestsPerCycle)
self.__minProcess = self.am_getOption("MinProcess", self.__minProcess)
self.log.info("ProcessPool min process = %d" % self.__minProcess)
self.__maxProcess = self.am_getOption("MaxProcess", 4)
self.log.info("ProcessPool max process = %d" % self.__maxProcess)
self.__queueSize = self.am_getOption("ProcessPoolQueueSize", self.__queueSize)
self.log.info("ProcessPool queue size = %d" % self.__queueSize)
self.__poolTimeout = int(self.am_getOption("ProcessPoolTimeout", self.__poolTimeout))
self.log.info("ProcessPool timeout = %d seconds" % self.__poolTimeout)
self.__poolSleep = int(self.am_getOption("ProcessPoolSleep", self.__poolSleep))
self.log.info("ProcessPool sleep time = %d seconds" % self.__poolSleep)
self.__bulkRequest = self.am_getOption("BulkRequest", 0)
self.log.info("Bulk request size = %d" % self.__bulkRequest)
# # keep config path and agent name
self.agentName = self.am_getModuleParam("fullName")
self.__configPath = PathFinder.getAgentSection(self.agentName)
# # operation handlers over here
opHandlersPath = "%s/%s" % (self.__configPath, "OperationHandlers")
opHandlers = gConfig.getSections(opHandlersPath)
if not opHandlers["OK"]:
self.log.error(opHandlers["Message"])
raise AgentConfigError("OperationHandlers section not found in CS under %s" % self.__configPath)
opHandlers = opHandlers["Value"]
self.timeOuts = dict()
# # handlers dict
self.handlersDict = dict()
for opHandler in opHandlers:
opHandlerPath = "%s/%s/Location" % (opHandlersPath, opHandler)
opLocation = gConfig.getValue(opHandlerPath, "")
if not opLocation:
self.log.error("%s not set for %s operation handler" % (opHandlerPath, opHandler))
continue
self.timeOuts[opHandler] = {"PerFile": self.__fileTimeout, "PerOperation": self.__operationTimeout}
opTimeout = gConfig.getValue("%s/%s/TimeOut" % (opHandlersPath, opHandler), 0)
if opTimeout:
self.timeOuts[opHandler]["PerOperation"] = opTimeout
fileTimeout = gConfig.getValue("%s/%s/TimeOutPerFile" % (opHandlersPath, opHandler), 0)
if fileTimeout:
self.timeOuts[opHandler]["PerFile"] = fileTimeout
self.handlersDict[opHandler] = opLocation
self.log.info("Operation handlers:")
for item in enumerate(self.handlersDict.items()):
opHandler = item[1][0]
self.log.info("[%s] %s: %s (timeout: %d s + %d s per file)" % (item[0], item[1][0], item[1][1],
self.timeOuts[opHandler]['PerOperation'],
self.timeOuts[opHandler]['PerFile']))
# # common monitor activity
gMonitor.registerActivity("Iteration", "Agent Loops",
"RequestExecutingAgent", "Loops/min", gMonitor.OP_SUM)
gMonitor.registerActivity("Processed", "Request Processed",
"RequestExecutingAgent", "Requests/min", gMonitor.OP_SUM)
gMonitor.registerActivity("Done", "Request Completed",
"RequestExecutingAgent", "Requests/min", gMonitor.OP_SUM)
#.........这里部分代码省略.........
示例15: RequestExecutingAgent
class RequestExecutingAgent( AgentModule ):
"""
.. class:: RequestExecutingAgent
request processing agent using ProcessPool, Operation handlers and RequestTask
"""
# # process pool
__processPool = None
# # request cache
__requestCache = {}
# # requests/cycle
__requestsPerCycle = 100
# # minimal nb of subprocess running
__minProcess = 2
# # maximal nb of subprocess executed same time
__maxProcess = 4
# # ProcessPool queue size
__queueSize = 20
# # file timeout
__fileTimeout = 300
# # operation timeout
__operationTimeout = 300
# # ProcessTask default timeout in seconds
__taskTimeout = 900
# # ProcessPool finalization timeout
__poolTimeout = 900
# # ProcessPool sleep time
__poolSleep = 5
# # placeholder for RequestClient instance
__requestClient = None
# # Size of the bulk if use of getRequests. If 0, use getRequest
__bulkRequest = 0
def __init__( self, *args, **kwargs ):
""" c'tor """
# # call base class ctor
AgentModule.__init__( self, *args, **kwargs )
# # ProcessPool related stuff
self.__requestsPerCycle = self.am_getOption( "RequestsPerCycle", self.__requestsPerCycle )
self.log.info( "Requests/cycle = %d" % self.__requestsPerCycle )
self.__minProcess = self.am_getOption( "MinProcess", self.__minProcess )
self.log.info( "ProcessPool min process = %d" % self.__minProcess )
self.__maxProcess = self.am_getOption( "MaxProcess", 4 )
self.log.info( "ProcessPool max process = %d" % self.__maxProcess )
self.__queueSize = self.am_getOption( "ProcessPoolQueueSize", self.__queueSize )
self.log.info( "ProcessPool queue size = %d" % self.__queueSize )
self.__poolTimeout = int( self.am_getOption( "ProcessPoolTimeout", self.__poolTimeout ) )
self.log.info( "ProcessPool timeout = %d seconds" % self.__poolTimeout )
self.__poolSleep = int( self.am_getOption( "ProcessPoolSleep", self.__poolSleep ) )
self.log.info( "ProcessPool sleep time = %d seconds" % self.__poolSleep )
self.__taskTimeout = int( self.am_getOption( "ProcessTaskTimeout", self.__taskTimeout ) )
self.log.info( "ProcessTask timeout = %d seconds" % self.__taskTimeout )
self.__bulkRequest = self.am_getOption( "BulkRequest", 0 )
self.log.info( "Bulk request size = %d" % self.__bulkRequest )
# # keep config path and agent name
self.agentName = self.am_getModuleParam( "fullName" )
self.__configPath = PathFinder.getAgentSection( self.agentName )
# # operation handlers over here
opHandlersPath = "%s/%s" % ( self.__configPath, "OperationHandlers" )
opHandlers = gConfig.getSections( opHandlersPath )
if not opHandlers["OK"]:
self.log.error( opHandlers["Message" ] )
raise AgentConfigError( "OperationHandlers section not found in CS under %s" % self.__configPath )
opHandlers = opHandlers["Value"]
self.timeOuts = dict()
# # handlers dict
self.handlersDict = dict()
for opHandler in opHandlers:
opHandlerPath = "%s/%s/Location" % ( opHandlersPath, opHandler )
opLocation = gConfig.getValue( opHandlerPath, "" )
if not opLocation:
self.log.error( "%s not set for %s operation handler" % ( opHandlerPath, opHandler ) )
continue
self.timeOuts[opHandler] = { "PerFile": self.__fileTimeout, "PerOperation": self.__operationTimeout }
opTimeout = gConfig.getValue( "%s/%s/TimeOut" % ( opHandlersPath, opHandler ), 0 )
if opTimeout:
self.timeOuts[opHandler]["PerOperation"] = opTimeout
fileTimeout = gConfig.getValue( "%s/%s/TimeOutPerFile" % ( opHandlersPath, opHandler ), 0 )
if fileTimeout:
self.timeOuts[opHandler]["PerFile"] = fileTimeout
self.handlersDict[opHandler] = opLocation
self.log.info( "Operation handlers:" )
for item in enumerate ( self.handlersDict.items() ):
opHandler = item[1][0]
self.log.info( "[%s] %s: %s (timeout: %d s + %d s per file)" % ( item[0], item[1][0], item[1][1],
self.timeOuts[opHandler]['PerOperation'],
self.timeOuts[opHandler]['PerFile'] ) )
# # common monitor activity
gMonitor.registerActivity( "Iteration", "Agent Loops",
"RequestExecutingAgent", "Loops/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "Processed", "Request Processed",
#.........这里部分代码省略.........