本文整理汇总了Python中DIRAC.Core.Utilities.DictCache.DictCache.get方法的典型用法代码示例。如果您正苦于以下问题:Python DictCache.get方法的具体用法?Python DictCache.get怎么用?Python DictCache.get使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类DIRAC.Core.Utilities.DictCache.DictCache
的用法示例。
在下文中一共展示了DictCache.get方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: StorageElementCache
# 需要导入模块: from DIRAC.Core.Utilities.DictCache import DictCache [as 别名]
# 或者: from DIRAC.Core.Utilities.DictCache.DictCache import get [as 别名]
class StorageElementCache(object):
  """Callable factory caching StorageElementItem objects per (thread, name, plugins, vo, proxy)."""

  def __init__(self):
    # TTL cache; keys are the argTuple built in __call__
    self.seCache = DictCache()

  def __call__(self, name, plugins=None, vo=None, hideExceptions=False):
    """Return a cached StorageElementItem for ``name``, building one on a miss.

    :param name: storage element name
    :param plugins: optional plugins selection, part of the cache key
    :param vo: VO name; when not given it is resolved from the current proxy group
    :param hideExceptions: forwarded to StorageElementItem
    :return: StorageElementItem, or None when the VO cannot be resolved
    """
    # Drop entries unused/expired for the last minute before looking up
    self.seCache.purgeExpired(expiredInSeconds=60)
    tId = threading.current_thread().ident
    if not vo:
      result = getVOfromProxyGroup()
      if not result['OK']:
        # NOTE(review): silently returns None on failure -- callers must cope
        return
      vo = result['Value']
    # Because the gfal2 context caches the proxy location,
    # we also use the proxy location as a key.
    # In practice, there should almost always be one, except for the REA
    # If we see its memory consumption exploding, this might be a place to look
    proxyLoc = getProxyLocation()
    argTuple = (tId, name, plugins, vo, proxyLoc)
    seObj = self.seCache.get(argTuple)
    if not seObj:
      seObj = StorageElementItem(name, plugins, vo, hideExceptions=hideExceptions)
      # Add the StorageElement to the cache for 1/2 hour
      self.seCache.add(argTuple, 1800, seObj)
    return seObj
示例2: __init__
# 需要导入模块: from DIRAC.Core.Utilities.DictCache import DictCache [as 别名]
# 或者: from DIRAC.Core.Utilities.DictCache.DictCache import get [as 别名]
class PlotCache:
  """Disk-backed cache of generated plot PNG files with a background purge thread."""

  def __init__( self, plotsLocation = False ):
    self.plotsLocation = plotsLocation
    self.alive = True
    # Expired graph entries are removed from disk via _deleteGraph
    self.__graphCache = DictCache( deleteFunction = _deleteGraph )
    # Seconds a generated plot stays cached
    self.__graphLifeTime = 600
    self.purgeThread = threading.Thread( target = self.purgeExpired )
    self.purgeThread.setDaemon( 1 )
    self.purgeThread.start()

  def setPlotsLocation( self, plotsDir ):
    """Set the plots directory and delete any leftover .png files found in it."""
    self.plotsLocation = plotsDir
    for plot in os.listdir( self.plotsLocation ):
      if plot.find( ".png" ) > 0:
        plotLocation = "%s/%s" % ( self.plotsLocation, plot )
        gLogger.verbose( "Purging %s" % plotLocation )
        os.unlink( plotLocation )

  def purgeExpired( self ):
    """Background loop: drop expired graph entries once per graph lifetime."""
    while self.alive:
      time.sleep( self.__graphLifeTime )
      self.__graphCache.purgeExpired()

  def getPlot( self, plotHash, plotData, plotMetadata, subplotMetadata ):
    """
    Get plot from the cache if exists, else generate it

    :return: S_OK( plotDict ) | S_ERROR from the graph generation
    """
    plotDict = self.__graphCache.get( plotHash )
    # DictCache.get returns False on a miss; keep the explicit comparison so a
    # cached falsy-but-valid value would not be confused with a miss
    if plotDict == False:
      basePlotFileName = "%s/%s.png" % ( self.plotsLocation, plotHash )
      if subplotMetadata:
        retVal = graph( plotData, basePlotFileName, plotMetadata, metadata = subplotMetadata )
      else:
        retVal = graph( plotData, basePlotFileName, plotMetadata )
      if not retVal[ 'OK' ]:
        return retVal
      plotDict = retVal[ 'Value' ]
      if plotDict[ 'plot' ]:
        plotDict[ 'plot' ] = os.path.basename( basePlotFileName )
      self.__graphCache.add( plotHash, self.__graphLifeTime, plotDict )
    return S_OK( plotDict )

  def getPlotData( self, plotFileName ):
    """Read a generated plot file and return its bytes wrapped in S_OK."""
    filename = "%s/%s" % ( self.plotsLocation, plotFileName )
    try:
      # 'with' guarantees the descriptor is closed even if read() raises;
      # the original used the removed file() builtin and leaked the fd on error
      with open( filename, "rb" ) as v:
        data = v.read()
    except Exception as v:
      return S_ERROR( "Can't open file %s: %s" % ( plotFileName, str( v ) ) )
    return S_OK( data )
示例3: StorageElementCache
# 需要导入模块: from DIRAC.Core.Utilities.DictCache import DictCache [as 别名]
# 或者: from DIRAC.Core.Utilities.DictCache.DictCache import get [as 别名]
class StorageElementCache(object):
  """Memoizing factory: returns one StorageElementItem per (name, protocols, vo)."""

  def __init__(self):
    # Underlying TTL cache mapping (name, protocols, vo) -> StorageElementItem
    self.seCache = DictCache()

  def __call__(self, name, protocols=None, vo=None, hideExceptions=False):
    """Return a cached StorageElementItem, constructing it on a cache miss."""
    # Evict anything unused for the last minute before the lookup
    self.seCache.purgeExpired(expiredInSeconds=60)
    cacheKey = (name, protocols, vo)
    storageElement = self.seCache.get(cacheKey)
    if not storageElement:
      storageElement = StorageElementItem(name, protocols, vo, hideExceptions=hideExceptions)
      # Keep the freshly built StorageElement for half an hour
      self.seCache.add(cacheKey, 1800, storageElement)
    return storageElement
示例4: RSSCache
# 需要导入模块: from DIRAC.Core.Utilities.DictCache import DictCache [as 别名]
# 或者: from DIRAC.Core.Utilities.DictCache.DictCache import get [as 别名]
class RSSCache( object ):
'''
Cache with purgeThread integrated
'''
def __init__( self, lifeTime, updateFunc = None, cacheHistoryLifeTime = None ):
  '''
  Constructor

  :param lifeTime: lifetime ( seconds ) of the cached entries
  :param updateFunc: optional function used to refresh the cache contents
  :param cacheHistoryLifeTime: lifetime of the update history, in hours ( default 24 )
  '''
  self.__lifeTime = lifeTime
  # lifetime of the history on hours
  # NOTE(review): "( 1 and x ) or 24" is just "x or 24" -- any falsy value
  # (None, but also 0) falls back to 24
  self.__cacheHistoryLifeTime = ( 1 and cacheHistoryLifeTime ) or 24
  self.__updateFunc = updateFunc
  # RSSCache
  self.__rssCache = DictCache()
  self.__rssCacheStatus = [] # ( updateTime, message )
  self.__rssCacheLock = threading.Lock()
  # Create purgeThread; it is not started here -- see startRefreshThread
  self.__refreshStop = False
  self.__refreshThread = threading.Thread( target = self.__refreshCacheThreadRun )
  self.__refreshThread.setDaemon( True )
def startRefreshThread( self ):
  '''
  Run refresh thread.
  '''
  self.__refreshThread.start()

def stopRefreshThread( self ):
  '''
  Stop refresh thread. Only sets the stop flag; the thread exits on its
  next iteration, it is not interrupted.
  '''
  self.__refreshStop = True

def isCacheAlive( self ):
  '''
  Returns status of the cache refreshing thread

  :return: S_OK( bool )
  '''
  return S_OK( self.__refreshThread.isAlive() )

def setLifeTime( self, lifeTime ):
  '''
  Set cache life time ( seconds )
  '''
  self.__lifeTime = lifeTime

def setCacheHistoryLifeTime( self, cacheHistoryLifeTime ):
  '''
  Set cache history life time ( hours )
  '''
  self.__cacheHistoryLifeTime = cacheHistoryLifeTime
def getCacheKeys( self ):
  '''
  List all the keys stored in the cache.

  :return: S_OK( list of keys )
  '''
  # Use the lock as a context manager so it is released even if getKeys()
  # raises; the plain acquire()/release() pair leaked the lock on error.
  with self.__rssCacheLock:
    keys = self.__rssCache.getKeys()
  return S_OK( keys )
def acquireLock( self ):
  '''
  Acquires RSSCache lock ( blocking ). Caller is responsible for the
  matching releaseLock call.
  '''
  self.__rssCacheLock.acquire()

def releaseLock( self ):
  '''
  Releases RSSCache lock
  '''
  self.__rssCacheLock.release()
def getCacheStatus( self ):
  '''
  Return the latest cache status

  :return: S_OK( { updateTime : message } ) with the most recent history
           entry, or S_OK( {} ) when there is no history yet
  '''
  # Context manager guarantees the lock is released even on exception;
  # the plain acquire()/release() pair leaked the lock if dict() raised.
  with self.__rssCacheLock:
    if self.__rssCacheStatus:
      res = dict( [ self.__rssCacheStatus[ 0 ] ] )
    else:
      res = {}
  return S_OK( res )
def getCacheHistory( self ):
  '''
  Return the cache updates history

  :return: S_OK( dict( updateTime -> message ) )
  '''
  # Context manager guarantees the lock is released even on exception;
  # the plain acquire()/release() pair leaked the lock if dict() raised.
  with self.__rssCacheLock:
    res = dict( self.__rssCacheStatus )
  return S_OK( res )
def get( self, resourceKey ):
'''
#.........这里部分代码省略.........
示例5: __init__
# 需要导入模块: from DIRAC.Core.Utilities.DictCache import DictCache [as 别名]
# 或者: from DIRAC.Core.Utilities.DictCache.DictCache import get [as 别名]
class DataCache:
  """Caches accounting report data and generated graph image files."""

  def __init__( self ):
    self.graphsLocation = os.path.join( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), 'data', 'accountingPlots' )
    self.cachedGraphs = {}
    self.alive = True
    # Create the caches BEFORE starting the purge thread that touches them,
    # so the thread can never observe a partially initialised object
    # (the original started the thread first and relied on the 600 s sleep).
    self.__dataCache = DictCache()
    # Expired graph entries delete their files via _deleteGraph
    # NOTE(review): _deleteGraph is not defined in this excerpt -- confirm it
    # exists on the class.
    self.__graphCache = DictCache( deleteFunction = self._deleteGraph )
    self.__dataLifeTime = 600
    self.__graphLifeTime = 3600
    self.purgeThread = threading.Thread( target = self.purgeExpired )
    self.purgeThread.setDaemon( 1 )
    self.purgeThread.start()

  def setGraphsLocation( self, graphsDir ):
    """Set the graphs directory and delete any leftover .png files found in it."""
    self.graphsLocation = graphsDir
    for graphName in os.listdir( self.graphsLocation ):
      if graphName.find( ".png" ) > 0:
        graphLocation = "%s/%s" % ( self.graphsLocation, graphName )
        gLogger.verbose( "Purging %s" % graphLocation )
        os.unlink( graphLocation )

  def purgeExpired( self ):
    """Background loop: purge expired graphs and report data every 10 minutes."""
    while self.alive:
      time.sleep( 600 )
      self.__graphCache.purgeExpired()
      self.__dataCache.purgeExpired()

  def getReportData( self, reportRequest, reportHash, dataFunc ):
    """
    Get report data from cache if exists, else generate it
    """
    reportData = self.__dataCache.get( reportHash )
    # DictCache.get returns False on a miss; keep the explicit comparison
    if reportData == False:
      retVal = dataFunc( reportRequest )
      if not retVal[ 'OK' ]:
        return retVal
      reportData = retVal[ 'Value' ]
      self.__dataCache.add( reportHash, self.__dataLifeTime, reportData )
    return S_OK( reportData )

  def getReportPlot( self, reportRequest, reportHash, reportData, plotFunc ):
    """
    Get report plot from cache if exists, else generate it
    """
    plotDict = self.__graphCache.get( reportHash )
    if plotDict == False:
      basePlotFileName = "%s/%s" % ( self.graphsLocation, reportHash )
      retVal = plotFunc( reportRequest, reportData, basePlotFileName )
      if not retVal[ 'OK' ]:
        return retVal
      plotDict = retVal[ 'Value' ]
      if plotDict[ 'plot' ]:
        plotDict[ 'plot' ] = "%s.png" % reportHash
      if plotDict[ 'thumbnail' ]:
        plotDict[ 'thumbnail' ] = "%s.thb.png" % reportHash
      self.__graphCache.add( reportHash, self.__graphLifeTime, plotDict )
    return S_OK( plotDict )

  def getPlotData( self, plotFileName ):
    """Read a generated plot file and return its bytes wrapped in S_OK."""
    filename = "%s/%s" % ( self.graphsLocation, plotFileName )
    try:
      # 'with' closes the descriptor even if read() raises; the original used
      # the removed file() builtin and leaked the fd on a read error
      with open( filename, "rb" ) as e:
        data = e.read()
    except Exception as e:
      return S_ERROR( "Can't open file %s: %s" % ( plotFileName, str( e ) ) )
    return S_OK( data )
示例6: GatewayService
# 需要导入模块: from DIRAC.Core.Utilities.DictCache import DictCache [as 别名]
# 或者: from DIRAC.Core.Utilities.DictCache.DictCache import get [as 别名]
class GatewayService( Service ):
GATEWAY_NAME = "Framework/Gateway"
def __init__( self ):
  """Create the gateway service and its delegated-credentials cache."""
  Service.__init__( self, GatewayService.GATEWAY_NAME )
  # TTL cache of delegated proxies; see __getClientInitArgs for the key layout
  self.__delegatedCredentials = DictCache()
  # 100 MiB transfer limit
  self.__transferBytesLimit = 1024 * 1024 * 100
def initialize( self ):
  """
  Build the service URL and set up monitoring, the thread pool and the
  message broker / forwarder used to relay client messages.

  :return: S_OK | S_ERROR when the service URL cannot be built
  """
  #Build the URLs
  self._url = self._cfg.getURL()
  if not self._url:
    return S_ERROR( "Could not build service URL for %s" % GatewayService.GATEWAY_NAME )
  gLogger.verbose( "Service URL is %s" % self._url )
  #Discover Handler
  self._initMonitoring()
  # One permanent thread, up to the configured max workers / waiting petitions
  self._threadPool = ThreadPool( 1,
                                 max( 0, self._cfg.getMaxThreads() ),
                                 self._cfg.getMaxWaitingPetitions() )
  self._threadPool.daemonize()
  self._msgBroker = MessageBroker( "%sMSB" % GatewayService.GATEWAY_NAME, threadPool = self._threadPool )
  # Forward raw messages without instantiating message objects
  self._msgBroker.useMessageObjects( False )
  getGlobalMessageBroker().useMessageObjects( False )
  self._msgForwarder = MessageForwarder( self._msgBroker )
  return S_OK()
#Threaded process function
def _processInThread( self, clientTransport ):
  """
  Handle one client connection end to end: handshake, proposal reception
  and validation, client-init-args extraction, action dispatch.

  Returns None on every early-failure path ( the error has already been
  sent to the client ); otherwise the dispatch result dict.
  """
  #Handshake
  try:
    clientTransport.handshake()
  except:
    # Deliberate best-effort: a failed handshake simply drops the connection
    return
  #Add to the transport pool
  trid = self._transportPool.add( clientTransport )
  if not trid:
    return
  #Receive and check proposal
  result = self._receiveAndCheckProposal( trid )
  if not result[ 'OK' ]:
    self._transportPool.sendAndClose( trid, result )
    return
  proposalTuple = result[ 'Value' ]
  #Instantiate handler
  result = self.__getClientInitArgs( trid, proposalTuple )
  if not result[ 'OK' ]:
    self._transportPool.sendAndClose( trid, result )
    return
  clientInitArgs = result[ 'Value' ]
  #Execute the action
  result = self._processProposal( trid, proposalTuple, clientInitArgs )
  #Close the connection if required
  if result[ 'closeTransport' ]:
    self._transportPool.close( trid )
  return result
def _receiveAndCheckProposal( self, trid ):
  """
  Receive the client's action proposal on transport ``trid`` and validate it.

  :return: S_OK( proposalTuple ) | S_ERROR on an invalid proposal
  """
  clientTransport = self._transportPool.get( trid )
  #Get the peer credentials
  credDict = clientTransport.getConnectingCredentials()
  #Receive the action proposal
  # NOTE(review): 1024 is presumably a receive buffer/size limit -- confirm
  # against the Transport API
  retVal = clientTransport.receiveData( 1024 )
  if not retVal[ 'OK' ]:
    gLogger.error( "Invalid action proposal", "%s %s" % ( self._createIdentityString( credDict,
                                                                                      clientTransport ),
                                                          retVal[ 'Message' ] ) )
    return S_ERROR( "Invalid action proposal" )
  proposalTuple = retVal[ 'Value' ]
  gLogger.debug( "Received action from client", "/".join( list( proposalTuple[1] ) ) )
  #Check if there are extra credentials
  if proposalTuple[2]:
    clientTransport.setExtraCredentials( proposalTuple[2] )
  return S_OK( proposalTuple )
def __getClientInitArgs( self, trid, proposalTuple ):
clientTransport = self._transportPool.get( trid )
#Get the peer credentials
credDict = clientTransport.getConnectingCredentials()
if 'x509Chain' not in credDict:
return S_OK()
cKey = ( credDict[ 'DN' ],
credDict.get( 'group', False ),
credDict.get( 'extraCredentials', False ),
credDict[ 'isLimitedProxy' ] )
dP = self.__delegatedCredentials.get( cKey, 3600 )
idString = self._createIdentityString( credDict, clientTransport )
if dP:
gLogger.verbose( "Proxy for %s is cached" % idString )
return S_OK( dP )
result = self.__requestDelegation( clientTransport, credDict )
if not result[ 'OK' ]:
gLogger.warn( "Could not get proxy for %s: %s" % ( idString, result[ 'Message' ] ) )
return result
delChain = result[ 'Value' ]
delegatedChain = delChain.dumpAllToString()[ 'Value' ]
secsLeft = delChain.getRemainingSecs()[ 'Value' ] - 1
clientInitArgs = {
BaseClient.KW_SETUP : proposalTuple[0][1],
BaseClient.KW_TIMEOUT : 600,
#.........这里部分代码省略.........
示例7: __init__
# 需要导入模块: from DIRAC.Core.Utilities.DictCache import DictCache [as 别名]
# 或者: from DIRAC.Core.Utilities.DictCache.DictCache import get [as 别名]
class CredentialsClient:
CONSUMER_GRACE_TIME = 3600
REQUEST_GRACE_TIME = 900
def __init__( self, RPCFunctor = None ):
  """
  :param RPCFunctor: factory used to build the RPC client; defaults to RPCClient
  """
  if not RPCFunctor:
    self.__RPCFunctor = RPCClient
  else:
    self.__RPCFunctor = RPCFunctor
  # TTL caches for the three credential object kinds
  self.__tokens = DictCache()
  self.__requests = DictCache()
  # Deleting a consumer also drops its tokens/requests, see __cleanConsumerCache
  self.__consumers = DictCache( deleteFunction = self.__cleanConsumerCache )
def __getRPC( self ):
  """Build a fresh RPC client for the WebAPI/Credentials service."""
  return self.__RPCFunctor( "WebAPI/Credentials" )

def __cleanReturn( self, result ):
  """Strip the internal 'rpcStub' entry from an RPC result before returning it."""
  if 'rpcStub' in result:
    result.pop( 'rpcStub' )
  return result
##
# Consumer
##
def generateConsumerPair( self, name, callback, icon, consumerKey = "" ):
  """
  Ask the service to create a consumer pair and cache the returned data.

  NOTE(review): the result is cached under the *requested* consumerKey
  ( possibly "" ), not under a key from the service reply -- verify intended.
  """
  result = self.__getRPC().generateConsumerPair( name, callback, icon, consumerKey )
  if not result[ 'OK' ]:
    return self.__cleanReturn( result )
  self.__consumers.add( consumerKey, self.CONSUMER_GRACE_TIME, result[ 'Value' ] )
  return self.__cleanReturn( result )
def getConsumerData( self, consumerKey ):
  """
  Return consumer data, from the local cache when possible, otherwise
  from the service ( refreshing the cache on success ).
  """
  cData = self.__consumers.get( consumerKey )
  if cData:
    return S_OK( cData )
  result = self.__getRPC().getConsumerData( consumerKey )
  if not result[ 'OK' ]:
    return self.__cleanReturn( result )
  self.__consumers.add( consumerKey, self.CONSUMER_GRACE_TIME, result[ 'Value' ] )
  return self.__cleanReturn( result )
def deleteConsumer( self, consumerKey ):
  """
  Drop the consumer locally and remotely; on remote success also purge the
  tokens/requests that belonged to it.
  """
  self.__consumers.delete( consumerKey )
  result = self.__getRPC().deleteConsumer( consumerKey )
  if result[ 'OK' ]:
    self.__cleanConsumerCache( { 'key' : consumerKey } )
  return self.__cleanReturn( result )
def getAllConsumers( self ):
  """
  Fetch all consumers from the service and refresh the local cache with
  each returned record.

  :return: cleaned RPC result ( 'Parameters' / 'Records' table )
  """
  result = self.__getRPC().getAllConsumers()
  if not result[ 'OK' ]:
    return self.__cleanReturn( result )
  data = result[ 'Value' ]
  # Column name -> column index in each record
  # NOTE(review): assumes data[ 'Parameters' ] supports .find() -- confirm
  # the service's return type ( a list would need .index() instead )
  consIndex = { 'key': 0,
                'name' : 0,
                'callback' : 0,
                'secret' : 0,
                'icon' : 0 }
  for key in consIndex:
    consIndex[ key ] = data[ 'Parameters' ].find( key )
  for record in data[ 'Records' ]:
    consData = {}
    for key in consIndex:
      consData[ key ] = record[ consIndex[ key ] ]
    self.__consumers.add( consData[ 'key' ], self.CONSUMER_GRACE_TIME, consData )
  return self.__cleanReturn( result )
def __cleanConsumerCache( self, cData ):
  """
  DictCache delete hook: remove every token and request whose cache key
  starts with the deleted consumer's key ( keys are tuples ).
  """
  consumerKey = cData[ 'key' ]
  for dc in ( self.__tokens, self.__requests ):
    cKeys = dc.getKeys()
    for cKey in cKeys:
      if cKey[0] == consumerKey:
        dc.delete( cKey )
##
# Requests
##
def generateRequest( self, consumerKey, callback = "" ):
  """
  Ask the service to generate a request token and cache it for slightly
  less than its remote lifetime ( 5 s safety margin ).
  """
  result = self.__getRPC().generateRequest( consumerKey, callback )
  if not result[ 'OK' ]:
    return self.__cleanReturn( result )
  requestData = result[ 'Value' ]
  self.__requests.add( requestData[ 'request' ], result[ 'lifeTime' ] - 5, requestData )
  return self.__cleanReturn( result )
def getRequestData( self, request ):
  """
  Return request data, from the local cache when possible, otherwise from
  the service ( refreshing the cache on success ).
  """
  data = self.__requests.get( request )
  if data:
    return S_OK( data )
  result = self.__getRPC().getRequestData( request )
  if not result[ 'OK' ]:
    return self.__cleanReturn( result )
  # Bug fix: cache into __requests ( was __tokens ), matching the lookup
  # above and generateRequest -- otherwise this cache could never hit.
  self.__requests.add( request, result[ 'lifeTime' ] - 5, result[ 'Value' ] )
  return self.__cleanReturn( result )
def deleteRequest( self, request ):
#.........这里部分代码省略.........
示例8: Cache
# 需要导入模块: from DIRAC.Core.Utilities.DictCache import DictCache [as 别名]
# 或者: from DIRAC.Core.Utilities.DictCache.DictCache import get [as 别名]
class Cache( object ):
"""
Cache basic class.
WARNING: None of its methods is thread safe. Acquire / Release lock when
using them !
"""
def __init__( self, lifeTime, updateFunc ):
  """
  Constructor

  :Parameters:
    **lifeTime** - `int`
      Lifetime of the elements in the cache ( seconds ! )
    **updateFunc** - `function`
      This function MUST return a S_OK | S_ERROR object. In the case of the first,
      its value must be a dictionary.
  """
  # We set a 20% of the lifetime randomly, so that if we have thousands of jobs
  # starting at the same time, all the caches will not end at the same time.
  randomLifeTimeBias = 0.2 * random.random()
  self.log = gLogger.getSubLogger( self.__class__.__name__ )
  self.__lifeTime = int( lifeTime * ( 1 + randomLifeTimeBias ) )
  self.__updateFunc = updateFunc
  # The records returned from the cache must be valid at least 10 seconds.
  self.__validSeconds = 10
  # Cache
  self.__cache = DictCache()
  # Named lock ( one per concrete Cache subclass ) for acquireLock/releaseLock
  self.__cacheLock = LockRing()
  self.__cacheLock.getLock( self.__class__.__name__ )
#.............................................................................
# internal cache object getter

def cacheKeys( self ):
  """
  Cache keys getter. Only keys still valid for at least __validSeconds
  are returned.

  :returns: list with valid keys on the cache
  """
  return self.__cache.getKeys( validSeconds = self.__validSeconds )
#.............................................................................
# acquire / release Locks

def acquireLock( self ):
  """
  Acquires Cache lock ( blocking ); pair with releaseLock.
  """
  self.__cacheLock.acquire( self.__class__.__name__ )

def releaseLock( self ):
  """
  Releases Cache lock
  """
  self.__cacheLock.release( self.__class__.__name__)
#.............................................................................
# Cache getters

def get( self, cacheKeys ):
  """
  Fetch every key in `cacheKeys` from the internal cache. All of them must
  be present and still valid for at least __validSeconds, otherwise S_ERROR
  is returned.

  :Parameters:
    **cacheKeys** - `list`
      list of keys to be extracted from the cache

  :return: S_OK( dict key -> row ) | S_ERROR
  """
  fetched = {}
  for key in cacheKeys:
    row = self.__cache.get( key, validSeconds = self.__validSeconds )
    # A falsy row is treated the same as a missing one
    if not row:
      self.log.error( str( key ) )
      return S_ERROR( 'Cannot get %s' % str( key ) )
    fetched[ key ] = row
  return S_OK( fetched )
#.............................................................................
# Cache refreshers
def refreshCache( self ):
"""
Purges the cache and gets fresh data from the update function.
#.........这里部分代码省略.........
示例9: __init__
# 需要导入模块: from DIRAC.Core.Utilities.DictCache import DictCache [as 别名]
# 或者: from DIRAC.Core.Utilities.DictCache.DictCache import get [as 别名]
class ProxyManagerClient:
__metaclass__ = DIRACSingleton.DIRACSingleton
def __init__( self ):
  """Set up the TTL caches used to minimise queries to the ProxyManager service."""
  self.__usersCache = DictCache()
  self.__proxiesCache = DictCache()
  self.__vomsProxiesCache = DictCache()
  self.__pilotProxiesCache = DictCache()
  # Expired entries are temporary proxy files: delete them from disk
  self.__filesCache = DictCache( self.__deleteTemporalFile )
def __deleteTemporalFile( self, filename ):
  """Best-effort removal of a temporary proxy file ( DictCache delete hook )."""
  try:
    os.unlink( filename )
  except OSError:
    # File already gone or not removable: ignore, this is best-effort cleanup.
    # ( Was a bare 'except:', which also swallowed KeyboardInterrupt/SystemExit. )
    pass
def clearCaches( self ):
  """
  Purge the user/proxy caches.

  NOTE(review): __filesCache is deliberately(?) not purged here -- confirm
  whether temporary proxy files should survive a cache clear.
  """
  self.__usersCache.purgeAll()
  self.__proxiesCache.purgeAll()
  self.__vomsProxiesCache.purgeAll()
  self.__pilotProxiesCache.purgeAll()
def __getSecondsLeftToExpiration( self, expiration, utc = True ):
  """
  Seconds from now until `expiration` ( naive datetime; compared against
  UTC by default ). Microseconds are discarded; may be negative when the
  expiration is already in the past.
  """
  now = datetime.datetime.utcnow() if utc else datetime.datetime.now()
  delta = expiration - now
  return delta.days * 86400 + delta.seconds
def __refreshUserCache( self, validSeconds = 0 ):
  """
  Reload the registered-users table from the ProxyManager service and
  refresh the local users cache with each ( DN, group ) record.

  :return: S_OK | RPC error result
  """
  rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
  retVal = rpcClient.getRegisteredUsers( validSeconds )
  if not retVal[ 'OK' ]:
    return retVal
  data = retVal[ 'Value' ]
  #Update the cache
  for record in data:
    cacheKey = ( record[ 'DN' ], record[ 'group' ] )
    # Cache each record until its proxy actually expires
    self.__usersCache.add( cacheKey,
                           self.__getSecondsLeftToExpiration( record[ 'expirationtime' ] ),
                           record )
  return S_OK()
@gUsersSync
def userHasProxy( self, userDN, userGroup, validSeconds = 0 ):
  """
  Check if a user(DN-group) has a proxy in the proxy management
    - Updates internal cache if needed to minimize queries to the
      service

  :return: S_OK( bool ) | error from the cache refresh
  """
  cacheKey = ( userDN, userGroup )
  if self.__usersCache.exists( cacheKey, validSeconds ):
    return S_OK( True )
  #Get list of users from the DB with proxys at least 300 seconds
  gLogger.verbose( "Updating list of users in proxy management" )
  retVal = self.__refreshUserCache( validSeconds )
  if not retVal[ 'OK' ]:
    return retVal
  # Re-check after the refresh
  return S_OK( self.__usersCache.exists( cacheKey, validSeconds ) )
@gUsersSync
def getUserPersistence( self, userDN, userGroup, validSeconds = 0 ):
  """
  Return the persistency flag for a user(DN-group)
    - Updates internal cache if needed to minimize queries to the
      service

  :return: S_OK( bool ) | error from the cache refresh
  """
  cacheKey = ( userDN, userGroup )
  userData = self.__usersCache.get( cacheKey, validSeconds )
  if userData:
    if userData[ 'persistent' ]:
      return S_OK( True )
  #Get list of users from the DB with proxys at least 300 seconds
  gLogger.verbose( "Updating list of users in proxy management" )
  retVal = self.__refreshUserCache( validSeconds )
  if not retVal[ 'OK' ]:
    return retVal
  userData = self.__usersCache.get( cacheKey, validSeconds )
  if userData:
    return S_OK( userData[ 'persistent' ] )
  # Unknown user: report non-persistent
  return S_OK( False )
def setPersistency( self, userDN, userGroup, persistent ):
"""
Set the persistency for user/group
"""
#Hack to ensure bool in the rpc call
persistentFlag = True
if not persistent:
persistentFlag = False
rpcClient = RPCClient( "Framework/ProxyManager", timeout = 120 )
retVal = rpcClient.setPersistency( userDN, userGroup, persistentFlag )
if not retVal[ 'OK' ]:
return retVal
#Update internal persistency cache
cacheKey = ( userDN, userGroup )
record = self.__usersCache.get( cacheKey, 0 )
if record:
record[ 'persistent' ] = persistentFlag
self.__usersCache.add( cacheKey,
#.........这里部分代码省略.........
示例10: Limiter
# 需要导入模块: from DIRAC.Core.Utilities.DictCache import DictCache [as 别名]
# 或者: from DIRAC.Core.Utilities.DictCache.DictCache import get [as 别名]
class Limiter(object):
def __init__(self, jobDB=None, opsHelper=None):
  """ Constructor

  :param jobDB: JobDB instance; a new one is created when not supplied
  :param opsHelper: Operations helper; a new one is created when not supplied
  """
  self.__runningLimitSection = "JobScheduling/RunningLimit"
  self.__matchingDelaySection = "JobScheduling/MatchingDelay"
  # Short-lived caches for CS lookups and computed negative conditions
  self.csDictCache = DictCache()
  self.condCache = DictCache()
  self.delayMem = {}
  if jobDB:
    self.jobDB = jobDB
  else:
    self.jobDB = JobDB()
  self.log = gLogger.getSubLogger("Limiter")
  if opsHelper:
    self.__opsHelper = opsHelper
  else:
    self.__opsHelper = Operations()
def getNegativeCond(self):
  """ Get negative condition for ALL sites

  Combines per-site running-limit and matching-delay conditions into a
  list of per-site condition dicts, cached for 10 seconds.

  :return: list of condition dicts, each tagged with its 'Site'
  """
  orCond = self.condCache.get("GLOBAL")
  if orCond:
    return orCond
  negCond = {}
  # Run Limit
  result = self.__opsHelper.getSections(self.__runningLimitSection)
  sites = []
  if result['OK']:
    sites = result['Value']
  for siteName in sites:
    result = self.__getRunningCondition(siteName)
    if not result['OK']:
      # Skip sites whose running condition cannot be computed
      continue
    data = result['Value']
    if data:
      negCond[siteName] = data
  # Delay limit
  result = self.__opsHelper.getSections(self.__matchingDelaySection)
  sites = []
  if result['OK']:
    sites = result['Value']
  for siteName in sites:
    result = self.__getDelayCondition(siteName)
    if not result['OK']:
      continue
    data = result['Value']
    if not data:
      continue
    if siteName in negCond:
      negCond[siteName] = self.__mergeCond(negCond[siteName], data)
    else:
      negCond[siteName] = data
  # Flatten into a list of per-site dicts, tagging each with its site
  orCond = []
  for siteName in negCond:
    negCond[siteName]['Site'] = siteName
    orCond.append(negCond[siteName])
  self.condCache.add("GLOBAL", 10, orCond)
  return orCond
def getNegativeCondForSite(self, siteName):
  """ Generate a negative query based on the limits set on the site

  :param siteName: site to compute the negative condition for
  :return: dict of attribute -> excluded values ( possibly empty )
  """
  # Check if Limits are imposed onto the site
  negativeCond = {}
  if self.__opsHelper.getValue("JobScheduling/CheckJobLimits", True):
    result = self.__getRunningCondition(siteName)
    if result['OK']:
      negativeCond = result['Value']
    self.log.verbose('Negative conditions for site %s after checking limits are: %s' % (siteName, str(negativeCond)))
  if self.__opsHelper.getValue("JobScheduling/CheckMatchingDelay", True):
    result = self.__getDelayCondition(siteName)
    if result['OK']:
      delayCond = result['Value']
      self.log.verbose('Negative conditions for site %s after delay checking are: %s' % (siteName, str(delayCond)))
      negativeCond = self.__mergeCond(negativeCond, delayCond)
  if negativeCond:
    self.log.info('Negative conditions for site %s are: %s' % (siteName, str(negativeCond)))
  return negativeCond
def __mergeCond(self, negCond, addCond):
""" Merge two negative dicts
"""
# Merge both negative dicts
for attr in addCond:
if attr not in negCond:
negCond[attr] = []
for value in addCond[attr]:
if value not in negCond[attr]:
negCond[attr].append(value)
return negCond
#.........这里部分代码省略.........