This article collects typical usage examples of the Ganga.Core.GangaThread.GangaThread class in Python. If you have been wondering what exactly the GangaThread class does, or how GangaThread is used in practice, the selected class code examples here may help.
A total of 15 code examples of the GangaThread class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
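Before the individual examples, a minimal sketch of the two patterns that recur throughout them may help: subclassing GangaThread and overriding its constructor (Examples 1-3, 5, 7 and 15), and wrapping a plain callable in a GangaThread via the target argument (Examples 4, 6, 8 and 10-14). The MyMonitor class, its run() override and the one-second poll interval below are illustrative assumptions, not taken from the Ganga sources.
import time
from Ganga.Core.GangaThread import GangaThread

class MyMonitor(GangaThread):
    # hypothetical subclass; name and behaviour are assumptions for illustration
    def __init__(self):
        GangaThread.__init__(self, name='MyMonitor', critical=False)

    def run(self):
        # poll until Ganga asks its threads to shut down
        while not self.should_stop():
            time.sleep(1)

def background_task():
    # plain callable handed to GangaThread via target, as in Examples 8 and 13
    pass

worker = GangaThread(name='MyWorker', target=background_task)
worker.start()
worker.join()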
Example 1: __init__
def __init__(self, name):
is_critical = not config['enable_multiThreadMon']
GangaThread.__init__(self, name, critical=is_critical)
self._currently_running_command = False
self._running_cmd = None
self._running_args = None
self._thread_name = name
Example 2: __init__
def __init__(self):
GangaThread.__init__(self, 'LGI_Pilot')
self.log = getLogger('LGI.Pilot.Thread')
if not os.path.exists(config['PilotScript']):
self.log.error('pilotjob script not found: '+config['PilotScript'])
if not os.path.exists(config['PilotDist']):
self.log.error('pilotjob tarball not found: '+config['PilotDist'])
Example 3: __init__
def __init__(self, session_name, sdir, fn, repo, afs):
GangaThread.__init__(self, name="SessionLockRefresher", critical=False)
self.session_name = session_name
self.sdir = sdir
self.fns = [fn]
self.repos = [repo]
self.afs = afs
self.FileCheckTimes = {}
Example 4: resolve_file_locations
def resolve_file_locations(dataset, sites=None, cloud=None, token='ATLASDATADISK', debug=False):
'''
Summarize the locations (in terms of site name) of the files of a dataset.
If the sites argument is given, the cloud and token arguments are ignored;
otherwise cloud and token are used to retrieve the sites from TiersOfATLAS.
'''
if not sites:
logger.debug('resolving sites with token: %s' % token)
sites = dm_util.get_srmv2_sites(cloud, token=token, debug=debug)
logger.debug('checking replicas at sites: %s' % str(sites))
replicas = {}
# preparing the queue for querying lfn
wq = Queue(len(sites))
for site in sites:
wq.put(site)
mylock = Lock()
def worker(id):
dq2 = DQ2()
while not wq.empty():
try:
site = wq.get(block=True, timeout=1)
replicaInfo = dq2.listFileReplicas(site, dataset)
logger.debug('resolving dataset files at %s, no files: %d' % (site,len(replicaInfo[0]['content'])) )
if replicaInfo:
mylock.acquire()
for guid in replicaInfo[0]['content']:
if guid not in replicas:
replicas[guid] = []
replicas[guid].append(site)
mylock.release()
except Empty:
pass
except DQException as err:
logger.warning(str(err))
logger.warning('site %s excluded' % site)
pass
threads = []
nthread = len(sites)
if nthread > 10: nthread = 10
for i in range(nthread):
t = GangaThread(name='stager_ds_w_%d' % i, target=worker, kwargs={'id': i})
# t.setDaemon(False)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
return replicas
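Purely for illustration, a hypothetical call to the helper above; the dataset name and cloud are made up, and a configured DQ2/TiersOfATLAS environment is assumed:
replicas = resolve_file_locations('data15_13TeV.00267073.physics_Main.DAOD',
                                  cloud='DE', token='ATLASDATADISK')
for guid, sites in replicas.items():
    print('%s replicated at %d site(s)' % (guid, len(sites)))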
Example 5: __init__
def __init__(self):
GangaThread.__init__(self, 'LGI_Resource')
self.log = getLogger('LGI.Resource.Thread')
config = Config.getConfig('LGI')
if not os.path.exists(config['PilotDist']):
self.log.error('cannot connect to LGI server: pilotjob tarball not found: '+config['PilotDist'])
self.res = LGI.Resource(config['PilotDist'])
# number of queued LGI jobs
self.queued = None
Example 6: __resolve_containers
def __resolve_containers(self, containers, nthreads=10):
'''resolving dataset containers'''
datasets = {}
wq = Queue(len(containers))
for ds in containers:
wq.put(ds)
mylock = Lock()
def worker(id):
dq2 = DQ2()
while not wq.empty():
try:
ds = wq.get(block=True, timeout=1)
logger.debug('worker id: %d on dataset container: %s' % (id, ds))
datasets[ds] = []
ds_tmp = dq2.listDatasetsInContainer(ds)
mylock.acquire()
datasets[ds] = ds_tmp
mylock.release()
except DQException as err:
logger.warning(str(err))
except Empty:
pass
profiler = ElapsedTimeProfiler(logger=logger)
profiler.start()
threads = []
for i in range(nthreads):
t = GangaThread(name='stager_ds_w_%d' % i, target=worker, kwargs={'id': i})
# t.setDaemon(False)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
profiler.check('resolving %d containers' % len(containers))
return datasets
Example 7: __init__
def __init__(self, registry):
GangaThread.__init__(self, name="JobRegistry_Monitor")
log.debug("Constructing JobRegistry_Monitor")
self.setDaemon(True)
self.registry = registry
self.__sleepCounter = 0.0
self.__updateTimeStamp = time.time()
self.progressCallback = lambda x: None
self.callbackHookDict = {}
self.clientCallbackDict = {}
self.alive = True
self.enabled = False
# run the monitoring loop continuously (steps=-1) or just a specified
# number of steps (>0)
self.steps = -1
self.activeBackends = {}
self.updateJobStatus = None
self.errors = {}
self.updateDict_ts = SynchronisedObject(UpdateDict())
# Create the default backend update method and add to callback hook.
self.makeUpdateJobStatusFunction()
# Add credential checking to monitoring loop
for _credObj in Credentials._allCredentials.itervalues():
log.debug("Setting callback hook for %s" % getName(_credObj))
self.setCallbackHook(self.makeCredCheckJobInsertor(_credObj), {}, True, timeout=config['creds_poll_rate'])
# Add low disk-space checking to monitoring loop
log.debug("Setting callback hook for disk space checking")
self.setCallbackHook(self.diskSpaceCheckJobInsertor, {}, True, timeout=config['diskspace_poll_rate'])
# synch objects
# main loop mutex
self.__mainLoopCond = threading.Condition()
# cleanup synch
self.__cleanUpEvent = threading.Event()
# asynch mon loop running synch
self.__monStepsTerminatedEvent = threading.Event()
# event to signal the break of job lists iterators
self.stopIter = threading.Event()
self.stopIter.set()
self._runningNow = False
Example 8: startup
def startup(self):
""" Start a background thread that periodically run()s"""
super(TaskRegistry, self).startup()
from Ganga.Core.GangaThread import GangaThread
self._main_thread = GangaThread(name="GangaTasks", target=self._thread_main)
self._main_thread.start()
# create a registry flusher
self.flush_thread = RegistryFlusher(self)
self.flush_thread.start()
Example 9: start
def start(self):
config = Config.getConfig("LGI")
if config["StatsInterval"] == 0:
self.log.debug("Not starting LGI stats thread because [LGI]StatsInterval is zero")
return
if not config["StatsFile"]:
self.log.debug("Not starting LGI stats thread because [LGI]StatsFile is empty")
return
if config["Enable"] is False:
self.log.debug("Not starting LGI stats thread because [LGI]Enable is False")
return False
return GangaThread.start(self)
Example 10: __init_worker_threads
def __init_worker_threads(self, num_worker_threads, worker_thread_prefix):
if len(self.__worker_threads) > 0:
logger.warning("Threads already started!")
for i in self.__worker_threads:
logger.info("Worker Thread: %s is already running!" % i.gangaName)
return
for i in range(num_worker_threads):
t = GangaThread(name=worker_thread_prefix + str(i), auto_register=False, target=self.__worker_thread)
t._Thread__args = (t,)
t._name = worker_thread_prefix + str(i)
t._command = "idle"
t._timeout = "N/A"
t.start()
self.__worker_threads.append(t)
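The __worker_thread target referenced above is not shown in this example. Purely as an assumption-labelled sketch, such a worker loop typically drains a task queue and updates the bookkeeping attributes (_command, _timeout) set in __init_worker_threads; the self.__queue name and the (function, args) task layout below are hypothetical:
from Queue import Empty  # Python 2 Queue module, as used in the other examples

def __worker_thread(self, thread):
    # hypothetical sketch of a worker loop; the queue and task layout are assumptions
    while not thread.should_stop():
        try:
            function, args = self.__queue.get(block=True, timeout=1)
        except Empty:
            continue
        thread._command = getattr(function, '__name__', 'unknown')
        try:
            function(*args)
        finally:
            thread._command = 'idle'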
Example 11: get_complete_files_replicas
def get_complete_files_replicas(self, nthread=10, diskOnly=True):
'''Gets comprehensive dataset information about the contents and the
locations of COMPLETE replicas.'''
if not self.complete_files_replicas:
re_tapeSite = re.compile('.*TAPE$')
ds_info = {}
self.__expand_datasets()
wq = Queue(len(self.dataset))
for ds in self.dataset:
wq.put(ds)
mylock = Lock()
def worker(id):
dq2 = DQ2()
while not wq.empty():
try:
ds = wq.get(block=True, timeout=1)
logger.debug('worker id: %d on dataset: %s' % (id, ds))
# get contents (guids) of the complete dataset
contents = dq2.listFilesInDataset(ds)
# get locations of the complete dataset replicas
locations = dq2.listDatasetReplicas(ds,complete=1)
vuid = None
try:
vuid = locations.keys()[0]
except IndexError as err:
pass
mylock.acquire()
# update the ds_info hashtable
if vuid:
ds_info[ds] = []
ds_sites = []
if diskOnly:
for site in locations[vuid][1]:
if not re_tapeSite.match(site):
ds_sites.append(site)
else:
ds_sites = locations[vuid][1]
ds_info[ds] += [ contents[0], ds_sites ]
else:
logger.warning('dataset not available: %s' % ds)
mylock.release()
except DQException as err:
logger.warning(str(err))
except Empty:
pass
# prepare and run the query threads
profiler = ElapsedTimeProfiler(logger=logger)
profiler.start()
threads = []
for i in range(nthread):
t = GangaThread(name='stager_ds_w_%d' % i, target=worker, kwargs={'id': i})
# t.setDaemon(False)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
self.complete_files_replicas = ds_info
profiler.check('information collected: %d datasets' % len(self.complete_files_replicas.keys()))
else:
logger.debug('using cached complete_files_replicas')
pass
return self.complete_files_replicas
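The dictionary built above maps each dataset name to a two-element list: the guid-to-file-info mapping returned by listFilesInDataset and the list of sites holding a COMPLETE replica. A hypothetical way to consume it (the stager variable stands for an instance of the surrounding class and is an assumption):
info = stager.get_complete_files_replicas(nthread=10, diskOnly=True)
for ds, (files, sites) in info.items():
    print('%s: %d files, complete replicas at %d site(s)' % (ds, len(files), len(sites)))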
Example 12: TaskRegistry
#......... part of the code omitted here .........
self[tid]._getWriteAccess()
self[tid].startup()
except RegistryError:
continue
except Exception as err:
logger.error("Unknown/Unexpected Error in starting up tasks main loop")
logger.error("Exiting: err=%s" % str(err))
return
logger.debug("Entering main loop")
# Main loop
while self._main_thread is not None and not self._main_thread.should_stop():
# For each task try to run it
if config['ForceTaskMonitoring'] or monitoring_component.enabled:
for tid in self.ids():
logger.debug("Running over tid: %s" % str(tid))
try:
from Ganga.GPIDev.Lib.Tasks import ITask
if isType(self[tid], ITask):
# for new ITasks, always need write access
self[tid]._getWriteAccess()
p = self[tid]
else:
if self[tid].status in ["running", "running/pause"]:
self[tid]._getWriteAccess()
p = self[tid]
elif self[tid].status == 'completed' and (self[tid].n_status('ready') or self[tid].n_status('running')):
self[tid].updateStatus()
continue
else:
continue
except RegistryError:
# could not acquire lock
continue
if self._main_thread.should_stop():
break
try:
from Ganga.GPIDev.Lib.Tasks import ITask
if isType(self[tid], ITask):
# for new ITasks, always call update()
p.update()
else:
# TODO: Make this user-configurable and add better
# error message
if (p.n_status("failed") * 100.0 / (20 + p.n_status("completed")) > 20):
p.pause()
logger.error("Task %s paused - %i jobs have failed while only %i jobs have completed successfully." % (
p.name, p.n_status("failed"), p.n_status("completed")))
logger.error(
"Please investigate the cause of the failing jobs and then remove the previously failed jobs using job.remove()")
logger.error(
"You can then continue to run this task with tasks(%i).run()" % p.id)
continue
numjobs = p.submitJobs()
if numjobs > 0:
self._flush([p])
# finalise any required transforms
p.finaliseTransforms()
p.updateStatus()
except Exception as x:
logger.error(
"Exception occurred in task monitoring loop: %s %s\nThe offending task was paused." % (x.__class__, x))
type_, value_, traceback_ = sys.exc_info()
logger.error("Full traceback:\n %s" % ' '.join(
traceback.format_exception(type_, value_, traceback_)))
p.pause()
if self._main_thread.should_stop():
break
if self._main_thread.should_stop():
break
logger.debug("TaskRegistry Sleeping for: %s seconds" % str(config['TaskLoopFrequency']))
# Sleep interruptible for 10 seconds
for i in range(0, int(config['TaskLoopFrequency'] * 100)):
if self._main_thread.should_stop():
break
time.sleep(0.01)
def startup(self):
""" Start a background thread that periodically run()s"""
super(TaskRegistry, self).startup()
from Ganga.Core.GangaThread import GangaThread
self._main_thread = GangaThread(name="GangaTasks", target=self._thread_main)
self._main_thread.start()
def shutdown(self):
super(TaskRegistry, self).shutdown()
def stop(self):
if self._main_thread is not None:
self._main_thread.stop()
Example 13: hc_copy_thread
logger.info('HC Plot Summarize Thread: Disconnected.')
def hc_copy_thread():
test_sleep(60)
logger.info('HC Copy Thread: Connected to DB')
while (test_active() and not test_paused() and not ct.should_stop()):
logger.debug('HC Copy Thread: TOP OF MAIN LOOP')
for job in jobs:
if test_paused() or ct.should_stop():
break
copyJob(job)
test_sleep(30)
logger.info('HC Copy Thread: Disconnected from DB')
ct = GangaThread(name="HCCopyThread", target=hc_copy_thread)
pt = GangaThread(name="HCPlotSummary", target=hc_plot_summarize)
logger.info('Connected to DB')
if len(jobs):
ct.start()
pt.start()
while (test_active() and not test_paused()):
# We need to refresh the test object
test = Test.objects.get(pk=testid)
try:
print_summary()
Example 14: process_subjob
for job in jobs:
for subjob in job.subjobs:
try:
process_subjob(job,subjob,thread_dirac_server)
except:
logger.warning('Exception in process_subjob:')
logger.warning(sys.exc_info()[0])
logger.warning(sys.exc_info()[1])
if test_paused() or ct.should_stop():
break
test_sleep(10)
logger.info('HC Monitor Thread: Disconnected from DB')
ct = GangaThread(name="HCMonitorThread", target=hc_monitor_thread)
logger.info('Connected to DB')
if len(jobs):
# Wait one minute, to let the minute counter update, and avoid
# problems with the at command submitting on second = 0
test_sleep(60)
ct.start()
test_sleep(60)
while (test_active() and not test_paused()):
test = Test.objects.get(pk=testid)
# logger.info('HC Copy Thread: TOP OF MAIN LOOP')
for job in jobs:
if not test_active() or test_paused() or ct.should_stop():
Example 15: __init__
def __init__(self):
GangaThread.__init__(self, "LGI_Stats")
self.log = getLogger("LGI.Stats.Thread")