This article collects typical usage examples of the ThreadPool.ThreadPool.is_empty method in Python. If you are asking how exactly to use ThreadPool.is_empty, or what it can be used for, the curated example below may help. You can also explore further usage examples of ThreadPool.ThreadPool, the class in which this method is defined.
One code example of the ThreadPool.is_empty method is shown below.
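Before the full example, here is a minimal sketch of what an is_empty() check on such a thread pool usually looks like and how a caller polls it. The SimpleThreadPool class, its add_task() method and the queue-based internals are hypothetical stand-ins written for illustration in the same Python 2 style as the example; the actual ThreadPool module used below is not reproduced on this page, and only the is_empty() polling pattern mirrors its run() loop.

# Hypothetical, minimal thread pool used only to illustrate the is_empty()
# pattern; this is NOT the project's ThreadPool implementation.
import Queue
import threading

class SimpleThreadPool:
    def __init__(self, num_workers=4):
        self.__queue = Queue.Queue()
        for _ in range(num_workers):
            worker = threading.Thread(target=self.__run_worker)
            worker.daemon = True
            worker.start()

    def __run_worker(self):
        # Each worker thread pulls (function, args) tuples until the process exits.
        while True:
            func, args = self.__queue.get()
            try:
                func(*args)
            finally:
                self.__queue.task_done()

    def add_task(self, func, *args):
        self.__queue.put((func, args))

    def is_empty(self):
        # True once no queued work items remain (tasks already handed to
        # worker threads may still be running).
        return self.__queue.empty()

# Polling pattern, analogous to the run() loop in the example below:
#   pool = SimpleThreadPool()
#   pool.add_task(some_function, some_argument)
#   while not pool.is_empty():
#       time.sleep(30)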
Example 1: EventStager
# Required import: from ThreadPool import ThreadPool [as alias]
# Or: from ThreadPool.ThreadPool import is_empty [as alias]
#......... part of the code is omitted here .........
            status, output = TimerCommand(cmd).run(600)
            logging.info("Status %s output %s" % (status, output))
            if status == 0:
                os.rename(gfalFile + "copying", gfalFile + "finished")
                return
            else:
                os.rename(gfalFile + "copying", gfalFile)
                os.rename(gfalFile, gfalFile + "failed")
        except:
            logging.error("handleGfalFile %s" % traceback.format_exc())
        finally:
            self.__handlingOthers -= 1

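    # handleS3File: read a queued "<source> <destination>" stage-out command,
    # copy the file with the S3 site mover, and rename the control file to
    # record success ("finished") or restore its original name on failure.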
    def handleS3File(self, s3File):
        try:
            s3File = os.path.join(self.__workDir, s3File)
            os.rename(s3File, s3File + "copying")
            handle = open(s3File + "copying")
            cmd = handle.read()
            handle.close()
            source, destination = cmd.split(" ")
            logging.info("S3 stage out from %s to %s" % (source, destination))
            ret_status, pilotErrorDiag, surl, size, checksum, self.arch_type = self.__siteMover.put_data(source, destination, lfn=os.path.basename(destination), report=self.__report, token=self.__token, experiment=self.__experiment, timeout=300)
            logging.info("Status %s output %s" % (ret_status, pilotErrorDiag))
            if ret_status == 0:
                os.rename(s3File + "copying", s3File + "finished")
            else:
                os.rename(s3File + "copying", s3File)
        except:
            logging.error("handleS3File %s" % traceback.format_exc())
        finally:
            self.__handlingOthers -= 1

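    # handleOtherFiles: start one subprocess per pending .gfalcmd/.s3cmd control
    # file and drop entries for subprocesses that have already terminated.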
    def handleOtherFiles(self):
        gfalFiles = self.getUnstagedOutputFiles(".gfalcmd")
        for gfalFile in gfalFiles:
            p = multiprocessing.Process(target=self.handleGfalFile, args=(gfalFile,))
            p.start()
            self.__otherProcesses.append(p)
            self.__handlingOthers += 1
            self.__startWait = None

        s3Files = self.getUnstagedOutputFiles(".s3cmd")
        for s3File in s3Files:
            p = multiprocessing.Process(target=self.handleS3File, args=(s3File,))
            p.start()
            self.__otherProcesses.append(p)
            self.__handlingOthers += 1
            self.__startWait = None

        termProcesses = []
        for p in self.__otherProcesses:
            if not p.is_alive():
                termProcesses.append(p)
        for p in termProcesses:
            self.__otherProcesses.remove(p)

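    # killStallProcess: find other EventStager.py processes of this user (here
    # hard-coded as "wguan") whose /proc entries are older than one minute and
    # kill them, excluding the current PID.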
    def killStallProcess(self):
        command = "find /proc -maxdepth 1 -user wguan -type d -mmin +1 -exec basename {} \; | xargs ps | grep EventStager.py | awk '{ print $1 }' | grep -v " + str(os.getpid()) + "|xargs kill"
        print command
        status, output = commands.getstatusoutput(command)
        print status
        print output

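    # run: main loop. Roughly once a minute it refreshes the stager status,
    # cleans stale staging files, fetches event ranges and checks staging
    # results; every 30 seconds it handles gfal/S3 control files, and it exits
    # once all event ranges and subprocesses are done and the wait time expires.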
    def run(self):
        logging.info("Start to run")
        self.cleanStagingFiles()
        timeStart = time.time() - 60
        while not self.isFinished():
            try:
                if (time.time() - timeStart) > 60:
                    self.renewEventStagerStatus()
                    self.cleanStagingFiles(20*60)
                    # self.checkMissedStagingFiles()
                    self.getEventRanges()
                    self.checkFailedStagingFiles()
                    self.checkFinishedStagingFiles()
                    if self.__canFinish and len(self.__eventRanges.keys()) == 0:
                        self.__status = 'finished'
                        self.renewEventStagerStatus()
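                    # Check for lost events only once the thread pool's queue has drained.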
                    if self.__threadpool.is_empty():
                        self.checkLostEvents()
                    timeStart = time.time()
                self.handleOtherFiles()
                time.sleep(30)

                logging.debug("len(eventranges:%s)" % len(self.__eventRanges.keys()))
                #logging.debug("%s" % self.__eventRanges)
                logging.debug("otherProcesses:%s" % len(self.__otherProcesses))
                if len(self.__eventRanges.keys()) == 0 and len(self.__otherProcesses) == 0:
                    self.cleanStagingFiles()
                    if self.__startWait == None:
                        self.__startWait = time.time()
                    self.killStallProcess()
                if self.__startWait and (time.time() - self.__startWait) > self.__waitTime:
                    break
            except:
                logging.info(traceback.format_exc())
                #sys.exit(1)
        logging.info("Finished to run")