This article collects typical usage examples of the Python method pyLibrary.thread.threads.Thread.wait_for_shutdown_signal. If you are wondering what Thread.wait_for_shutdown_signal does and how to use it in practice, the curated examples below should help. You can also explore further usage examples of the containing class, pyLibrary.thread.threads.Thread.

Five code examples of Thread.wait_for_shutdown_signal are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
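All five examples follow the same pattern: start one or more worker threads that share a Signal (usually named please_stop or stopper), then park the main thread on Thread.wait_for_shutdown_signal until a shutdown request raises that Signal, at which point the workers see it and wind down. The sketch below is a minimal illustration of that pattern, not code taken from the examples: the worker body is hypothetical, and the Signal import is assumed to live in the same pyLibrary.thread.threads module as Thread.

from time import sleep

from pyLibrary.thread.threads import Signal, Thread


def worker(please_stop):
    # hypothetical unit of work, repeated until the shared Signal is raised
    while not please_stop:
        sleep(1)


def main():
    please_stop = Signal()
    Thread.run("example worker", worker, please_stop=please_stop)

    # Block the main thread until a shutdown request (e.g. Ctrl-C/SIGTERM) raises
    # please_stop; the worker sees the same Signal and exits its loop.
    # allow_exit=True is passed the same way the examples below pass it.
    Thread.wait_for_shutdown_signal(please_stop=please_stop, allow_exit=True)


if __name__ == "__main__":
    main()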
Example 1: main
# Required import: from pyLibrary.thread.threads import Thread [as alias]
# Alternatively: from pyLibrary.thread.threads.Thread import wait_for_shutdown_signal [as alias]
def main():
    try:
        settings = startup.read_settings(defs=[{
            "name": ["--id"],
            "help": "id(s) to process. Use \"..\" for a range.",
            "type": str,
            "dest": "id",
            "required": False
        }])
        constants.set(settings.constants)
        Log.start(settings.debug)

        if settings.args.id:
            etl_one(settings)
            return

        hg = HgMozillaOrg(settings=settings.hg)
        resources = Dict(hg=dictwrap(hg))

        stopper = Signal()
        for i in range(coalesce(settings.param.threads, 1)):
            ETL(
                name="ETL Loop " + unicode(i),
                work_queue=settings.work_queue,
                resources=resources,
                workers=settings.workers,
                settings=settings.param,
                please_stop=stopper
            )

        Thread.wait_for_shutdown_signal(stopper, allow_exit=True)
    except Exception, e:
        Log.error("Problem with etl", e)
Example 2: main
# Required import: from pyLibrary.thread.threads import Thread [as alias]
# Alternatively: from pyLibrary.thread.threads.Thread import wait_for_shutdown_signal [as alias]
def main():
    try:
        settings = startup.read_settings()
        Log.start(settings.debug)
        constants.set(settings.constants)

        with startup.SingleInstance(flavor_id=settings.args.filename):
            with aws.s3.Bucket(settings.destination) as bucket:

                if settings.param.debug:
                    if settings.source.durable:
                        Log.error("Can not run in debug mode with a durable queue")
                    synch = SynchState(bucket.get_key(SYNCHRONIZATION_KEY, must_exist=False))
                else:
                    synch = SynchState(bucket.get_key(SYNCHRONIZATION_KEY, must_exist=False))
                    if settings.source.durable:
                        synch.startup()

                queue = PersistentQueue(settings.param.queue_file)
                if queue:
                    last_item = queue[len(queue) - 1]
                    synch.source_key = last_item._meta.count + 1

                with pulse.Consumer(settings=settings.source, target=None, target_queue=queue, start=synch.source_key):
                    Thread.run("pulse log loop", log_loop, settings, synch, queue, bucket)
                    Thread.wait_for_shutdown_signal(allow_exit=True)
                    Log.warning("starting shutdown")

                queue.close()
                Log.note("write shutdown state to S3")
                synch.shutdown()
    except Exception, e:
        Log.error("Problem with etl", e)
Example 3: main
# Required import: from pyLibrary.thread.threads import Thread [as alias]
# Alternatively: from pyLibrary.thread.threads.Thread import wait_for_shutdown_signal [as alias]
def main():
    global config

    try:
        config = startup.read_settings()
        with startup.SingleInstance(flavor_id=config.args.filename):
            constants.set(config.constants)
            Log.start(config.debug)

            es = elasticsearch.Cluster(config.destination).get_or_create_index(config.destination)
            please_stop = Signal()
            Thread.run("aggregator", loop_all_days, es, please_stop=please_stop)
            Thread.wait_for_shutdown_signal(please_stop=please_stop, allow_exit=True)
    except Exception, e:
        Log.error("Serious problem with Test Failure Aggregator service! Shutdown completed!", cause=e)
Example 4: Dict
# Required import: from pyLibrary.thread.threads import Thread [as alias]
# Alternatively: from pyLibrary.thread.threads.Thread import wait_for_shutdown_signal [as alias]
Log.warning("Problem", cause=e)
resources = Dict(hg=HgMozillaOrg(settings=settings.hg))
stopper = Signal()
ETL(
name="ETL Loop Test",
work_queue=queue,
workers=settings.workers,
settings=settings.param,
resources=resources,
please_stop=stopper
)
aws.capture_termination_signal(stopper)
Thread.wait_for_shutdown_signal(stopper, allow_exit=True)
def parse_id_argument(id):
if id.find("..") >= 0:
#range of ids
min_, max_ = map(int, map(strings.trim, id.split("..")))
return map(unicode, range(min_, max_ + 1))
else:
return [id]
if __name__ == "__main__":
main()
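As written, parse_id_argument expands the ".." range syntax (the same syntax accepted by the --id option in Example 1) inclusively and returns the ids as strings, while a plain id passes through unchanged. For instance:

    parse_id_argument("3..6")   # -> [u"3", u"4", u"5", u"6"]
    parse_id_argument("42")     # -> ["42"]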
Example 5: main
# Required import: from pyLibrary.thread.threads import Thread [as alias]
# Alternatively: from pyLibrary.thread.threads.Thread import wait_for_shutdown_signal [as alias]
def main():
    global BATCH_SIZE

    current_time = Date.now()
    time_file = File(config.last_replication_time)

    # SYNCH WITH source ES INDEX
    source = elasticsearch.Index(config.source)
    destination = elasticsearch.Cluster(config.destination).get_or_create_index(config.destination)

    # GET LAST UPDATED
    if config.since != None:
        last_updated = Date(config.since).unix
    else:
        last_updated = get_last_updated(destination)

    if config.batch_size:
        BATCH_SIZE = config.batch_size

    Log.note("updating records with {{primary_field}}>={{last_updated}}", last_updated=last_updated, primary_field=config.primary_field)

    please_stop = Signal()
    done = Signal()

    def worker(please_stop):
        pending = Queue("pending ids", max=BATCH_SIZE*3, silent=False)

        pending_thread = Thread.run(
            "get pending",
            get_pending,
            source=source,
            since=last_updated,
            pending_bugs=pending,
            please_stop=please_stop
        )
        diff_thread = Thread.run(
            "diff",
            diff,
            source,
            destination,
            pending,
            please_stop=please_stop
        )
        replication_thread = Thread.run(
            "replication",
            replicate,
            source,
            destination,
            pending,
            config.fix,
            please_stop=please_stop
        )
        pending_thread.join()
        diff_thread.join()
        pending.add(Thread.STOP)
        replication_thread.join()
        done.go()
        please_stop.go()

    Thread.run("wait for replication to finish", worker, please_stop=please_stop)
    Thread.wait_for_shutdown_signal(please_stop=please_stop)

    if done:
        Log.note("done all")
        # RECORD LAST UPDATED, IF WE DID NOT CANCEL OUT
        time_file.write(unicode(current_time.milli))
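A note on the design in Example 5: the worker itself raises please_stop (and done) once replication finishes, so Thread.wait_for_shutdown_signal returns either because the work completed or because an external shutdown arrived. Only in the first case is done raised, which is why the last-replication timestamp is written inside the `if done:` block and skipped when the run is cancelled.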