本文整理匯總了Python中retask.queue.Queue.wait方法的典型用法代碼示例。如果您正苦於以下問題:Python Queue.wait方法的具體用法?Python Queue.wait怎麽用?Python Queue.wait使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類retask.queue.Queue
的用法示例。
在下文中一共展示了Queue.wait方法的8個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: main
# 需要導入模塊: from retask.queue import Queue [as 別名]
# 或者: from retask.queue.Queue import wait [as 別名]
def main():
    """Worker entry point: consume tasks from the 'jobqueue' queue forever.

    Each task's payload is logged and handed straight to auto_job.
    """
    queue = Queue('jobqueue')
    queue.connect()
    while True:
        # wait() blocks until the next task arrives.
        job = queue.wait()
        log.debug("%s", job.data)
        auto_job(job.data)
示例2: main
# 需要導入模塊: from retask.queue import Queue [as 別名]
# 或者: from retask.queue.Queue import wait [as 別名]
def main():
    """Consume compose tasks from the 'jobqueue' queue forever.

    For the first image of a compose (pos == 1) the compose DB row is
    marked running ('r') and a 'compose.running' fedmsg is published;
    every task is then handed to auto_job.
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()
    while True:
        # Blocks until the next task is available.
        task = jobqueue.wait()
        task_data = task.data
        # task_data['pos'] is a 2-tuple: this image's 1-based position and
        # the total number of images in the compose.
        pos, num_images = task_data['pos']
        compose_details = task_data['compose']
        if pos == 1:
            session = init_model()
            compose_id = compose_details['id']
            compose_obj = session.query(ComposeDetails).filter_by(
                compose_id=compose_id).first()
            compose_status = compose_obj.status.code
            # Here the check if the compose_status has completed 'c' is for
            # failsafe. This condition is never to be hit. This is to avoid
            # sending message to fedmsg.
            if compose_status in ('r', 'c'):
                log.info("Compose %s already running. Skipping sending to \
fedmsg" % compose_id)
            else:
                # Mark the compose as running and announce it before any
                # image job starts.
                compose_obj.status = u'r'
                session.commit()
                params = copy.deepcopy(compose_details)
                params.update({'status': 'running'})
                publish_to_fedmsg(topic='compose.running', **params)
        result, running_status = auto_job(task_data)
示例3: main
# 需要導入模塊: from retask.queue import Queue [as 別名]
# 或者: from retask.queue.Queue import wait [as 別名]
def main():
    """Worker loop: mark a compose as running, then dispatch it to auto_job."""
    queue = Queue('jobqueue')
    queue.connect()
    while True:
        # Block until the next task arrives and unpack its payload.
        payload = queue.wait().data
        position, total_images = payload['pos']
        details = payload['compose']
        if position == 1:
            # First image of the compose: flag the DB row as running and
            # announce the compose on fedmsg before the job starts.
            db = init_model()
            record = db.query(ComposeDetails).filter_by(
                compose_id=details['id']).first()
            record.status = u'r'
            db.commit()
            message = copy.deepcopy(details)
            message.update({'status': 'running'})
            publish_to_fedmsg(topic='compose.running', **message)
        result, running_status = auto_job(payload)
示例4: BasePlugin
# 需要導入模塊: from retask.queue import Queue [as 別名]
# 或者: from retask.queue.Queue import wait [as 別名]
class BasePlugin(object):
    """Base class for retask-backed message-processing plugins.

    Subclasses implement process() (and do_pagure()).  initialize() connects
    to the plugin's redis-backed retask queue, loads the configured services
    and starts a worker process that consumes messages forever.

    NOTE(review): subclasses are expected to define self.plugin_name before
    initialize() is called -- it is read but never set here.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, *args, **kwargs):
        self.config = load_config(PLUGINS_CONFIG_FILEPATH)
        self.active_services = get_active_services()
        self.services = []

    def initialize(self):
        """Connect to the queue, load services and start the consumer."""
        self.init_retask_connection()
        self.load_services()
        self.init_worker()

    def init_retask_connection(self):
        """ Connect to the retask queue for the plugin """
        self.queue = Queue(self.plugin_name)
        conn = self.queue.connect()
        log.info("Initializing redis conection: %s" % self.plugin_name)
        if not conn:
            log.error("Could not connect to %s queue" % self.plugin_name)
            return False
        # BUG-FIX: report success explicitly.  Previously the success path
        # fell through and returned None (falsy), so callers checking the
        # result could not distinguish success from failure.
        return True

    def consume(self):
        """Blocking loop: pull tasks off the queue and process each message."""
        while True:
            task = self.queue.wait()
            if task:
                log.debug("Processing Message: %s" % task.data['msg']['body']['msg_id'])
                self.process(task.data['msg'])

    def init_worker(self):
        """ Create a process and start consuming the messages """
        process = multiprocessing.Process(target=self.consume)
        process.start()

    def load_services(self):
        """ Load the services for the plugin """
        services = self.config.get(self.plugin_name, 'services').split(',')
        log.info("Start loading services")
        for service in services:
            self.services.append(self.active_services[service].load())
        log.info("Complete loading services %s" % self.services)

    @abc.abstractmethod
    def process(self, msg):
        """ Consumes the messages from retask """
        # BUG-FIX: the abstract signature previously took no message
        # argument, but consume() calls self.process(task.data['msg']) --
        # every concrete subclass must accept the message, so declare it.
        return

    @abc.abstractmethod
    def do_pagure(self):
        """ Override to do activity related to pagure """
        return
示例5: RedisQueue
# 需要導入模塊: from retask.queue import Queue [as 別名]
# 或者: from retask.queue.Queue import wait [as 別名]
class RedisQueue(object):
    """Thin get/put wrapper around a retask Queue on a redis server."""

    def __init__(self, host, name, port=6379, password=None):
        connection_info = {
            'host': host,
            'port': port,
            'db': 0,
            'password': password,
        }
        self.super_queue = Queue(name, connection_info)
        self.super_queue.connect()

    def get(self):
        """Block until a task is available and return it."""
        return self.super_queue.wait()

    def put(self, data):
        """Wrap *data* in a Task and enqueue it."""
        self.super_queue.enqueue(Task(data))
示例6: main
# 需要導入模塊: from retask.queue import Queue [as 別名]
# 或者: from retask.queue.Queue import wait [as 別名]
def main():
    """Worker loop: compile and run C sources received over the 'rcc' queue.

    Each task carries a filename and its source text; the reply sent back on
    the task's channel (120s TTL) is the compiler error, the runtime error,
    or the program's output, in that order of precedence.

    NOTE(review): the client-supplied filename is interpolated straight into
    a shell command string below -- safe only if producers are trusted.
    """
    queue = Queue('rcc')
    queue.connect()
    while True:
        # Blocks until a task arrives.
        task = queue.wait()
        name = task.data['filename']
        print "Received", name
        content = task.data['text']
        # Write the source into a scratch directory and compile it there.
        destdir = writesource(name, content)
        temp_path = os.path.join(destdir, name)
        x = os.path.join(destdir, 'test')
        out, err = system('gcc ' + temp_path + ' -o ' + x)
        if err:
            # Compilation failed: send the compiler's error output back.
            queue.send(task, err, 120)
        else:
            # Run the freshly built binary and report its outcome.
            out1, err1 = system(x)
            if err1:
                queue.send(task, err1, 120)
            else:
                queue.send(task, out1, 120)
示例7: main
# 需要導入模塊: from retask.queue import Queue [as 別名]
# 或者: from retask.queue.Queue import wait [as 別名]
def main():
    """Consume git push payloads from the 'puluupdates' queue and rebuild
    the blogs whose source directories were touched.

    Pushes from users outside the whitelist are skipped.
    """
    q = Queue('puluupdates')
    q.connect()
    while True:
        # Blocks until the next push payload arrives.
        task = q.wait()
        data = task.data
        user = data['repository']['owner']['name']
        if user not in ['kushaldas']:
            # BUG-FIX: this was `return`, which terminated the whole
            # consumer on the first push from an unknown user.  Skip the
            # task and keep consuming instead.
            continue
        reponame = data['repository']['name']
        names = set()
        # Now go through all commits and find the unique directory names
        for commit in data['commits']:
            for fpath in commit['added']:
                names.add(fpath.split('/')[0])
            for fpath in commit['modified']:
                names.add(fpath.split('/')[0])
        # Now for each name, update the blog posts
        for name in names:
            if os.path.isdir(os.path.join('gitsources', user, name)):
                blog_post(user, name,
                          os.path.join('gitsources', user, name),
                          data['commits'])
        reload_blog()
示例8: Queue
# 需要導入模塊: from retask.queue import Queue [as 別名]
# 或者: from retask.queue.Queue import wait [as 別名]
# Minimal blocking consumer: wait for a single task on the 'example' queue,
# simulate 15 seconds of work, then send a reply back on the task's result
# channel.
from retask.queue import Queue
import time
queue = Queue('example')
queue.connect()
# Blocks until a producer enqueues a task.
task = queue.wait()
print task.data
time.sleep(15)
queue.send(task, "We received your information dear %s" % task.data['user'])