本文整理匯總了Python中retask.queue.Queue類的典型用法代碼示例。如果您正苦於以下問題:Python Queue類的具體用法?Python Queue怎麽用?Python Queue使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
在下文中一共展示了Queue類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: notifier
def notifier(self):
    """Connect to the instruction queue and notify bugyou to create a queue
    for each plugin and start pushing the fedmsg messages.

    For every plugin in ``self.plugins`` the fedmsg ``topic`` is read
    from the config and a ``create`` instruction is enqueued on the
    shared 'instruction' queue.  Plugins that are also present in
    ``self.active_plugins`` are instantiated and initialized.
    """
    queue = Queue('instruction')
    queue.connect()
    for plugin in self.plugins:
        try:
            topic = self.config.get(plugin, 'topic')
        except ConfigParser.NoOptionError:
            # BUG FIX: previously `topic` was left unbound (or stale from
            # the prior loop iteration) here, so the `is None` check below
            # raised NameError / used the wrong plugin's topic.
            log.error("Config does not exist")
            topic = None
        if topic is None:
            log.info("Config does not exist")
            continue
        payload = {
            'type': 'create',
            'queue_name': plugin,
            'topic': topic,
        }
        task = Task(payload)
        queue.enqueue(task)
        if plugin in self.active_plugins:
            # Instantiate and boot the plugin worker process.
            Plugin = self.active_plugins[plugin]
            plugin_obj = Plugin()
            plugin_obj.initialize()
示例2: main
def main():
    """Worker loop: pull compose tasks off the 'jobqueue' and run them.

    Blocks forever on the retask queue.  When the first image of a
    compose arrives (pos == 1) the compose row is marked running ('r')
    in the database and a ``compose.running`` fedmsg is published before
    the job is handed to ``auto_job``.
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()
    while True:
        task = jobqueue.wait()  # blocks until a task is available
        task_data = task.data
        # pos is the 1-based position of this image within the compose;
        # num_images is the compose's total image count.
        pos, num_images = task_data['pos']
        compose_details = task_data['compose']
        if pos == 1:
            # First image of the compose: flip its DB status to running.
            session = init_model()
            compose_id = compose_details['id']
            compose_obj = session.query(ComposeDetails).filter_by(
                compose_id=compose_id).first()
            compose_obj.status = u'r'
            session.commit()
            # Announce the state change on fedmsg without mutating the
            # original task payload.
            params = copy.deepcopy(compose_details)
            params.update({'status': 'running'})
            publish_to_fedmsg(topic='compose.running', **params)
        result, running_status = auto_job(task_data)
示例3: main
def main():
    """Block on the 'jobqueue' retask queue and hand each task to auto_job."""
    queue = Queue('jobqueue')
    queue.connect()
    while True:
        job = queue.wait()
        payload = job.data
        log.debug("%s", payload)
        auto_job(payload)
示例4: compile
def compile(request):
    """
    Enqueue a compile task on the 'rcc' queue and block until the
    worker publishes a result.

    Expects POST fields ``filename`` and ``text``.  Returns a JSON
    ``HttpResponse`` with the compiler output, or an error message when
    input is missing or the job cannot be created.
    """
    import time  # stdlib; used only for the polling sleep below
    filename = request.POST.get('filename', False)
    text = request.POST.get('text', False)
    if filename is False:
        return HttpResponse(json.dumps({'output':'Invalid filename'}),
                            content_type="application/json")
    if text is False:
        return HttpResponse(json.dumps({'output':'Empty file'}),
                            content_type="application/json")
    try:
        queue = Queue('rcc')
        queue.connect()
        task = Task({'filename':filename, 'text':text})
        job = queue.enqueue(task)
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallows
        # SystemExit/KeyboardInterrupt.
        return HttpResponse(json.dumps({'output':'Error creating Job'}),
                            content_type="application/json")
    # BUG FIX: the original loop busy-waited at 100% CPU on job.result;
    # sleep briefly between polls instead.
    while job.result is None:
        time.sleep(0.1)
    return HttpResponse(json.dumps({'output' : job.result}),
                        content_type="application/json")
示例5: produce_jobs
def produce_jobs(infox):
    """Queue image-test jobs onto the 'jobqueue' retask queue.

    :args infox: list of dictionaries contains the image url and the buildid
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()
    session = init_model()
    timestamp = datetime.datetime.now()
    for info in infox:
        # Persist the job as queued ('q') first so it has a database id
        # before it is handed to the worker.
        jd = JobDetails(
            taskid=info['buildid'],
            status='q',
            created_on=timestamp,
            user='admin',
            last_updated=timestamp)
        session.add(jd)
        session.commit()
        job_details_id = jd.id
        log.info('Save {jd_id} to database'.format(jd_id=job_details_id))
        # Attach the DB id so the worker can update the row later.
        info.update({'job_id': jd.id})
        task = Task(info)
        jobqueue.enqueue(task)
        log.info('Enqueue {jd_id} to redis'.format(jd_id=job_details_id))
        # Announce the queued image on fedmsg.
        publish_to_fedmsg(topic='image.queued', image_url=info['image_url'],
                          image_name=info['name'], status='queued',
                          buildid=info['buildid'], job_id=info['job_id'],
                          release=info['release'])
示例6: main
def main():
    """Worker loop for compose tasks on the 'jobqueue' retask queue.

    When the first image of a compose arrives (pos == 1) the compose is
    marked running ('r') in the database and a ``compose.running``
    fedmsg is published — unless the compose is already running or
    complete — then the job is handed to ``auto_job``.
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()
    while True:
        task = jobqueue.wait()  # blocks until a task arrives
        task_data = task.data
        # pos is the 1-based position of this image within the compose.
        pos, num_images = task_data['pos']
        compose_details = task_data['compose']
        if pos == 1:
            session = init_model()
            compose_id = compose_details['id']
            compose_obj = session.query(ComposeDetails).filter_by(
                compose_id=compose_id).first()
            compose_status = compose_obj.status.code
            # Here the check if the compose_status has completed 'c' is for
            # failsafe. This condition is never to be hit. This is to avoid
            # sending message to fedmsg.
            if compose_status in ('r', 'c'):
                log.info("Compose %s already running. Skipping sending to \
fedmsg" % compose_id)
            else:
                compose_obj.status = u'r'
                session.commit()
                # Publish on a deep copy so the task payload is untouched.
                params = copy.deepcopy(compose_details)
                params.update({'status': 'running'})
                publish_to_fedmsg(topic='compose.running', **params)
        result, running_status = auto_job(task_data)
示例7: Worker
class Worker(object):
    """ Represents the worker process. Waits for tasks to come in from the
    webapp and then acts on them.
    """

    def __init__(self):
        # Subscribe to the 'commits' queue fed by the web application.
        self.queue = Queue('commits')
        self.queue.connect()
        # TODO -- set both of these with the config file.
        # Use pyramid tools to load config.
        self.sleep_interval = 1
        self.scratch_dir = "/home/threebean/scratch/pep8bot-scratch"
        try:
            os.makedirs(self.scratch_dir)
        except OSError:
            pass  # Assume that the scratch_dir already exists.

    def run(self):
        # Poll loop (Python 2 print-statement syntax throughout).
        while True:
            time.sleep(self.sleep_interval)
            print "Waking"
            if self.queue.length == 0:
                continue
            task = self.queue.dequeue()
            data = task.data
            # Payload shape follows a GitHub-style push hook:
            # repository url/name/owner — TODO confirm against the webapp.
            url = data['repository']['url']
            # TODO -- don't clone this url. But fork and clone our url.
            name = data['repository']['name']
            owner = data['repository']['owner']['name']
            self.working_dir = tempfile.mkdtemp(
                prefix=owner + '-' + name,
                dir=self.scratch_dir,
            )
            print "** Cloning to", self.working_dir
            print sh.git.clone(url, self.working_dir)
            print "** Processing files."
            # Run PythonTidy over every .py file in the checkout, in place.
            for root, dirs, files in os.walk(self.working_dir):
                if '.git' in root:
                    continue
                for filename in files:
                    if filename.endswith(".py"):
                        infile = root + "/" + filename
                        print "** Tidying", infile
                        tmpfile = infile + ".bak"
                        script = os.path.expanduser(
                            "~/devel/PythonTidy/PythonTidy.py"
                        )
                        sh.python(script, infile, tmpfile)
                        shutil.move(tmpfile, infile)
            # Show the resulting git status from inside the working copy.
            with directory(self.working_dir):
                print sh.pwd()
                print sh.git.status()
示例8: _get_queue
def _get_queue(self, bgroup):
if not bgroup in self.build_queues:
q_id = "copr-be-{0}".format(bgroup)
q = Queue(q_id)
if not q.connect():
# As we already connected to jg_control_message, this should
# be also OK.
raise Exception("can't connect to redis, should never happen!")
return q
return self.build_queues[bgroup]
示例9: connect_queues
def connect_queues(self):
    """Open one retask queue per builder group.

    Each connected queue is indexed twice: under the group's name and
    under every architecture that the group serves.
    """
    for grp in self.opts.build_groups:
        grp_queue = Queue("copr-be-{0}".format(grp["id"]))
        grp_queue.connect()
        self.task_queues_by_group[grp["name"]] = grp_queue
        for build_arch in grp["archs"]:
            self.task_queues_by_arch[build_arch] = grp_queue
示例10: produce_jobs
def produce_jobs(infox):
    """Queue compose-image jobs onto the 'jobqueue' retask queue.

    :args infox: list of dictionaries contains the image url and the buildid
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()
    # Map compose subvariant names to the single-character family codes
    # stored in the database.
    family_mapping = {
        'Cloud_Base': 'b',
        'Atomic': 'a'
    }
    session = init_model()
    timestamp = datetime.datetime.now()
    for info in infox:
        # Image name: basename of the path with the ".x86_64..." suffix
        # stripped — assumes x86_64 images only; TODO confirm.
        image_name = info['path'].split('.x86_64')[0].split('/')[-1]
        # Persist the job as queued ('q') so it has a DB id for the worker.
        jd = ComposeJobDetails(
            arch=info['arch'],
            compose_id=info['compose']['id'],
            created_on=timestamp,
            family=family_mapping[info['subvariant']],
            image_url=info['absolute_path'],
            last_updated=timestamp,
            release=info['compose']['release'],
            status='q',
            subvariant=info['subvariant'],
            user='admin',
            image_format=info['format'],
            image_type=info['type'],
            image_name=image_name,
        )
        session.add(jd)
        session.commit()
        job_details_id = jd.id
        log.info('Save {jd_id} to database'.format(jd_id=job_details_id))
        # Attach the DB id so the worker can update the row later.
        info.update({'job_id': jd.id})
        task = Task(info)
        jobqueue.enqueue(task)
        log.info('Enqueue {jd_id} to redis'.format(jd_id=job_details_id))
        # Announce the queued image on fedmsg.
        publish_to_fedmsg(topic='image.queued',
                          compose_url=info['absolute_path'],
                          compose_id=info['compose']['id'],
                          image_name=image_name,
                          status='queued',
                          job_id=info['job_id'],
                          release=info['compose']['release'],
                          family=jd.family.value,
                          type=info['type'])
    # NOTE(review): indentation was lost in extraction; assuming the
    # session is closed once after the loop — confirm against upstream.
    session.close()
示例11: update
def update(data):
    """Push an update event onto the 'puluupdates' retask queue.

    Best-effort delivery: if the redis connection cannot be made the
    event is silently dropped.

    :arg data: raw payload describing the repository update
    """
    q = Queue('puluupdates')
    if q.connect():
        q.enqueue(Task(data=data, raw=True))
示例12: BasePlugin
class BasePlugin(object):
    """Abstract base for bugyou plugins.

    A concrete plugin supplies ``plugin_name`` and implements
    ``process``/``do_pagure``.  ``initialize`` wires up the retask
    queue, loads the configured services, and spawns a consumer process.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, *args, **kwargs):
        self.config = load_config(PLUGINS_CONFIG_FILEPATH)
        self.active_services = get_active_services()
        self.services = []

    def initialize(self):
        # Queue first, then services, then the consumer process.
        self.init_retask_connection()
        self.load_services()
        self.init_worker()

    def init_retask_connection(self):
        """ Connect to the retask queue for the plugin """
        # self.plugin_name is expected from the concrete subclass.
        self.queue = Queue(self.plugin_name)
        conn = self.queue.connect()
        log.info("Initializing redis conection: %s" % self.plugin_name)
        if not conn:
            log.error("Could not connect to %s queue" % self.plugin_name)
            return False

    def consume(self):
        # Runs in a child process; blocks on the queue forever.
        while True:
            task = self.queue.wait()
            if task:
                log.debug("Processing Message: %s" % task.data['msg']['body']['msg_id'])
                self.process(task.data['msg'])

    def init_worker(self):
        """ Create a process and start consuming the messages """
        process = multiprocessing.Process(target=self.consume)
        process.start()

    def load_services(self):
        """ Load the services for the plugin """
        # Comma-separated service names under the plugin's config section.
        services = self.config.get(self.plugin_name, 'services').split(',')
        log.info("Start loading services")
        for service in services:
            self.services.append(self.active_services[service].load())
        log.info("Complete loading services %s" % self.services)

    @abc.abstractmethod
    def process(self):
        """ Consumes the messages from retask """
        return

    @abc.abstractmethod
    def do_pagure(self):
        """ Override to do activity related to pagure """
        return
示例13: upload_file
def upload_file():
    # Flask-style view: save an uploaded log file and enqueue it for
    # processing on the 'incoming_files' retask queue.
    if request.method == "POST":
        file = request.files["file"]
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(APP.config["UPLOAD_FOLDER"], filename))
            # Now add the information in the queue for processing
            t = Task({"filename": filename})
            queue = Queue("incoming_files")
            queue.connect()
            queue.enqueue(t)
            return "Log uploaded successfully."
    # NOTE(review): the snippet is truncated here by the source page —
    # the returned template string continues past this excerpt.
    return """
示例14: LogBot
class LogBot(irc.IRCClient):
    """A logging IRC bot."""

    nickname = 'pyconsprints'

    def __init__(self, channel):
        self.chn = '#'+channel
        self.qs_queue = []
        self.logger = None
        # Captured bug messages are forwarded to this retask queue.
        self.q = Queue('bug-messages')
        self.q.connect()
        # Nicks allowed to run the admin commands below.
        self.channel_admin = ['kushal',]

    def connectionMade(self):
        irc.IRCClient.connectionMade(self)
        self.islogging = False
        self._namescallback = {}

    def connectionLost(self, reason):
        irc.IRCClient.connectionLost(self, reason)
        self.islogging = False

    def signedOn(self):
        """Called when bot has successfully signed on to server."""
        self.join(self.factory.channel)

    def privmsg(self, user, channel, msg):
        """This will get called when the bot receives a message."""
        user = user.split('!', 1)[0]
        # Messages from the trusted BOTNAME are relayed to the queue
        # verbatim (Python 2 print-statement syntax throughout).
        if user == BOTNAME:
            print '[[%s]]' % msg
            task = Task(msg)
            self.q.enqueue(task)
        user_cond = user in self.channel_admin
        if msg == '#masters' and user_cond:
            self.msg(self.chn, "My current masters are: %s" % ",".join(self.channel_admin))
        if msg.startswith('#add:') and user_cond:
            # Promote a new admin: "#add: <nick>".
            try:
                name = msg.split()[1]
                print name
                self.channel_admin.append(name)
                self.msg(self.chn,'%s is a master now.' % name)
            except Exception, err:
                print err
        if msg.startswith('#test:') and user_cond:
            # Enqueue a dummy issue reference: "#test: <bugid>".
            bugid = msg.split()[1]
            msg = 'dummy/issue%s' % bugid
            task = Task(msg)
            self.q.enqueue(task)
示例15: init_task_queues
def init_task_queues(self):
    """Connect a retask queue for every build group, then purge stale tasks.

    Queues are stored in ``self.task_queues`` keyed by group id.

    :raises CoprBackendError: when redis is unreachable
    """
    try:
        for grp in self.opts.build_groups:
            gid = grp["id"]
            grp_queue = Queue("copr-be-{0}".format(gid))
            grp_queue.connect()
            self.task_queues[gid] = grp_queue
    except ConnectionError:
        raise CoprBackendError(
            "Could not connect to a task queue. Is Redis running?")
    self.clean_task_queues()