This article collects typical usage examples of the Queue.enqueue method from the Python module retask.queue. If you are unsure what Queue.enqueue does or how to call it, the curated code samples below should help. You can also explore further usage examples of the class it belongs to, retask.queue.Queue.
The following 15 code examples of Queue.enqueue are shown, sorted by popularity by default.
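The examples below all follow the same basic pattern: create a Queue, connect it to Redis, wrap a JSON-serializable payload in a Task, and pass it to enqueue. As a quick orientation, here is a minimal sketch of that pattern; the queue name and payload are illustrative, and a local Redis server on the default port is assumed.
# Minimal sketch of the common pattern; the queue name and payload are
# illustrative, and a local Redis on the default port is assumed.
from retask.task import Task
from retask.queue import Queue

queue = Queue('demo')
if not queue.connect():    # connect() returns False if Redis is unreachable
    raise SystemExit('cannot reach redis')
task = Task({'user': 'kushal', 'url': 'http://kushaldas.in'})
job = queue.enqueue(task)  # returns a job handle; see Examples 8 and 12 for job.result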
Example 1: notifier
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
def notifier(self):
    """ Connect to the instruction queue and notify bugyou to create a queue
    for the plugin and start pushing the fedmsg messages.
    """
    queue = Queue('instruction')
    queue.connect()
    for plugin in self.plugins:
        topic = None  # default when the option is missing
        try:
            topic = self.config.get(plugin, 'topic')
        except ConfigParser.NoOptionError:
            log.error("Config does not exist")
        if topic is None:
            log.info("Config does not exist")
            continue
        payload = {
            'type': 'create',
            'queue_name': plugin,
            'topic': topic,
        }
        task = Task(payload)
        queue.enqueue(task)
        if plugin in self.active_plugins:
            Plugin = self.active_plugins[plugin]
            plugin_obj = Plugin()
            plugin_obj.initialize()
Example 2: produce_jobs
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
def produce_jobs(infox):
    """ Queue the jobs into jobqueue
    :args infox: list of dictionaries containing the image url and the buildid
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()
    session = init_model()
    timestamp = datetime.datetime.now()
    for info in infox:
        jd = JobDetails(
            taskid=info['buildid'],
            status='q',
            created_on=timestamp,
            user='admin',
            last_updated=timestamp)
        session.add(jd)
        session.commit()
        job_details_id = jd.id
        log.info('Save {jd_id} to database'.format(jd_id=job_details_id))
        info.update({'job_id': jd.id})
        task = Task(info)
        jobqueue.enqueue(task)
        log.info('Enqueue {jd_id} to redis'.format(jd_id=job_details_id))
        publish_to_fedmsg(topic='image.queued', image_url=info['image_url'],
                          image_name=info['name'], status='queued',
                          buildid=info['buildid'], job_id=info['job_id'],
                          release=info['release'])
Example 3: monitor_buildqueue
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
def monitor_buildqueue():
    """
    This function monitors the build queue.
    If the build is still running, the task is put back into the build queue.
    If the build has finished, the task moves to the job queue.
    """
    key = get_key('darkbuildqueue')
    config = get_redis_config()
    jobqueue = Queue('jobqueue', config)
    jobqueue.connect()
    buildqueue = Queue('buildqueue', config)
    buildqueue.connect()
    rdb = redis_connection()
    if not rdb:
        log(key, 'redis is missing', 'error')
        return None
    rdb.set('darkbuildqueue-status', '1')
    while True:
        if check_shutdown():
            break
        try:
            time.sleep(60)
            length = buildqueue.length
            if length == 0:
                log(key, "Sleeping, no buildqueue job", 'info')
                time.sleep(60)
                continue
            task = buildqueue.dequeue()
            kojiurl = task.data['kojiurl']
            idx = task.data['jobid']
            kc = koji.ClientSession(kojiurl, {'debug': False, 'password': None,
                                              'debug_xmlrpc': False, 'user': None})
            res = kc.getBuild(idx)
            if not res:
                # We reached a build that has not started yet; time to sleep.
                log(key, "build deleted %s" % idx, 'error')
                continue
            if res['state'] == 1:
                # Completed build: push the task to the job queue.
                jobqueue.enqueue(task)
                log(key, "in job queue %s" % idx, 'info')
                continue
            if res['state'] == 0:
                # Still building: put the task back into the build queue.
                buildqueue.enqueue(task)
                log(key, "in build queue %s" % idx, 'info')
                continue
        except Exception as error:
            log(key, str(error), 'error')
Example 4: produce_jobs
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
def produce_jobs(infox):
    """ Queue the jobs into jobqueue
    :args infox: list of dictionaries containing the image url and the buildid
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()
    family_mapping = {
        'Cloud_Base': 'b',
        'Atomic': 'a'
    }
    session = init_model()
    timestamp = datetime.datetime.now()
    for info in infox:
        image_name = info['path'].split('.x86_64')[0].split('/')[-1]
        jd = ComposeJobDetails(
            arch=info['arch'],
            compose_id=info['compose']['id'],
            created_on=timestamp,
            family=family_mapping[info['subvariant']],
            image_url=info['absolute_path'],
            last_updated=timestamp,
            release=info['compose']['release'],
            status='q',
            subvariant=info['subvariant'],
            user='admin',
            image_format=info['format'],
            image_type=info['type'],
            image_name=image_name,
        )
        session.add(jd)
        session.commit()
        job_details_id = jd.id
        log.info('Save {jd_id} to database'.format(jd_id=job_details_id))
        info.update({'job_id': jd.id})
        task = Task(info)
        jobqueue.enqueue(task)
        log.info('Enqueue {jd_id} to redis'.format(jd_id=job_details_id))
        publish_to_fedmsg(topic='image.queued',
                          compose_url=info['absolute_path'],
                          compose_id=info['compose']['id'],
                          image_name=image_name,
                          status='queued',
                          job_id=info['job_id'],
                          release=info['compose']['release'],
                          family=jd.family.value,
                          type=info['type'])
    session.close()
Example 5: update
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
def update(data):
    '''
    Updates the git repo for the given user
    :arg user: github username
    :arg repo: Code repo name
    '''
    queue = Queue('puluupdates')
    if not queue.connect():
        return
    task = Task(data=data, raw=True)
    queue.enqueue(task)
Example 6: upload_file
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
def upload_file():
    if request.method == "POST":
        file = request.files["file"]
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(APP.config["UPLOAD_FOLDER"], filename))
            # Now add the information to the queue for processing
            t = Task({"filename": filename})
            queue = Queue("incoming_files")
            queue.connect()
            queue.enqueue(t)
            return "Log uploaded successfully."
    return """
Example 7: LogBot
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
class LogBot(irc.IRCClient):
    """A logging IRC bot."""

    nickname = 'pyconsprints'

    def __init__(self, channel):
        self.chn = '#' + channel
        self.qs_queue = []
        self.logger = None
        self.q = Queue('bug-messages')
        self.q.connect()
        self.channel_admin = ['kushal', ]

    def connectionMade(self):
        irc.IRCClient.connectionMade(self)
        self.islogging = False
        self._namescallback = {}

    def connectionLost(self, reason):
        irc.IRCClient.connectionLost(self, reason)
        self.islogging = False

    def signedOn(self):
        """Called when the bot has successfully signed on to the server."""
        self.join(self.factory.channel)

    def privmsg(self, user, channel, msg):
        """This will get called when the bot receives a message."""
        user = user.split('!', 1)[0]
        if user == BOTNAME:
            print '[[%s]]' % msg
            task = Task(msg)
            self.q.enqueue(task)
        user_cond = user in self.channel_admin
        if msg == '#masters' and user_cond:
            self.msg(self.chn, "My current masters are: %s" % ",".join(self.channel_admin))
        if msg.startswith('#add:') and user_cond:
            try:
                name = msg.split()[1]
                print name
                self.channel_admin.append(name)
                self.msg(self.chn, '%s is a master now.' % name)
            except Exception as err:
                print err
        if msg.startswith('#test:') and user_cond:
            bugid = msg.split()[1]
            msg = 'dummy/issue%s' % bugid
            task = Task(msg)
            self.q.enqueue(task)
Example 8: compile
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
def compile(request):
    """
    Enqueue the task to Queue
    """
    filename = request.POST.get('filename', False)
    text = request.POST.get('text', False)
    if filename is False:
        return HttpResponse(json.dumps({'output': 'Invalid filename'}),
                            content_type="application/json")
    if text is False:
        return HttpResponse(json.dumps({'output': 'Empty file'}),
                            content_type="application/json")
    try:
        queue = Queue('rcc')
        queue.connect()
        task = Task({'filename': filename, 'text': text})
        job = queue.enqueue(task)
    except:
        return HttpResponse(json.dumps({'output': 'Error creating Job'}),
                            content_type="application/json")
    while True:
        if job.result is None:
            continue
        break
    return HttpResponse(json.dumps({'output': job.result}),
                        content_type="application/json")
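The while loop at the end of this example busy-waits on job.result. The job handle returned by enqueue also supports wait(), as Example 12 below shows, so a blocking variant is possible. Here is a minimal self-contained sketch under that assumption; the function name is hypothetical, the 'rcc' queue is reused from the example above, and a running worker that eventually publishes a result is assumed.
# Hypothetical blocking variant of the view above; assumes the 'rcc' queue
# and a worker process that eventually sets the job result.
from retask.task import Task
from retask.queue import Queue

def compile_blocking(filename, text):
    queue = Queue('rcc')
    queue.connect()
    job = queue.enqueue(Task({'filename': filename, 'text': text}))
    job.wait()          # wait for the worker to publish a result (see Example 12)
    return job.result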
Example 9: RedisQueue
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
class RedisQueue(object):

    def __init__(self, host, name, port=6379, password=None):
        self.super_queue = Queue(
            name,
            {
                'host': host,
                'port': port,
                'db': 0,
                'password': password,
            })
        self.super_queue.connect()

    def get(self):
        return self.super_queue.wait()

    def put(self, data):
        self.super_queue.enqueue(Task(data))
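For illustration, the wrapper above could be driven as follows; the host, queue name and payload are placeholders, and a reachable Redis server is assumed. get() simply returns whatever Queue.wait() yields, so the payload is read from the returned task's data attribute.
# Hypothetical usage of the RedisQueue wrapper; host, queue name and payload
# are placeholders, and a reachable Redis server is assumed.
rq = RedisQueue('localhost', 'myqueue')
rq.put({'user': 'kushal', 'url': 'http://kushaldas.in'})   # wraps the dict in a Task and enqueues it
task = rq.get()                                            # waits via Queue.wait() for the next task
print(task.data)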
Example 10: compile
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
def compile(request):
    """
    Enqueue the task to Queue
    """
    filename = request.POST.get('filename', False)
    text = request.POST.get('text', False)
    if filename is False:
        return HttpResponse(json.dumps({'error': 'Invalid filename'}))
    if text is False:
        return HttpResponse(json.dumps({'error': 'Empty file'}))
    try:
        queue = Queue('rcc')
        queue.connect()
        task = Task({'filename': filename, 'text': text})
        job = queue.enqueue(task)
    except:
        return HttpResponse(json.dumps({'error': 'Error creating Job'}))
    return HttpResponse(json.dumps({'status': 'Job Created'}))
Example 11: runTest
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
def runTest(self):
    queue = Queue('testqueue')
    queue.connect()
    t = Task({'name': 'kushal'})
    self.assertTrue(queue.enqueue(t)[0])
Example 12: Queue
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
from retask.task import Task
from retask.queue import Queue
queue = Queue('example')
info1 = {'user': 'Fedora planet', 'url': 'http://planet.fedoraproject.org'}
task1 = Task(info1)
queue.connect()
job = queue.enqueue(task1)
job.wait()
print job.result
Example 13: BackendConfigReader
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
# coding: utf-8
NUM_QUEUES = 2

import sys
sys.path.append("/usr/share/copr/")

from retask.task import Task
from retask.queue import Queue
from backend.helpers import BackendConfigReader

opts = BackendConfigReader().read()
redis_config = {
    'host': opts['redis_host'],
    'port': opts['redis_port'],
    'db': opts['redis_db'],
}

for i in range(0, NUM_QUEUES):
    print("## Queue {}".format(i))
    q = Queue("copr-be-{}".format(i), config=redis_config)
    q.connect()
    save_q = []
    while q.length != 0:
        task = q.dequeue()
        print task.data
        save_q.append(task)
    for t in save_q:
        q.enqueue(t)
Example 14: Queue
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
from retask.task import Task
from retask.queue import Queue
queue = Queue('example')
info1 = {'user':'kushal', 'url':'http://kushaldas.in'}
info2 = {'user':'fedora planet', 'url':'http://planet.fedoraproject.org'}
task1 = Task(info1)
task2 = Task(info2)
queue.connect()
queue.enqueue(task1)
queue.enqueue(task2)
Example 15: Channel
# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import enqueue [as alias]
class Channel(object):
    """
    Abstraction above retask (the set of "channels" between backend(s),
    jobgrabber and workers). We could use multiple backends and/or a different
    "atomic" medium (an implementation other than Queue) in the future. But
    make sure nobody needs to touch the "medium" directly.
    """
    def __init__(self, opts, log=None):
        self.log = log
        self.opts = opts
        # channel for Backend <--> JobGrabber communication
        self.jg_start = Queue("jg_control_start")
        # channels for JobGrabber <--> [[Builders]] communication
        self.build_queues = dict()
        while not self.jg_start.connect():
            wait_log(self.log, "waiting for redis", 5)

    def _get_queue(self, bgroup):
        if bgroup not in self.build_queues:
            q_id = "copr-be-{0}".format(bgroup)
            q = Queue(q_id)
            if not q.connect():
                # As we already connected to jg_control_message, this should
                # also be OK.
                raise Exception("can't connect to redis, should never happen!")
            # cache the queue so later calls and remove_all_builds() reuse it
            self.build_queues[bgroup] = q
            return q
        return self.build_queues[bgroup]

    def add_build(self, bgroup, build):
        """ this should be used by job_grab only for now """
        q = self._get_queue(bgroup)
        try:
            q.enqueue(Task(build))
        except Exception as err:
            # I've seen issues where Task() was not able to jsonify urllib exceptions
            if not self.log:
                return False
            self.log.error("can't enqueue build {0}, reason:\n{1}".format(
                build, err
            ))
        return True

    # Builder's API
    def get_build(self, bgroup):
        """
        Return the data of the next task in the queue, or None if it is empty
        """
        q = self._get_queue(bgroup)
        t = q.dequeue()
        return t.data if t else None

    # JobGrab's API
    def backend_started(self):
        return self.jg_start.length

    def job_graber_initialized(self):
        while self.jg_start.dequeue():
            pass

    def remove_all_builds(self):
        for bgroup in self.build_queues:
            q = self._get_queue(bgroup)
            while q.dequeue():
                pass
        self.build_queues = dict()

    # Backend's API
    def backend_start(self):
        """ Notify jobgrab about service start. """
        if not self.jg_start.enqueue(Task("start")):
            raise Exception("can't append to retask queue, should never happen!")
        while self.jg_start.length:
            wait_log(self.log, "waiting until jobgrabber initializes queue")
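To make the split between the job grabber and builder sides concrete, here is a hedged sketch of how the Channel above might be driven; opts, the build payload and the stand-in wait_log helper are placeholders rather than copr's real configuration or helpers, and a reachable Redis server is assumed.
# Hypothetical wiring of the Channel class; opts, the build payload and the
# wait_log stand-in are placeholders, not copr's actual code.
import logging
import time

def wait_log(log, msg, seconds=5):
    # minimal stand-in for the helper Channel expects
    if log:
        log.warning(msg)
    time.sleep(seconds)

log = logging.getLogger('backend')
channel = Channel(opts={}, log=log)      # __init__ loops until Redis is reachable

# job grabber side: enqueue a build for builder group 0
channel.add_build(0, {'build_id': 1234, 'chroot': 'fedora-rawhide-x86_64'})

# builder side: fetch the next build for group 0 (None when the queue is empty)
build = channel.get_build(0)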