

Python Queue.connect Method Code Examples

This article collects typical usage examples of the Python method retask.queue.Queue.connect. If you are wondering what Queue.connect does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the retask.queue.Queue class that provides this method.


The sections below present 15 code examples of the Queue.connect method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
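Before the individual examples, here is a minimal producer/consumer sketch of the pattern most of them follow. It assumes a Redis server is reachable with retask's default connection settings; the queue name 'demo', the payload, and the Task import path are illustrative assumptions that may need adjusting for your retask version.

from retask.queue import Queue
from retask.task import Task  # import path assumed; newer retask releases also expose retask.Task

# Producer side: connect to the named queue and push a task.
queue = Queue('demo')
queue.connect()  # establishes the Redis connection used by enqueue/wait below
queue.enqueue(Task({'filename': 'hello.c'}))

# Consumer side: block until a task arrives, then read its payload.
worker_queue = Queue('demo')
worker_queue.connect()
task = worker_queue.wait()
print(task.data)  # {'filename': 'hello.c'}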

Example 1: main

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
def main():
    jobqueue = Queue('jobqueue')
    jobqueue.connect()
    while True:
        task = jobqueue.wait()
        log.debug("%s", task.data)
        auto_job(task.data)
Developer ID: TridevGuha, Project: autocloud, Lines of code: 9, Source file: autocloud_job.py

Example 2: notifier

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
    def notifier(self):
        """ Connect to the instruction queue and notify bugyou to create a queue
        for the plugin and start pushing the fedmsg messages.
        """
        queue = Queue('instruction')
        queue.connect()
        for plugin in self.plugins:
            try:
                topic = self.config.get(plugin, 'topic')
            except ConfigParser.NoOptionError:
                log.error("Config does not exists")
            if topic is None:
                log.info("Config does not exists")
                continue

            payload = {
                'type': 'create',
                'queue_name': plugin,
                'topic': topic,
            }
            task = Task(payload)
            queue.enqueue(task)

            if plugin in self.active_plugins:
                Plugin = self.active_plugins[plugin]
                plugin_obj = Plugin()
                plugin_obj.initialize()
Developer ID: sayanchowdhury, Project: bugyou_plugins, Lines of code: 29, Source file: cntrl.py

Example 3: compile

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
def compile(request):
    """
    Enqueue the task to Queue
    """
    filename = request.POST.get('filename', False)
    text = request.POST.get('text', False)

    if filename is False:
        return HttpResponse(json.dumps({'output':'Invalid filename'}),
                            content_type="application/json")

    if text is False:
        return HttpResponse(json.dumps({'output':'Empty file'}),
                            content_type="application/json")

    try:
        queue = Queue('rcc')
        queue.connect()
        task = Task({'filename':filename, 'text':text})
        job = queue.enqueue(task)
    except:
        return HttpResponse(json.dumps({'output':'Error creating Job'}),
                            content_type="application/json")

    while True:
        if job.result is None:
            continue
        break

    return HttpResponse(json.dumps({'output' : job.result}),
                        content_type="application/json")
Developer ID: birajkarmakar, Project: rcc, Lines of code: 33, Source file: views.py

Example 4: main

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
def main():
    jobqueue = Queue('jobqueue')
    jobqueue.connect()

    while True:
        task = jobqueue.wait()

        task_data = task.data
        pos, num_images = task_data['pos']

        compose_details = task_data['compose']

        if pos == 1:
            session = init_model()
            compose_id = compose_details['id']
            compose_obj = session.query(ComposeDetails).filter_by(
                compose_id=compose_id).first()
            compose_obj.status = u'r'
            session.commit()


            params = copy.deepcopy(compose_details)
            params.update({'status': 'running'})
            publish_to_fedmsg(topic='compose.running', **params)

        result, running_status = auto_job(task_data)
Developer ID: gobindaakhuli, Project: fedora-infra-ansible, Lines of code: 28, Source file: autocloud_job.py

Example 5: main

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
def main():
    jobqueue = Queue('jobqueue')
    jobqueue.connect()

    while True:
        task = jobqueue.wait()

        task_data = task.data
        pos, num_images = task_data['pos']

        compose_details = task_data['compose']

        if pos == 1:
            session = init_model()
            compose_id = compose_details['id']
            compose_obj = session.query(ComposeDetails).filter_by(
                compose_id=compose_id).first()

            compose_status = compose_obj.status.code

            # Here the check if the compose_status has completed 'c' is for
            # failsafe. This condition is never to be hit. This is to avoid
            # sending message to fedmsg.
            if compose_status in ('r', 'c'):
                log.info("Compose %s already running. Skipping sending to \
                fedmsg" % compose_id)
            else:
                compose_obj.status = u'r'
                session.commit()

                params = copy.deepcopy(compose_details)
                params.update({'status': 'running'})
                publish_to_fedmsg(topic='compose.running', **params)

        result, running_status = auto_job(task_data)
Developer ID: abhishekg785, Project: autocloud, Lines of code: 37, Source file: autocloud_job.py

Example 6: produce_jobs

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
def produce_jobs(infox):
    """ Queue the jobs into jobqueue
    :args infox: list of dictionaries contains the image url and the buildid
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()

    session = init_model()
    timestamp = datetime.datetime.now()
    for info in infox:
        jd = JobDetails(
            taskid=info['buildid'],
            status='q',
            created_on=timestamp,
            user='admin',
            last_updated=timestamp)
        session.add(jd)
        session.commit()

        job_details_id = jd.id
        log.info('Save {jd_id} to database'.format(jd_id=job_details_id))

        info.update({'job_id': jd.id})
        task = Task(info)
        jobqueue.enqueue(task)
        log.info('Enqueue {jd_id} to redis'.format(jd_id=job_details_id))

        publish_to_fedmsg(topic='image.queued', image_url=info['image_url'],
                          image_name=info['name'], status='queued',
                          buildid=info['buildid'], job_id=info['job_id'],
                          release=info['release'])
Developer ID: ralphbean, Project: autocloud, Lines of code: 33, Source file: __init__.py

Example 7: Worker

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
class Worker(object):
    """ Represents the worker process.  Waits for tasks to come in from the
    webapp and then acts on them.
    """

    def __init__(self):
        self.queue = Queue('commits')
        self.queue.connect()
        # TODO -- set both of these with the config file.
        # Use pyramid tools to load config.
        self.sleep_interval = 1
        self.scratch_dir = "/home/threebean/scratch/pep8bot-scratch"
        try:
            os.makedirs(self.scratch_dir)
        except OSError:
            pass  # Assume that the scratch_dir already exists.

    def run(self):
        while True:
            time.sleep(self.sleep_interval)
            print "Waking"
            if self.queue.length == 0:
                continue

            task = self.queue.dequeue()
            data = task.data
            url = data['repository']['url']

            # TODO -- don't clone this url.  But fork and clone our url.

            name = data['repository']['name']
            owner = data['repository']['owner']['name']
            self.working_dir = tempfile.mkdtemp(
                prefix=owner + '-' + name,
                dir=self.scratch_dir,
            )
            print "** Cloning to", self.working_dir
            print sh.git.clone(url, self.working_dir)
            print "** Processing files."
            for root, dirs, files in os.walk(self.working_dir):

                if '.git' in root:
                    continue

                for filename in files:
                    if filename.endswith(".py"):
                        infile = root + "/" + filename
                        print "** Tidying", infile
                        tmpfile = infile + ".bak"
                        script = os.path.expanduser(
                            "~/devel/PythonTidy/PythonTidy.py"
                        )
                        sh.python(script, infile, tmpfile)
                        shutil.move(tmpfile, infile)

            with directory(self.working_dir):
                print sh.pwd()
                print sh.git.status()
Developer ID: pep8bot, Project: pep8bot, Lines of code: 60, Source file: worker.py

Example 8: connect_queues

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
    def connect_queues(self):
        """
        Connects to the retask queues. One queue per builders group.
        """
        for group in self.opts.build_groups:
            queue = Queue("copr-be-{0}".format(group["id"]))
            queue.connect()

            self.task_queues_by_group[group["name"]] = queue
            for arch in group["archs"]:
                self.task_queues_by_arch[arch] = queue
Developer ID: evilkost, Project: copr, Lines of code: 13, Source file: job_grab.py

Example 9: monitor_buildqueue

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
def monitor_buildqueue():
    """
    This function monitors the build queue.

    If the build is still on then it puts it back to the queue.
    If the build is finished then it goes to the job queue.
    """
    key = get_key('darkbuildqueue')
    config = get_redis_config()
    jobqueue = Queue('jobqueue', config)
    jobqueue.connect()
    buildqueue = Queue('buildqueue', config)
    buildqueue.connect()
    rdb = redis_connection()
    if not rdb:
        log(key, 'redis is missing', 'error')
        return None
    rdb.set('darkbuildqueue-status', '1')
    while True:
        if check_shutdown():
            break
        try:
            time.sleep(60)
            length = buildqueue.length
            if length == 0:
                log(key, "Sleeping, no buildqueue job", 'info')
                time.sleep(60)
                continue
            task = buildqueue.dequeue()
            kojiurl = task.data['kojiurl']
            idx = task.data['jobid']
            kc = koji.ClientSession(kojiurl, {'debug': False, 'password': None,\
                            'debug_xmlrpc': False, 'user': None})

            res = kc.getBuild(idx)
            if not res:
                #We reached to the new build yet to start
                #Time to sleep
                log(key, "build deleted %s" % idx, 'error')
                continue
            if res['state'] == 1:
                #completed build now push to our redis queue
                jobqueue.enqueue(task)
                log(key, "in job queue %s" % idx, 'info')
                continue

            if res['state'] == 0:
                #building state
                buildqueue.enqueue(task)
                log(key, "in build queue %s" % idx, 'info')
                continue

        except Exception, error:
            log(key, str(error), 'error')
Developer ID: Ghost-script, Project: darkserver, Lines of code: 56, Source file: libimporter.py

Example 10: produce_jobs

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
def produce_jobs(infox):
    """ Queue the jobs into jobqueue
    :args infox: list of dictionaries contains the image url and the buildid
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()

    family_mapping = {
        'Cloud_Base': 'b',
        'Atomic': 'a'
    }

    session = init_model()
    timestamp = datetime.datetime.now()
    for info in infox:
        image_name = info['path'].split('.x86_64')[0].split('/')[-1]
        jd = ComposeJobDetails(
            arch=info['arch'],
            compose_id=info['compose']['id'],
            created_on=timestamp,
            family=family_mapping[info['subvariant']],
            image_url=info['absolute_path'],
            last_updated=timestamp,
            release=info['compose']['release'],
            status='q',
            subvariant=info['subvariant'],
            user='admin',
            image_format=info['format'],
            image_type=info['type'],
            image_name=image_name,
        )
        session.add(jd)
        session.commit()

        job_details_id = jd.id
        log.info('Save {jd_id} to database'.format(jd_id=job_details_id))

        info.update({'job_id': jd.id})
        task = Task(info)
        jobqueue.enqueue(task)
        log.info('Enqueue {jd_id} to redis'.format(jd_id=job_details_id))

        publish_to_fedmsg(topic='image.queued',
                          compose_url=info['absolute_path'],
                          compose_id=info['compose']['id'],
                          image_name=image_name,
                          status='queued',
                          job_id=info['job_id'],
                          release=info['compose']['release'],
                          family=jd.family.value,
                          type=info['type'])

        session.close()
Developer ID: AdamWill, Project: autocloud, Lines of code: 55, Source file: __init__.py

Example 11: upload_file

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
def upload_file():
    if request.method == "POST":
        file = request.files["file"]
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(APP.config["UPLOAD_FOLDER"], filename))
            # Now add the information in the queue for processing
            t = Task({"filename": filename})
            queue = Queue("incoming_files")
            queue.connect()
            queue.enqueue(t)
            return "Log uploaded successfully."

    return """
Developer ID: kushaldas, Project: teenspirit, Lines of code: 16, Source file: app.py

Example 12: LogBot

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
class LogBot(irc.IRCClient):
    """A logging IRC bot."""
    
    nickname = 'pyconsprints'

    def  __init__(self, channel):
        self.chn = '#'+channel
        self.qs_queue = []
        self.logger = None
        self.q = Queue('bug-messages')
        self.q.connect()
        self.channel_admin = ['kushal',]

    def connectionMade(self):
        irc.IRCClient.connectionMade(self)
        self.islogging = False
        self._namescallback = {}

    def connectionLost(self, reason):
        irc.IRCClient.connectionLost(self, reason)
        self.islogging = False

    def signedOn(self):
        """Called when bot has succesfully signed on to server."""
        self.join(self.factory.channel)

    def privmsg(self, user, channel, msg):
        """This will get called when the bot receives a message."""
        user = user.split('!', 1)[0]
        if user == BOTNAME:
            print '[[%s]]' % msg
            task = Task(msg)
            self.q.enqueue(task)
        user_cond = user in self.channel_admin
        if msg == '#masters' and user_cond:
            self.msg(self.chn, "My current masters are: %s" % ",".join(self.channel_admin))
        if msg.startswith('#add:') and user_cond:
            try:
                name = msg.split()[1]
                print name
                self.channel_admin.append(name)
                self.msg(self.chn,'%s is a master now.' % name)
            except Exception, err:
                print err
        if msg.startswith('#test:') and user_cond:
            bugid = msg.split()[1]
            msg = 'dummy/issue%s' % bugid
            task = Task(msg)
            self.q.enqueue(task)
Developer ID: kushaldas, Project: pypatcher, Lines of code: 51, Source file: patcherbot.py

Example 13: init_task_queues

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
    def init_task_queues(self):
        """
        Connect to the retask.Queue for each group_id. Remove old tasks from queues.
        """
        try:
            for group in self.opts.build_groups:
                group_id = group["id"]
                queue = Queue("copr-be-{0}".format(group_id))
                queue.connect()
                self.task_queues[group_id] = queue
        except ConnectionError:
            raise CoprBackendError(
                "Could not connect to a task queue. Is Redis running?")

        self.clean_task_queues()
Developer ID: evilkost, Project: copr, Lines of code: 17, Source file: backend.py

Example 14: RedisQueue

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
class RedisQueue(object):
    def __init__(self, host, name, port=6379, password=None):
        self.super_queue = Queue(
                name,
                {
                    'host': host,
                    'port': port,
                    'db': 0,
                    'password': password,
                })
        self.super_queue.connect()

    def get(self):
        return self.super_queue.wait()

    def put(self, data):
        self.super_queue.enqueue(Task(data))
Developer ID: rossdylan, Project: netcrawl, Lines of code: 19, Source file: RedisQueue.py
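
Examples 9 and 14 above construct Queue with an explicit Redis configuration dictionary instead of relying on the defaults. Here is a minimal sketch of that pattern; the host, port, and password values are placeholders, not settings from any of the projects above.

from retask.queue import Queue

# Placeholder Redis settings; substitute your own host/port/password.
config = {
    'host': 'redis.example.com',
    'port': 6379,
    'db': 0,
    'password': None,
}

queue = Queue('jobqueue', config)
queue.connect()
print(queue.length)  # number of tasks currently waiting in the queue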

Example 15: main

# Required import: from retask.queue import Queue [as alias]
# Or: from retask.queue.Queue import connect [as alias]
def main():
    queue = Queue('rcc')
    queue.connect()
    while True:
        task = queue.wait()
        name = task.data['filename']
        print "Received", name
        content = task.data['text']
        destdir = writesource(name, content)
        temp_path = os.path.join(destdir, name)
        x = os.path.join(destdir, 'test')
        out, err = system('gcc ' + temp_path + ' -o ' + x)
        if err:
            queue.send(task, err, 120)
        else:
            out1, err1 = system(x)
            if err1:
                queue.send(task, err1, 120)
            else:
                queue.send(task, out1, 120)
Developer ID: birajkarmakar, Project: rcc, Lines of code: 22, Source file: worker.py


Note: The retask.queue.Queue.connect examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are excerpted from open-source projects contributed by their respective developers, and the source code copyright belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.