

Python AMQPClient.exchange_declare Method Code Examples

This article collects typical usage examples of the Python method synnefo.lib.amqp.AMQPClient.exchange_declare. If you are wondering what AMQPClient.exchange_declare does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples of the enclosing class, synnefo.lib.amqp.AMQPClient.


Seven code examples of the AMQPClient.exchange_declare method are shown below, ordered by popularity.
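
Before diving into the examples, here is a minimal sketch of the typical call pattern, assuming placeholder broker URL, exchange, and routing-key names (none of them come from the projects below): connect the client, declare a topic exchange, publish a message, and close the connection.

# Minimal sketch of the AMQPClient.exchange_declare call pattern.
# The broker URL, exchange name, and routing key are placeholders.
import json

from synnefo.lib.amqp import AMQPClient

client = AMQPClient(hosts=["amqp://user:password@localhost:5672"])
client.connect()

# Declare a topic exchange; the examples below pass the type either
# positionally or via the `type` keyword argument.
client.exchange_declare(exchange="example_exchange", type="topic")

client.basic_publish(exchange="example_exchange",
                     routing_key="example.event",
                     body=json.dumps({"hello": "world"}))
client.close()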

Example 1: main

# Required import: from synnefo.lib.amqp import AMQPClient [as alias]
# Or: from synnefo.lib.amqp.AMQPClient import exchange_declare [as alias]
def main():

    usage = "Usage: %s <instance_name>\n" % PROGNAME

    if len(sys.argv) != 2:
        sys.stderr.write(usage)
        return 1

    instance_name = sys.argv[1]

    # WARNING: This assumes that instance names
    # are of the form prefix-id, and uses the prefix to
    # determine the AMQP routing key.
    prefix = instance_name.split('-')[0]
    routekey = "ganeti.%s.event.progress" % prefix
    amqp_client = AMQPClient(confirm_buffer=10)
    amqp_client.connect()
    amqp_client.exchange_declare(settings.EXCHANGE_GANETI, "topic")

    for msg in jsonstream(sys.stdin):
        msg['event_time'] = split_time(time.time())
        msg['instance'] = instance_name

        # log to stderr
        sys.stderr.write("[MONITOR] %s\n" % json.dumps(msg))

        # then send it over AMQP
        amqp_client.basic_publish(exchange=settings.EXCHANGE_GANETI,
                                  routing_key=routekey,
                                  body=json.dumps(msg))

    amqp_client.close()
    return 0
Author: AthinaB, Project: synnefo, Lines: 35, Source: progress_monitor.py

Example 2: Queue

# Required import: from synnefo.lib.amqp import AMQPClient [as alias]
# Or: from synnefo.lib.amqp.AMQPClient import exchange_declare [as alias]
class Queue(object):
    """Queue.
       Required constructor parameters: hosts, exchange, client_id.
    """

    def __init__(self, **params):
        hosts = params['hosts']
        self.exchange = params['exchange']
        self.client_id = params['client_id']

        self.client = AMQPClient(hosts=hosts)
        self.client.connect()

        self.client.exchange_declare(exchange=self.exchange,
                                     type='topic')

    def send(self, message_key, user, instance, resource, value, details):
        body = Message(
            self.client_id, user, instance, resource, value, details)
        self.client.basic_publish(exchange=self.exchange,
                                  routing_key=message_key,
                                  body=json.dumps(body.__dict__))

    def close(self):
        self.client.close()
Author: antonis-m, Project: synnefo, Lines: 27, Source: queue.py
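
A minimal usage sketch for the Queue wrapper shown in Example 2; the broker URL, exchange name, client id, and message fields are placeholders chosen for illustration.

# Hypothetical usage of the Queue class from Example 2; all values are
# placeholders, and Message is the helper class used inside queue.py.
q = Queue(hosts=["amqp://user:password@localhost:5672"],
          exchange="example_exchange",
          client_id="example-client")
q.send(message_key="resource.update",   # used as the routing key
       user="user@example.org",
       instance="instance-1",
       resource="cpu",
       value=2,
       details={})
q.close()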

Example 3: JobFileHandler

# Required import: from synnefo.lib.amqp import AMQPClient [as alias]
# Or: from synnefo.lib.amqp.AMQPClient import exchange_declare [as alias]
class JobFileHandler(pyinotify.ProcessEvent):
    def __init__(self, logger, cluster_name):
        pyinotify.ProcessEvent.__init__(self)
        self.logger = logger
        self.cluster_name = cluster_name

        # Set max_retries to 0 for unlimited retries.
        self.client = AMQPClient(hosts=settings.AMQP_HOSTS, confirm_buffer=25,
                                 max_retries=0, logger=logger)

        handler_logger.info("Attempting to connect to RabbitMQ hosts")

        self.client.connect()
        handler_logger.info("Connected succesfully")

        self.client.exchange_declare(settings.EXCHANGE_GANETI, type='topic')

        self.op_handlers = {"INSTANCE": self.process_instance_op,
                            "NETWORK": self.process_network_op,
                            "CLUSTER": self.process_cluster_op}
                            # "GROUP": self.process_group_op}

    def process_IN_CLOSE_WRITE(self, event):
        self.process_IN_MOVED_TO(event)

    def process_IN_MOVED_TO(self, event):
        jobfile = os.path.join(event.path, event.name)
        if not event.name.startswith("job-"):
            self.logger.debug("Not a job file: %s" % event.path)
            return

        try:
            data = utils.ReadFile(jobfile)
        except IOError:
            return

        data = serializer.LoadJson(data)
        job = jqueue._QueuedJob.Restore(None, data, False, False)

        job_id = int(job.id)

        for op in job.ops:
            op_id = op.input.OP_ID

            msg = None
            try:
                handler_fn = self.op_handlers[op_id.split('_')[1]]
                msg, routekey = handler_fn(op, job_id)
            except KeyError:
                pass

            if not msg:
                self.logger.debug("Ignoring job: %s: %s", job_id, op_id)
                continue

            # Generate a unique message identifier
            event_time = get_time_from_status(op, job)

            # Get the last line of the op log as message
            try:
                logmsg = op.log[-1][-1]
            except IndexError:
                logmsg = None

            # Add shared attributes for all operations
            msg.update({"event_time": event_time,
                        "operation": op_id,
                        "status": op.status,
                        "cluster": self.cluster_name,
                        "logmsg": logmsg,
                        "result": op.result,
                        "jobId": job_id})

            if op.status == "success":
                msg["result"] = op.result

            if op_id == "OP_INSTANCE_CREATE" and op.status == "error":
                # In case an instance creation fails send the job input
                # so that the job can be retried if needed.
                msg["job_fields"] = op.Serialize()["input"]

            msg = json.dumps(msg)

            self.logger.debug("Delivering msg: %s (key=%s)", msg, routekey)

            # Send the message to RabbitMQ
            self.client.basic_publish(settings.EXCHANGE_GANETI,
                                      routekey,
                                      msg)

    def process_instance_op(self, op, job_id):
        """ Process OP_INSTANCE_* opcodes.

        """
        input = op.input
        op_id = input.OP_ID

        instances = None
        instances = get_field(input, 'instance_name')
        if not instances:
#......... remaining code omitted .........
Author: apyrgio, Project: synnefo, Lines: 103, Source: eventd.py

Example 4: JobFileHandler

# Required import: from synnefo.lib.amqp import AMQPClient [as alias]
# Or: from synnefo.lib.amqp.AMQPClient import exchange_declare [as alias]
class JobFileHandler(pyinotify.ProcessEvent):
    def __init__(self, logger, cluster_name):
        pyinotify.ProcessEvent.__init__(self)
        self.logger = logger
        self.cluster_name = cluster_name

        # Set max_retries to 0 for unlimited retries.
        self.client = AMQPClient(hosts=settings.AMQP_HOSTS, confirm_buffer=25,
                                 max_retries=0, logger=logger)

        logger.info("Attempting to connect to RabbitMQ hosts")

        self.client.connect()
        logger.info("Connected successfully")

        self.ganeti_master = get_ganeti_master()
        logger.debug("Ganeti Master Node: %s", self.ganeti_master)

        self.ganeti_node = get_ganeti_node()
        logger.debug("Current Ganeti Node: %s", self.ganeti_node)

        # Check if this is the master node
        logger.info("Checking if this is Ganeti Master of %s cluster: %s",
                    self.cluster_name,
                    "YES" if self.ganeti_master == self.ganeti_node else "NO")

        self.client.exchange_declare(settings.EXCHANGE_GANETI, type='topic')

        self.op_handlers = {"INSTANCE": self.process_instance_op,
                            "NETWORK": self.process_network_op,
                            "CLUSTER": self.process_cluster_op,
                            # "GROUP": self.process_group_op}
                            "TAGS": self.process_tag_op}

    def process_IN_CLOSE_WRITE(self, event):
        self.process_IN_MOVED_TO(event)

    def process_IN_MOVED_TO(self, event):
        jobfile = os.path.join(event.path, event.name)
        if not event.name.startswith("job-"):
            self.logger.debug("Not a job file: %s" % event.path)
            return

        try:
            data = utils.ReadFile(jobfile)
        except IOError:
            return

        data = serializer.LoadJson(data)
        job = jqueue._QueuedJob.Restore(None, data, False, False)

        job_id = int(job.id)

        for op in job.ops:
            op_id = op.input.OP_ID

            msg = None
            try:
                handler_fn = self.op_handlers[op_id.split('_')[1]]
                msg, routekey = handler_fn(op, job_id)
            except KeyError:
                pass

            if not msg:
                self.logger.debug("Ignoring job: %s: %s", job_id, op_id)
                continue

            # Generate a unique message identifier
            event_time = get_time_from_status(op, job)

            # Get the last line of the op log as message
            try:
                logmsg = op.log[-1][-1]
            except IndexError:
                logmsg = None

            # Add shared attributes for all operations
            msg.update({"event_time": event_time,
                        "operation": op_id,
                        "status": op.status,
                        "cluster": self.cluster_name,
                        "logmsg": logmsg,
                        "result": op.result,
                        "jobId": job_id})

            if op.status == "success":
                msg["result"] = op.result

            if op_id == "OP_INSTANCE_CREATE" and op.status == "error":
                # In case an instance creation fails send the job input
                # so that the job can be retried if needed.
                msg["job_fields"] = op.Serialize()["input"]

            # Check if this is the master node. Only the master node should
            # deliver messages to RabbitMQ.
            current_master = get_ganeti_master()
            if self.ganeti_master != current_master:
                self.logger.warning("Ganeti Master changed! New Master: %s",
                                    current_master)

#......... remaining code omitted .........
Author: grnet, Project: synnefo, Lines: 103, Source: eventd.py

Example 5: __init__

# Required import: from synnefo.lib.amqp import AMQPClient [as alias]
# Or: from synnefo.lib.amqp.AMQPClient import exchange_declare [as alias]
class Dispatcher:
    debug = False

    def __init__(self, debug=False):
        self.debug = debug
        self._init()

    def wait(self):
        log.info("Waiting for messages..")
        timeout = 600
        while True:
            try:
                # Close the Django DB connection before processing
                # every incoming message. This plays nicely with
                # DB connection pooling, if enabled and allows
                # the dispatcher to recover from broken connections
                # gracefully.
                close_connection()
                msg = self.client.basic_wait(timeout=timeout)
                if not msg:
                    log.warning(
                        "Idle connection for %d seconds. Will connect"
                        " to a different host. Verify that"
                        " snf-ganeti-eventd is running!!",
                        timeout,
                    )
                    self.client.reconnect()
            except SystemExit:
                break
            except Exception as e:
                log.exception("Caught unexpected exception: %s", e)

        self.client.basic_cancel()
        self.client.close()

    def _init(self):
        log.info("Initializing")

        self.client = AMQPClient(logger=log_amqp)
        # Connect to AMQP host
        self.client.connect()

        # Declare queues and exchanges
        exchange = settings.EXCHANGE_GANETI
        exchange_dl = queues.convert_exchange_to_dead(exchange)
        self.client.exchange_declare(exchange=exchange, type="topic")
        self.client.exchange_declare(exchange=exchange_dl, type="topic")

        for queue in queues.QUEUES:
            # Queues are mirrored to all RabbitMQ brokers
            self.client.queue_declare(queue=queue, mirrored=True,
                                      dead_letter_exchange=exchange_dl)
            # Declare the corresponding dead-letter queue
            queue_dl = queues.convert_queue_to_dead(queue)
            self.client.queue_declare(queue=queue_dl, mirrored=True)

        # Bind queues to handler methods
        for binding in queues.BINDINGS:
            try:
                callback = getattr(callbacks, binding[3])
            except AttributeError:
                log.error("Cannot find callback %s", binding[3])
                raise SystemExit(1)
            queue = binding[0]
            exchange = binding[1]
            routing_key = binding[2]

            self.client.queue_bind(queue=queue, exchange=exchange,
                                   routing_key=routing_key)

            self.client.basic_consume(queue=binding[0],
                                      callback=callback,
                                      prefetch_count=5)

            queue_dl = queues.convert_queue_to_dead(queue)
            exchange_dl = queues.convert_exchange_to_dead(exchange)
            # Bind the corresponding dead-letter queue
            self.client.queue_bind(queue=queue_dl, exchange=exchange_dl,
                                   routing_key=routing_key)

            log.debug("Binding %s(%s) to queue %s with handler %s",
                      exchange, routing_key, queue, binding[3])
Author: vkoukis, Project: synnefo, Lines: 78, Source: dispatcher.py

Example 6: main

# Required import: from synnefo.lib.amqp import AMQPClient [as alias]
# Or: from synnefo.lib.amqp.AMQPClient import exchange_declare [as alias]
def main():
    parser = OptionParser()
    parser.add_option('-v', '--verbose', action='store_true', default=False,
                      dest='verbose', help='Enable verbose logging')
    parser.add_option('--host', default=BROKER_HOST, dest='host',
                      help='RabbitMQ host (default: %s)' % BROKER_HOST)
    parser.add_option('--port', default=BROKER_PORT, dest='port',
                      help='RabbitMQ port (default: %s)' % BROKER_PORT, type='int')
    parser.add_option('--user', default=BROKER_USER, dest='user',
                      help='RabbitMQ user (default: %s)' % BROKER_USER)
    parser.add_option('--password', default=BROKER_PASSWORD, dest='password',
                      help='RabbitMQ password (default: %s)' % BROKER_PASSWORD)
    parser.add_option('--vhost', default=BROKER_VHOST, dest='vhost',
                      help='RabbitMQ vhost (default: %s)' % BROKER_VHOST)
    parser.add_option('--queue', default=CONSUMER_QUEUE, dest='queue',
                      help='RabbitMQ queue (default: %s)' % CONSUMER_QUEUE)
    parser.add_option('--exchange', default=CONSUMER_EXCHANGE, dest='exchange',
                      help='RabbitMQ exchange (default: %s)' % CONSUMER_EXCHANGE)
    parser.add_option('--key', default=CONSUMER_KEY, dest='key',
                      help='RabbitMQ key (default: %s)' % CONSUMER_KEY)
    parser.add_option('--callback', default=None, dest='callback',
                      help='Callback function to consume messages')
    parser.add_option('--test', action='store_true', default=False,
                      dest='test', help='Produce a dummy message for testing')
    opts, args = parser.parse_args()

    DEBUG = False
    if opts.verbose:
        DEBUG = True
    logging.basicConfig(
        format='%(asctime)s [%(levelname)s] %(name)s %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        level=logging.DEBUG if DEBUG else logging.INFO)
    logger = logging.getLogger('dispatcher')

    host = 'amqp://%s:%s@%s:%s' % (opts.user, opts.password,
                                   opts.host, opts.port)
    queue = opts.queue
    key = opts.key
    exchange = opts.exchange
    
    client = AMQPClient(hosts=[host])
    client.connect()

    if opts.test:
        client.exchange_declare(exchange=exchange,
                                type='topic')
        client.basic_publish(exchange=exchange,
                             routing_key=key,
                             body= json.dumps({"test": "0123456789"}))
        client.close()
        sys.exit()

    callback = None
    if opts.callback:
        cb = opts.callback.rsplit('.', 1)
        if len(cb) == 2:
            __import__(cb[0])
            cb_module = sys.modules[cb[0]]
            callback = getattr(cb_module, cb[1])

    def handle_message(client, msg):
        logger.debug('%s', msg)
        if callback:
            callback(msg)
        client.basic_ack(msg)

    client.queue_declare(queue=queue)
    client.queue_bind(queue=queue,
                      exchange=exchange,
                      routing_key=key)

    client.basic_consume(queue=queue, callback=handle_message)

    try:
        while True:
            client.basic_wait()
    except KeyboardInterrupt:
        pass
    finally:
        client.close()
Author: antonis-m, Project: synnefo, Lines: 82, Source: dispatcher.py

Example 7: __init__

# Required import: from synnefo.lib.amqp import AMQPClient [as alias]
# Or: from synnefo.lib.amqp.AMQPClient import exchange_declare [as alias]
class Dispatcher:
    debug = False

    def __init__(self, debug=False):
        self.debug = debug
        self._init()

    def wait(self):
        log.info("Waiting for messages..")
        timeout = DISPATCHER_RECONNECT_TIMEOUT
        while True:
            try:
                # Close the Django DB connection before processing
                # every incoming message. This plays nicely with
                # DB connection pooling, if enabled and allows
                # the dispatcher to recover from broken connections
                # gracefully.
                close_connection()
                msg = self.client.basic_wait(timeout=timeout)
                if not msg:
                    log.warning("Idle connection for %d seconds. Will connect"
                                " to a different host. Verify that"
                                " snf-ganeti-eventd is running!!", timeout)
                    self.client.reconnect(timeout=1)
            except AMQPConnectionError as e:
                log.error("AMQP connection failed: %s" % e)
                log.warning("Sleeping for %d seconds before retrying to "
                            "connect to an AMQP broker" %
                            DISPATCHER_FAILED_CONNECTION_WAIT)
                time.sleep(DISPATCHER_FAILED_CONNECTION_WAIT)
            except select.error as e:
                if e[0] != errno.EINTR:
                    log.exception("Caught unexpected exception: %s", e)
                    log.warning("Sleeping for %d seconds before retrying to "
                                "connect to an AMQP broker" %
                                DISPATCHER_FAILED_CONNECTION_WAIT)
                    time.sleep(DISPATCHER_FAILED_CONNECTION_WAIT)
                else:
                    break
            except (SystemExit, KeyboardInterrupt):
                break
            except Exception as e:
                log.exception("Caught unexpected exception: %s", e)
                log.warning("Sleeping for %d seconds before retrying to "
                            "connect to an AMQP broker" %
                            DISPATCHER_FAILED_CONNECTION_WAIT)
                time.sleep(DISPATCHER_FAILED_CONNECTION_WAIT)

        log.info("Clean up AMQP connection before exit")
        self.client.basic_cancel(timeout=1)
        self.client.close(timeout=1)

    def _init(self):
        log.info("Initializing")

        # Set confirm buffer to 1 for heartbeat messages
        self.client = AMQPClient(logger=log_amqp, confirm_buffer=1)
        # Connect to AMQP host
        self.client.connect()

        # Declare queues and exchanges
        exchange = settings.EXCHANGE_GANETI
        exchange_dl = queues.convert_exchange_to_dead(exchange)
        self.client.exchange_declare(exchange=exchange,
                                     type="topic")
        self.client.exchange_declare(exchange=exchange_dl,
                                     type="topic")
        for queue in queues.QUEUES:
            # Queues are mirrored to all RabbitMQ brokers
            self.client.queue_declare(queue=queue, mirrored=True,
                                      dead_letter_exchange=exchange_dl)
            # Declare the corresponding dead-letter queue
            queue_dl = queues.convert_queue_to_dead(queue)
            self.client.queue_declare(queue=queue_dl, mirrored=True)

        # Bind queues to handler methods
        for binding in queues.BINDINGS:
            try:
                callback = getattr(callbacks, binding[3])
            except AttributeError:
                log.error("Cannot find callback %s", binding[3])
                raise SystemExit(1)
            queue = binding[0]
            exchange = binding[1]
            routing_key = binding[2]

            self.client.queue_bind(queue=queue, exchange=exchange,
                                   routing_key=routing_key)

            self.client.basic_consume(queue=binding[0],
                                      callback=callback,
                                      prefetch_count=5)

            queue_dl = queues.convert_queue_to_dead(queue)
            exchange_dl = queues.convert_exchange_to_dead(exchange)
            # Bind the corresponding dead-letter queue
            self.client.queue_bind(queue=queue_dl,
                                   exchange=exchange_dl,
                                   routing_key=routing_key)

#......... remaining code omitted .........
Author: gvsurenderreddy, Project: synnefo-1, Lines: 103, Source: dispatcher.py
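
To condense the dead-letter wiring that Examples 5 and 7 perform through the queues helper module, here is a short sketch using literal placeholder names; the "-dl" suffix merely stands in for whatever convert_exchange_to_dead and convert_queue_to_dead actually return.

# Condensed sketch of the dead-letter pattern from Examples 5 and 7.
# Exchange and queue names are placeholders; error handling is omitted.
from synnefo.lib.amqp import AMQPClient

client = AMQPClient()
client.connect()

exchange = "example_exchange"
exchange_dl = "example_exchange-dl"
client.exchange_declare(exchange=exchange, type="topic")
client.exchange_declare(exchange=exchange_dl, type="topic")

# Messages rejected or expired in "example_queue" are re-routed to the
# dead-letter exchange and collected by the "-dl" queue.
client.queue_declare(queue="example_queue", mirrored=True,
                     dead_letter_exchange=exchange_dl)
client.queue_declare(queue="example_queue-dl", mirrored=True)

client.queue_bind(queue="example_queue", exchange=exchange,
                  routing_key="example.#")
client.queue_bind(queue="example_queue-dl", exchange=exchange_dl,
                  routing_key="example.#")
client.close()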


Note: The synnefo.lib.amqp.AMQPClient.exchange_declare examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors; please consult the corresponding project's license before redistributing or reusing the code. Do not reproduce this article without permission.