

Python config.Config Class Code Examples

This article collects typical usage examples of the aquilon.config.Config class in Python. If you are wondering how the Config class is used in practice, how to call it, or what real-world usage looks like, the curated class examples below may help.


The following presents 15 code examples of the Config class, sorted by popularity by default.
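
Before the individual examples, here is a minimal sketch of the Config calls they rely on; the section and option names are taken from the examples below, and the configuration file path is hypothetical:

from aquilon.config import Config

# In the examples below, Config() is constructed with no arguments and still
# sees the broker settings, so it evidently reuses a previously loaded
# configuration; Example 11 shows the explicit configfile= form.
config = Config()                            # or Config(configfile="/path/to/aqd.conf")
kingdir = config.get("broker", "kingdir")    # read a string option
if config.has_option("broker", "git_path"):  # guard against optional settings
    git_path = config.get("broker", "git_path")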

Example 1: sync_domain

def sync_domain(dbdomain, logger=LOGGER, locked=False):
    """Update templates on disk to match contents of branch in template-king.

    If this domain is tracking another, first update the branch in
    template-king with the latest from the tracking branch.  Also save
    the current (previous) commit as a potential rollback point.

    """
    config = Config()
    kingdir = config.get("broker", "kingdir")
    domaindir = os.path.join(config.get("broker", "domainsdir"), dbdomain.name)
    git_env = {"PATH": "%s:%s" % (config.get("broker", "git_path"),
                                  os.environ.get("PATH", ""))}
    if dbdomain.tracked_branch:
        # Might need to revisit if using this helper from rollback...
        run_command(["git", "push", ".",
                     "%s:%s" % (dbdomain.tracked_branch.name, dbdomain.name)],
                    path=kingdir, env=git_env, logger=logger)
    run_command(["git", "fetch", "--prune"], path=domaindir, env=git_env, logger=logger)
    if dbdomain.tracked_branch:
        out = run_command(["git", "rev-list", "-n", "1", "HEAD"],
                          path=domaindir, env=git_env, logger=logger)
        rollback_commit = out.strip()
    try:
        if not locked:
            key = CompileKey(domain=dbdomain.name, logger=logger)
            lock_queue.acquire(key)
        run_command(["git", "reset", "--hard", "origin/%s" % dbdomain.name],
                    path=domaindir, env=git_env, logger=logger)
    finally:
        if not locked:
            lock_queue.release(key)
    if dbdomain.tracked_branch:
        dbdomain.rollback_commit = rollback_commit
Developer: piojo, Project: aquilon, Lines: 34, Source: processes.py

Example 2: onEnter

    def onEnter(self, dbcluster):
        dbdecommissioned = HostLifecycle.get_unique(object_session(dbcluster),
                                                    "decommissioned",
                                                    compel=True)

        config = Config()
        archetype = dbcluster.personality.archetype
        section = "archetype_" + archetype.name
        opt = "allow_cascaded_deco"

        if dbcluster.hosts and (not config.has_option(section, opt) or
                                not config.getboolean(section, opt)):
            raise ArgumentError("Cannot change state to {0}, as {1}'s "
                                "archetype is {2}."
                                .format(dbdecommissioned.name, dbcluster,
                                        archetype.name))

        if dbcluster.virtual_machines:
            raise ArgumentError("Cannot change state to {0}, as {1} has "
                                "{2} VM(s)."
                                .format(dbdecommissioned.name, dbcluster,
                                        len(dbcluster.virtual_machines)))

        for dbhost in dbcluster.hosts:
            dbhost.status.transition(dbhost, dbdecommissioned)
Developer: piojo, Project: aquilon, Lines: 25, Source: clusterlifecycle.py
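
For reference, a hypothetical configuration fragment that would permit the cascaded decommissioning checked above; the archetype name is made up, and the section name follows the "archetype_" + archetype.name pattern used in the code:

[archetype_examplearch]
allow_cascaded_deco = True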

Example 3: discover_network_types

def discover_network_types(dbapi_con, connection_record):  # pylint: disable=W0613
    config = Config()
    if not config.has_option("broker", "default_network_type"):  # pragma: no cover
        raise InternalError("The default_network_type option is missing from "
                            "the [broker] section in the configuration.")

    default_type = config.get("broker", "default_network_type")
    default_section = "network_" + default_type
    if not config.has_section(default_section):  # pragma: no cover
        raise InternalError("The default network type is %s, but there's no "
                            "section named [%s] in the configuration." %
                            (default_type, default_section))

    nettypes = {}

    # This function should be called only once, but you never know...
    if Network.network_type_map:
        return

    for section in config.sections():
        if not section.startswith("network_"):
            continue
        name = section[8:]
        nettypes[name] = NetworkProperties(config, name)
        LOGGER.info("Configured network type %s", name)

    Network.network_type_map = nettypes
    Network.default_network_props = nettypes[default_type]
Developer: piojo, Project: aquilon, Lines: 28, Source: network.py
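
For reference, a hypothetical configuration fragment matching the layout this function expects: [broker] names the default network type, and every section whose name starts with "network_" defines one type (the type names below are made up, and the type-specific options read by NetworkProperties are omitted):

[broker]
default_network_type = example_type

[network_example_type]
# type-specific options consumed by NetworkProperties go here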

Example 4: run_git

def run_git(args, env=None, path=".",
            logger=LOGGER, loglevel=logging.INFO, filterre=None):
    config = Config()
    if env:
        git_env = env.copy()
    else:
        git_env = {}
    env_path = git_env.get("PATH", os.environ.get("PATH", ""))
    git_env["PATH"] = "%s:%s" % (config.get("broker", "git_path"), env_path)

    for name in ["git_author_name", "git_author_email",
                 "git_committer_name", "git_committer_email"]:
        if not config.has_option("broker", name):
            continue
        value = config.get("broker", name)
        git_env[name.upper()] = value

    if isinstance(args, list):
        git_args = args[:]
        if git_args[0] != "git":
            git_args.insert(0, "git")
    else:
        git_args = ["git", args]

    return run_command(git_args, env=git_env, path=path,
                       logger=logger, loglevel=loglevel, filterre=filterre)
Developer: piojo, Project: aquilon, Lines: 26, Source: processes.py
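
A hypothetical usage sketch: run_git prepends "git" to a list argument when it is missing and wraps a bare string, so the first two calls below are equivalent, and the third shows how a plain string is handled (domaindir and logger are made-up placeholders):

# Both run "git fetch --prune" in domaindir with the broker's git on PATH.
run_git(["git", "fetch", "--prune"], path=domaindir, logger=logger)
run_git(["fetch", "--prune"], path=domaindir, logger=logger)

# A bare string is wrapped as ["git", "gc"], i.e. "git gc".
run_git("gc", path=domaindir, logger=logger)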

Example 5: config_proto

    def config_proto(self, node, command):
        desc_node = node.find("message_class")
        if desc_node is None or "name" not in desc_node.attrib or \
           "module" not in desc_node.attrib:
            raise ProtocolError("Invalid protobuf definition for %s." % command)

        module = desc_node.attrib["module"]
        msgclass = desc_node.attrib["name"]

        if module in self.loaded_protocols and \
           self.loaded_protocols[module] == False:
            raise ProtocolError("Protocol %s: previous import attempt was "
                                "unsuccessful" % module)

        if module not in self.loaded_protocols:
            config = Config()
            protodir = config.get("protocols", "directory")

            # Modifying sys.path here is ugly. We could try playing with
            # find_module()/load_module(), but there are dependencies between
            # the protocols, that could fail if sys.path is not set up and the
            # protocols are loaded in the wrong order.
            if protodir not in sys.path:
                sys.path.append(protodir)

            try:
                self.loaded_protocols[module] = __import__(module)
            except ImportError, err:  # pragma: no cover
                self.loaded_protocols[module] = False
                raise ProtocolError("Protocol %s: %s" % (module, err))
Developer: piojo, Project: aquilon, Lines: 30, Source: formatters.py

Example 6: __init__

    def __init__(self, logger=LOGGER):
        config = Config()
        self.logger = logger
        self.dsdb = config.get("broker", "dsdb")
        self.dsdb_use_testdb = config.getboolean("broker", "dsdb_use_testdb")
        self.location_sync = config.getboolean("broker", "dsdb_location_sync")
        self.actions = []
        self.rollback_list = []
Developer: piojo, Project: aquilon, Lines: 8, Source: processes.py

Example 7: outputdirs

    def outputdirs(self):
        """Returns a list of directories that should exist before compiling"""
        config = Config()
        dirs = []
        dirs.append(config.get("broker", "profilesdir"))
        # The regression tests occasionally have issues with panc
        # auto-creating this directory - not sure why.
        if self.domain.clusters:
            dirs.append(os.path.join(config.get("broker", "quattordir"),
                                     "build", "xml", self.domain.name,
                                     "clusters"))
        return dirs
Developer: stdweird, Project: aquilon, Lines: 10, Source: domain.py

Example 8: cache_storage_data

def cache_storage_data(only=None):
    """
    Scan a storeng-style data file, checking each line as we go

    Storeng-style data files are blocks of data. Each block starts
    with a comment describing the fields for all subsequent lines. A
    block can start at any time. Fields are separated by '|'.
    This function will invoke the function after parsing every data
    line. The function will be called with a dict of the fields. If the
    function returns True, then we stop scanning the file, else we continue
    on until there is nothing left to parse.

    dbshare can be a Share
    """

    config = Config()
    sharedata = {}
    found_header = False
    header_idx = {}
    with open(config.get("broker", "sharedata")) as datafile:
        for line in datafile:
            if line[0] == '#':
                # A header line
                found_header = True
                hdr = line[1:].rstrip().split('|')

                header_idx = {}
                for idx, name in enumerate(hdr):
                    header_idx[name] = idx

                # Silently discard lines that don't have all the required info
                for k in ["objtype", "pshare", "server", "dg"]:
                    if k not in header_idx:
                        found_header = False
            elif not found_header:
                # We haven't found the right header line
                continue
            else:
                fields = line.rstrip().split('|')
                if len(fields) != len(header_idx):  # Silently ignore invalid lines
                    continue
                if fields[header_idx["objtype"]] != "pshare":
                    continue

                sharedata[fields[header_idx["pshare"]]] = ShareInfo(
                    server=fields[header_idx["server"]],
                    mount="/vol/%s/%s" % (fields[header_idx["dg"]],
                                          fields[header_idx["pshare"]])
                )

                # Take a shortcut if we need just a single entry
                if only and only == fields[header_idx["pshare"]]:
                    break

        return sharedata
Developer: jrha, Project: aquilon, Lines: 55, Source: storage.py
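
For illustration, a hypothetical fragment of the data file this parser expects; the header field names (objtype, pshare, server, dg) come from the code above, while the values are made up:

#objtype|pshare|server|dg
pshare|example_share|filer1.example.com|dg01

Parsing this block would record example_share as served from filer1.example.com with mount point /vol/dg01/example_share.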

Example 9: write_file

def write_file(filename, content, mode=None, compress=None, create_directory=False, logger=LOGGER):
    """Atomically write content into the specified filename.

    The content is written into a temp file in the same directory as
    filename, and then swapped into place with rename.  This assumes
    that both the file and the directory can be written to by the
    broker.  The same directory was used instead of a temporary
    directory because atomic swaps are generally only available when
    the source and the target are on the same filesystem.

    If mode is set, change permissions on the file (newly created or
    pre-existing) to the new mode.  If unset and the file exists, the
    current permissions will be kept.  If unset and the file is new,
    the default is 0644.

    This method may raise OSError if any of the OS-related methods
    (creating the temp file, writing to it, correcting permissions,
    swapping into place) fail.  The method will attempt to remove
    the temp file if it had been created.

    If the compress keyword is passed, the content is compressed in
    memory before writing.  The only compression currently supported
    is gzip.

    """
    if compress == "gzip":
        config = Config()
        buffer = StringIO()
        compress = config.getint("broker", "gzip_level")
        zipper = gzip.GzipFile(filename, "wb", compress, buffer)
        zipper.write(content)
        zipper.close()
        content = buffer.getvalue()
    if mode is None:
        try:
            old_mode = os.stat(filename).st_mode
        except OSError:
            old_mode = 0644
    dirname, basename = os.path.split(filename)

    if not os.path.exists(dirname) and create_directory:
        os.makedirs(dirname)

    fd, fpath = mkstemp(prefix=basename, dir=dirname)
    try:
        with os.fdopen(fd, "w") as f:
            f.write(content)
        if mode is None:
            os.chmod(fpath, old_mode)
        else:
            os.chmod(fpath, mode)
        os.rename(fpath, filename)
    finally:
        if os.path.exists(fpath):
            os.remove(fpath)
Developer: piojo, Project: aquilon, Lines: 55, Source: utils.py
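
A hypothetical call exercising the keyword arguments handled above (the target path and the profile_xml variable are made up):

# Write gzip-compressed content atomically, creating the target directory if
# needed and forcing 0644 permissions on the result.
write_file("/var/quattor/profiles/example-host.xml.gz", profile_xml,
           mode=0644, compress="gzip", create_directory=True, logger=logger)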

Example 10: _snapshot_db

    def _snapshot_db(self, test):
        # If there was an error, and we're using SQLite, create a snapshot
        # TODO: create a git-managed snapshot of the plenaries/profiles as well
        config = Config()
        dsn = config.get("database", "dsn")
        if dsn.startswith("sqlite:///"):

            dbfile = dsn[10:]
            target = dbfile + ".%s:%s" % (test.__class__.__name__,
                                          test._testMethodName)
            call(["/bin/cp", "-a", dbfile, target])
Developer: jrha, Project: aquilon, Lines: 11, Source: verbose_text_test.py

Example 11: main

def main():
    parser = argparse.ArgumentParser(description="Send out broker notifications")
    parser.add_argument("-c", "--config", dest="config",
                        help="location of the broker configuration file")
    parser.add_argument("--one_shot", action="store_true",
                        help="do just a single run and then exit")
    parser.add_argument("--debug", action="store_true",
                        help="turn on debug logs on stderr")

    opts = parser.parse_args()

    config = Config(configfile=opts.config)

    # These modules must be imported after the configuration has been
    # initialized
    from aquilon.aqdb.db_factory import DbFactory

    db = DbFactory()

    if opts.debug:
        level = logging.DEBUG
        logging.basicConfig(level=level, stream=sys.stderr,
                            format='%(asctime)s [%(levelname)s] %(message)s')
    else:
        level = logging.INFO
        logfile = os.path.join(config.get("broker", "logdir"), "aq_notifyd.log")

        handler = WatchedFileHandler(logfile)
        handler.setLevel(level)

        formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
        handler.setFormatter(formatter)

        rootlog = logging.getLogger()
        rootlog.addHandler(handler)
        rootlog.setLevel(level)

    # Apply configured log settings
    for logname, level in config.items("logging"):
        if level not in logging._levelNames:
            continue
        logging.getLogger(logname).setLevel(logging._levelNames[level])

    logger = logging.getLogger("aq_notifyd")

    if opts.one_shot:
        update_index_and_notify(config, logger, db)
    else:
        signal.signal(signal.SIGTERM, exit_handler)
        signal.signal(signal.SIGINT, exit_handler)

        run_loop(config, logger, db)
Developer: piojo, Project: aquilon, Lines: 52, Source: aq_notifyd.py

Example 12: directories

    def directories(self):
        """Return a list of directories required for compiling this domain"""
        config = Config()
        dirs = []

        if self.domain.branch_type == "domain":
            dirs.append(os.path.join(config.get("broker", "domainsdir"), self.domain.name))

        dirs.append(os.path.join(config.get("broker", "quattordir"), "cfg", "domains", self.domain.name))

        dirs.append(os.path.join(config.get("broker", "quattordir"), "build", "xml", self.domain.name))

        return dirs
Developer: stdweird, Project: aquilon, Lines: 13, Source: domain.py

Example 13: __init__

    def __init__(self, dbhost, logger=LOGGER):
        super(PlenaryHost, self).__init__(logger=logger)

        if not isinstance(dbhost, Host):
            raise InternalError("PlenaryHost called with %s instead of Host" %
                                dbhost.__class__.__name__)
        self.dbobj = dbhost
        config = Config()
        if config.getboolean("broker", "namespaced_host_profiles"):
            self.plenaries.append(PlenaryNamespacedHost.get_plenary(dbhost))
        if config.getboolean("broker", "flat_host_profiles"):
            self.plenaries.append(PlenaryToplevelHost.get_plenary(dbhost))
        self.plenaries.append(PlenaryHostData.get_plenary(dbhost))
Developer: piojo, Project: aquilon, Lines: 13, Source: host.py

Example 14: __init__

    def __init__(self, dbobj=None, logger=LOGGER):
        self.config = Config()
        self.dbobj = dbobj
        self.logger = logger

        if self.template_type is None:
            raise InternalError("Plenary class %s did not set the template "
                                "type" % self.__class__.__name__)

        # Object templates live under the branch-specific build directory.
        # Everything else lives under the common plenary directory.
        if self.template_type == "object":
            if not dbobj or not hasattr(dbobj, "branch"):
                raise InternalError("Plenaries meant to be compiled need a DB "
                                    "object that has a branch; got: %r" % dbobj)
            self.dir = "%s/domains/%s/profiles" % (
                self.config.get("broker", "builddir"), dbobj.branch.name)
        else:
            self.dir = self.config.get("broker", "plenarydir")

        self.loadpath = None
        self.plenary_template = None
        self.plenary_core = None

        self.new_content = None
        # The following attributes are for stash/restore_stash
        self.old_content = None
        self.old_mtime = None
        self.stashed = False
        self.removed = False
        self.changed = False
Developer: jrha, Project: aquilon, Lines: 31, Source: base.py

Example 15: __init__

    def __init__(self, network=None, network_type=None, **kw):
        # pylint: disable=W0621
        if not isinstance(network, IPv4Network):
            raise InternalError("Expected an IPv4Network, got: %s" %
                                type(network))

        if not network_type:
            config = Config()
            network_type = config.get("broker", "default_network_type")

        self._network = network
        self._props = self.network_type_map.get(self.network_type,
                                                self.default_network_props)

        super(Network, self).__init__(ip=network.network,
                                      cidr=network.prefixlen,
                                      network_type=network_type, **kw)
Developer: piojo, Project: aquilon, Lines: 17, Source: network.py


Note: The aquilon.config.Config class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.