

Python StorageRouterList.get_by_ip Method Code Examples

This article collects typical usage examples of the Python method ovs.dal.lists.storagerouterlist.StorageRouterList.get_by_ip. If you are wondering what StorageRouterList.get_by_ip does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the containing class, ovs.dal.lists.storagerouterlist.StorageRouterList.


Below are 12 code examples of the StorageRouterList.get_by_ip method, sorted by popularity by default.
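
Before diving into the examples, here is a minimal, hedged usage sketch of the method itself (the IP value is purely illustrative): StorageRouterList.get_by_ip(ip) looks up the Storage Router modelled with the given IP and returns the corresponding StorageRouter DAL object, or None when no match exists, as the None checks in several examples below illustrate.

from ovs.dal.lists.storagerouterlist import StorageRouterList

ip = '10.100.1.1'  # hypothetical Storage Router IP
storage_router = StorageRouterList.get_by_ip(ip)
if storage_router is None:
    # no Storage Router is modelled with this IP; callers typically treat this as an error
    raise RuntimeError('No Storage Router found with IP {0}'.format(ip))
print('Found Storage Router {0} at {1}'.format(storage_router.name, storage_router.ip))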

Example 1: register

# Required module import: from ovs.dal.lists.storagerouterlist import StorageRouterList [as alias]
# Or: from ovs.dal.lists.storagerouterlist.StorageRouterList import get_by_ip [as alias]
 def register(node_id):
     """
     Adds a Node with a given node_id to the model
     :param node_id: ID of the ALBA node
     :type node_id: str
     :return: None
     """
     node = AlbaNodeList.get_albanode_by_node_id(node_id)
     if node is None:
         main_config = Configuration.get('/ovs/alba/asdnodes/{0}/config/main'.format(node_id))
         node = AlbaNode()
         node.ip = main_config['ip']
         node.port = main_config['port']
         node.username = main_config['username']
         node.password = main_config['password']
         node.storagerouter = StorageRouterList.get_by_ip(main_config['ip'])
     data = node.client.get_metadata()
     if data['_success'] is False and data['_error'] == 'Invalid credentials':
         raise RuntimeError('Invalid credentials')
     if data['node_id'] != node_id:
         AlbaNodeController._logger.error('Unexpected node_id: {0} vs {1}'.format(data['node_id'], node_id))
         raise RuntimeError('Unexpected node identifier')
     node.node_id = node_id
     node.type = 'ASD'
     node.save()
     AlbaController.checkup_maintenance_agents.delay()
Developer: openvstorage | Project: framework-alba-plugin | Lines: 28 | Source file: albanode.py

Example 2: get_storage_router_by_ip

# Required module import: from ovs.dal.lists.storagerouterlist import StorageRouterList [as alias]
# Or: from ovs.dal.lists.storagerouterlist.StorageRouterList import get_by_ip [as alias]
 def get_storage_router_by_ip(ip):
     """
     Retrieve Storage Router based on IP
     :param ip: IP of Storage Router
     :return: Storage Router DAL object
     """
     return StorageRouterList.get_by_ip(ip)
Developer: openvstorage | Project: integrationtests | Lines: 9 | Source file: general_storagerouter.py

Example 3: register

# Required module import: from ovs.dal.lists.storagerouterlist import StorageRouterList [as alias]
# Or: from ovs.dal.lists.storagerouterlist.StorageRouterList import get_by_ip [as alias]
    def register(node_id):
        """
        Adds a Node with a given node_id to the model
        :param node_id: ID of the ALBA node
        :type node_id: str

        :return: None
        """
        node = AlbaNodeList.get_albanode_by_node_id(node_id)
        if node is None:
            main_config = EtcdConfiguration.get('/ovs/alba/asdnodes/{0}/config/main'.format(node_id))
            node = AlbaNode()
            node.ip = main_config['ip']
            node.port = main_config['port']
            node.username = main_config['username']
            node.password = main_config['password']
            node.storagerouter = StorageRouterList.get_by_ip(main_config['ip'])
        data = node.client.get_metadata()
        if data['_success'] is False and data['_error'] == 'Invalid credentials':
            raise RuntimeError('Invalid credentials')
        if data['node_id'] != node_id:
            AlbaNodeController._logger.error('Unexpected node_id: {0} vs {1}'.format(data['node_id'], node_id))
            raise RuntimeError('Unexpected node identifier')
        node.node_id = node_id
        node.type = 'ASD'
        node.save()

        # increase maintenance agents count for all nodes by 1
        for backend in AlbaBackendList.get_albabackends():
            nr_of_agents_key = AlbaNodeController.NR_OF_AGENTS_ETCD_TEMPLATE.format(backend.guid)
            if EtcdConfiguration.exists(nr_of_agents_key):
                EtcdConfiguration.set(nr_of_agents_key, int(EtcdConfiguration.get(nr_of_agents_key) + 1))
            else:
                EtcdConfiguration.set(nr_of_agents_key, 1)
        AlbaNodeController.checkup_maintenance_agents()
Developer: DarumasLegs | Project: framework-alba-plugin | Lines: 37 | Source file: albanodecontroller.py

Example 4: manage_running_tasks

# Required module import: from ovs.dal.lists.storagerouterlist import StorageRouterList [as alias]
# Or: from ovs.dal.lists.storagerouterlist.StorageRouterList import get_by_ip [as alias]
 def manage_running_tasks(tasklist, timesleep=10):
     """
     Manage a list of running celery tasks
     - discard PENDING tasks after a certain timeout
     - validate RUNNING tasks are actually running
     :param tasklist: Dictionary of tasks to wait {IP address: AsyncResult}
     :type tasklist: dict
     :param timesleep: seconds to sleep between checks -
       for long-running tasks it is better to sleep for a longer period of time to reduce the number of SSH calls
     :type timesleep: int
     :return: results
     :rtype: dict
     """
     logger = LogHandler.get('lib', name='celery toolbox')
     ssh_clients = {}
     tasks_pending = {}
     tasks_pending_timeout = 1800  # 30 minutes
     results = {}
     failed_nodes = []
     while len(tasklist.keys()) > 0:
         for ip, task in tasklist.items():
             if task.state in ('SUCCESS', 'FAILURE'):
                 logger.info('Task {0} finished: {1}'.format(task.id, task.state))
                 results[ip] = task.get(propagate=False)
                 del tasklist[ip]
             elif task.state == 'PENDING':
                 if task.id not in tasks_pending:
                     tasks_pending[task.id] = time.time()
                 else:
                     task_pending_since = tasks_pending[task.id]
                     if time.time() - task_pending_since > tasks_pending_timeout:
                         logger.warning('Task {0} is pending since {1} on node {2}. Task will be revoked'.format(task.id, datetime.datetime.fromtimestamp(task_pending_since), ip))
                         revoke(task.id)
                         del tasklist[ip]
                         del tasks_pending[task.id]
                         failed_nodes.append(ip)
             elif task.state == 'STARTED':
                 if ip not in ssh_clients:
                     ssh_clients[ip] = SSHClient(ip, username='root')
                 client = ssh_clients[ip]
                 if ServiceManager.get_service_status('workers', client) is False:
                     logger.error('Service ovs-workers on node {0} appears halted while there is a task STARTED for it {1}. Task will be revoked.'.format(ip, task.id))
                     revoke(task.id)
                     del tasklist[ip]
                     failed_nodes.append(ip)
                 else:
                     ping_result = task.app.control.inspect().ping()
                     storage_router = StorageRouterList.get_by_ip(ip)
                     if "[email protected]{0}".format(storage_router.name) not in ping_result:
                         logger.error('Service ovs-workers on node {0} is not reachable via rabbitmq while there is a task STARTED for it {1}. Task will be revoked.'.format(ip, task.id))
                         revoke(task.id)
                         del tasklist[ip]
                         failed_nodes.append(ip)
         if len(tasklist.keys()) > 0:
             time.sleep(timesleep)
     return results, failed_nodes
Developer: DarumasLegs | Project: framework | Lines: 58 | Source file: celery_toolbox.py
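
A hedged usage sketch for the helper above (SomeController.some_task is a hypothetical placeholder, not a real ovs API, and manage_running_tasks is called here as a plain function although it may live on a toolbox class in the source): it builds the {IP address: AsyncResult} dictionary described in the docstring by dispatching one Celery task per modelled Storage Router, then collects the results and the failed nodes.

from ovs.dal.lists.storagerouterlist import StorageRouterList

tasklist = {}
for storage_router in StorageRouterList.get_storagerouters():
    # any task queued with .delay() yields the AsyncResult that manage_running_tasks expects
    tasklist[storage_router.ip] = SomeController.some_task.delay(storage_router.guid)

# poll every 30 seconds; returns the per-IP results plus the IPs whose tasks had to be revoked
results, failed_nodes = manage_running_tasks(tasklist, timesleep=30)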

Example 5: set_config_params

# Required module import: from ovs.dal.lists.storagerouterlist import StorageRouterList [as alias]
# Or: from ovs.dal.lists.storagerouterlist.StorageRouterList import get_by_ip [as alias]
 def set_config_params(self, vdisk, new_config_params, version):
     """
     Sets configuration parameters to a given vdisk.
     :param vdisk: Guid of the virtual disk to configure
     :param new_config_params: Configuration settings for the virtual disk
     :param version: API version
     """
     if version == 1 and 'dtl_target' in new_config_params:
         storage_router = StorageRouterList.get_by_ip(new_config_params['dtl_target'])
         if storage_router is None:
             raise NotAcceptable('API version 1 requires a Storage Router IP')
         new_config_params['dtl_target'] = storage_router.primary_failure_domain.guid
     return VDiskController.set_config_params.delay(vdisk_guid=vdisk.guid, new_config_params=new_config_params)
Developer: dawnpower | Project: framework | Lines: 15 | Source file: vdisks.py

Example 6: __init__

# Required module import: from ovs.dal.lists.storagerouterlist import StorageRouterList [as alias]
# Or: from ovs.dal.lists.storagerouterlist.StorageRouterList import get_by_ip [as alias]
    def __init__(self, ip):
        """
        Create RabbitMQ object

        :param ip: IP address of the server
        :type ip: str
        """
        # check if rabbitmq is available on the ip
        if not RabbitMQ._check_rabbitmq_ip(ip):
            raise ValueError('RabbitMQ on {0} could not be found.'.format(ip))
        self._service_manager = ServiceFactory.get_manager()
        self.ip = ip
        if RabbitMQ.INTERNAL:
            self._storagerouter = StorageRouterList.get_by_ip(ip)
            self._client = SSHClient(ip, username='root')

        if not self.check_management_plugin():
            self.enable_management_plugin()
Developer: openvstorage | Project: openvstorage-health-check | Lines: 20 | Source file: rabbitmq.py

Example 7: model_albanodes

# Required module import: from ovs.dal.lists.storagerouterlist import StorageRouterList [as alias]
# Or: from ovs.dal.lists.storagerouterlist.StorageRouterList import get_by_ip [as alias]
 def model_albanodes(**kwargs):
     """
     Add all ALBA nodes known to the config platform to the model
     :param kwargs: Kwargs containing information regarding the node
     :type kwargs: dict
     :return: None
     """
     _ = kwargs
     if Configuration.dir_exists('/ovs/alba/asdnodes'):
         for node_id in Configuration.list('/ovs/alba/asdnodes'):
             node = AlbaNodeList.get_albanode_by_node_id(node_id)
             if node is None:
                 node = AlbaNode()
             main_config = Configuration.get('/ovs/alba/asdnodes/{0}/config/main'.format(node_id))
             node.type = 'ASD'
             node.node_id = node_id
             node.ip = main_config['ip']
             node.port = main_config['port']
             node.username = main_config['username']
             node.password = main_config['password']
             node.storagerouter = StorageRouterList.get_by_ip(main_config['ip'])
             node.save()
Developer: openvstorage | Project: framework-alba-plugin | Lines: 24 | Source file: albanode.py

Example 8: set_config_params

# Required module import: from ovs.dal.lists.storagerouterlist import StorageRouterList [as alias]
# Or: from ovs.dal.lists.storagerouterlist.StorageRouterList import get_by_ip [as alias]
    def set_config_params(self, vdisk, new_config_params, version):
        """
        Sets configuration parameters to a given vdisk.
        :param vdisk: Guid of the virtual disk to configure
        :type vdisk: VDisk
        :param new_config_params: Configuration settings for the virtual disk
        :type new_config_params: dict
        :param version: Client version
        :type version: int
        """
        if version == 1 and "dtl_target" in new_config_params:
            storage_router = StorageRouterList.get_by_ip(new_config_params["dtl_target"])
            if storage_router is None:
                raise HttpNotAcceptableException(
                    error_description="API version 1 requires a Storage Router IP", error="invalid_version"
                )
            new_config_params["dtl_target"] = [junction.domain_guid for junction in storage_router.domains]

        new_config_params.pop("dedupe_mode", None)
        new_config_params.pop("cache_strategy", None)
        new_config_params.pop("readcache_limit", None)
        return VDiskController.set_config_params.delay(vdisk_guid=vdisk.guid, new_config_params=new_config_params)
Developer: grimpy | Project: openvstorage | Lines: 24 | Source file: vdisks.py

Example 9: flush_node

# Required module import: from ovs.dal.lists.storagerouterlist import StorageRouterList [as alias]
# Or: from ovs.dal.lists.storagerouterlist.StorageRouterList import get_by_ip [as alias]
def flush_node(storagerouter_ip):
    """
    Flush write buffer to backend

    :param storagerouter_ip: IP of the Storage Router on which to perform the flush
    :type storagerouter_ip: str
    """

    sr_info = StorageRouterList.get_by_ip(storagerouter_ip)
    vdisk_guids = sr_info.vdisks_guids

    for vdisk_guid in vdisk_guids:
        disk = VDisk(vdisk_guid)
        snapshot_name = "flush_snapshot"

        # create a snapshot to force the write buffer to be flushed to the backend
        disk.storagedriver_client.create_snapshot(str(disk.volume_id), snapshot_name)

        # wait until the snapshot is synced to the backend
        while not disk.storagedriver_client.info_snapshot(str(disk.volume_id), snapshot_name).in_backend:
            time.sleep(5)

        # delete the snapshot once the sync is completed
        disk.storagedriver_client.delete_snapshot(str(disk.volume_id), snapshot_name)
Developer: DarumasLegs | Project: dev_ops | Lines: 26 | Source file: openvstorage.py

Example 10: set_config_params

# Required module import: from ovs.dal.lists.storagerouterlist import StorageRouterList [as alias]
# Or: from ovs.dal.lists.storagerouterlist.StorageRouterList import get_by_ip [as alias]
    def set_config_params(vdisk_guid, new_config_params):
        """
        Sets configuration parameters for a given vdisk.
        :param vdisk_guid: Guid of the virtual disk to set the configuration parameters for
        :param new_config_params: New configuration parameters
        """
        required_params = {'dtl_mode': (str, StorageDriverClient.VDISK_DTL_MODE_MAP.keys()),
                           'sco_size': (int, StorageDriverClient.TLOG_MULTIPLIER_MAP.keys()),
                           'dedupe_mode': (str, StorageDriverClient.VDISK_DEDUPE_MAP.keys()),
                           'write_buffer': (int, {'min': 128, 'max': 10 * 1024}),
                           'cache_strategy': (str, StorageDriverClient.VDISK_CACHE_MAP.keys()),
                           'readcache_limit': (int, {'min': 1, 'max': 10 * 1024}, False)}

        if new_config_params.get('dtl_target') is not None:
            required_params.update({'dtl_target': (str, Toolbox.regex_ip)})

        Toolbox.verify_required_params(required_params, new_config_params)

        if new_config_params['dtl_mode'] != 'no_sync' and new_config_params.get('dtl_target') is None:
            raise Exception('If DTL mode is Asynchronous or Synchronous, a target IP should always be specified')

        errors = False
        vdisk = VDisk(vdisk_guid)
        volume_id = str(vdisk.volume_id)
        old_config_params = VDiskController.get_config_params(vdisk.guid)

        # 1st update SCO size, because this impacts TLOG multiplier which on its turn impacts write buffer
        new_sco_size = new_config_params['sco_size']
        old_sco_size = old_config_params['sco_size']
        if new_sco_size != old_sco_size:
            write_buffer = float(new_config_params['write_buffer'])
            tlog_multiplier = StorageDriverClient.TLOG_MULTIPLIER_MAP[new_sco_size]
            sco_factor = write_buffer / tlog_multiplier / new_sco_size
            try:
                logger.info('Updating property sco_size on vDisk {0} to {1}'.format(vdisk_guid, new_sco_size))
                vdisk.storagedriver_client.set_sco_multiplier(volume_id, new_sco_size / 4 * 1024)
                vdisk.storagedriver_client.set_tlog_multiplier(volume_id, tlog_multiplier)
                vdisk.storagedriver_client.set_sco_cache_max_non_disposable_factor(volume_id, sco_factor)
                logger.info('Updated property sco_size')
            except Exception as ex:
                logger.error('Error updating "sco_size": {0}'.format(ex))
                errors = True

        # 2nd Check for DTL changes
        new_dtl_mode = new_config_params['dtl_mode']
        old_dtl_mode = old_config_params['dtl_mode']
        new_dtl_target = new_config_params.get('dtl_target')
        old_dtl_target = old_config_params['dtl_target']
        if old_dtl_mode != new_dtl_mode or new_dtl_target != old_dtl_target:
            if old_dtl_mode != new_dtl_mode and new_dtl_mode == 'no_sync':
                logger.info('Disabling DTL for vDisk {0}'.format(vdisk_guid))
                vdisk.storagedriver_client.set_manual_dtl_config(volume_id, None)
            elif (new_dtl_target is not None and new_dtl_target != old_dtl_target or old_dtl_mode != new_dtl_mode) and new_dtl_mode != 'no_sync':
                logger.info('Changing DTL to use global values for vDisk {0}'.format(vdisk_guid))
                sr_target = StorageRouterList.get_by_ip(new_dtl_target)
                if sr_target is None:
                    logger.error('Failed to retrieve Storage Router with IP {0}'.format(new_dtl_target))
                    errors = True
                for sd in sr_target.storagedrivers:
                    if sd.vpool == vdisk.vpool:
                        dtl_config = DTLConfig(str(new_dtl_target), sd.ports[2], StorageDriverClient.VDISK_DTL_MODE_MAP[new_dtl_mode])
                        vdisk.storagedriver_client.set_manual_dtl_config(volume_id, dtl_config)
                        break
                else:
                    logger.error('Failed to retrieve Storage Driver with IP {0}'.format(new_dtl_target))
                    errors = True

        # 3rd update the remaining properties
        for key in required_params:
            try:
                if key in ['sco_size', 'dtl_mode', 'dtl_target']:
                    continue

                new_value = new_config_params[key]
                old_value = old_config_params[key]
                if new_value != old_value:
                    logger.info('Updating property {0} on vDisk {1} to {2}'.format(key, vdisk_guid, new_value))
                    if key == 'dedupe_mode':
                        vdisk.storagedriver_client.set_readcache_mode(volume_id, StorageDriverClient.VDISK_DEDUPE_MAP[new_value])
                    elif key == 'write_buffer':
                        tlog_multiplier = vdisk.storagedriver_client.get_tlog_multiplier(volume_id) or StorageDriverClient.TLOG_MULTIPLIER_MAP[new_sco_size]
                        sco_factor = float(new_value) / tlog_multiplier / new_sco_size
                        vdisk.storagedriver_client.set_sco_cache_max_non_disposable_factor(volume_id, sco_factor)
                    elif key == 'cache_strategy':
                        vdisk.storagedriver_client.set_readcache_behaviour(volume_id, StorageDriverClient.VDISK_CACHE_MAP[new_value])
                    elif key == 'readcache_limit':
                        vol_info = vdisk.storagedriver_client.info_volume(volume_id)
                        block_size = vol_info.lba_size * vol_info.cluster_multiplier or 4096
                        limit = new_value * 1024 * 1024 * 1024 / block_size if new_value else None
                        vdisk.storagedriver_client.set_readcache_limit(volume_id, limit)
                    else:
                        raise KeyError('Unsupported property provided: "{0}"'.format(key))
                    logger.info('Updated property {0}'.format(key))
            except Exception as ex:
                logger.error('Error updating "{0}": {1}'.format(key, ex))
                errors = True
        if errors is True:
            raise Exception('Failed to update the values for vDisk {0}'.format(vdisk.name))
Developer: jamie-liu | Project: openvstorage | Lines: 100 | Source file: vdisk.py
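
To make the validation in the example above concrete, here is a hedged sketch of a new_config_params dictionary that would pass the required_params check (the values and the vdisk guid are illustrative; the exact allowed values come from the StorageDriverClient maps referenced in the code):

new_config_params = {'dtl_mode': 'a_sync',         # must be a VDISK_DTL_MODE_MAP key
                     'dtl_target': '10.100.1.1',   # required whenever dtl_mode is not 'no_sync'
                     'sco_size': 4,                # must be a TLOG_MULTIPLIER_MAP key
                     'dedupe_mode': 'dedupe',      # must be a VDISK_DEDUPE_MAP key
                     'write_buffer': 512,          # between 128 and 10 * 1024
                     'cache_strategy': 'on_read',  # must be a VDISK_CACHE_MAP key
                     'readcache_limit': 5}         # optional, between 1 and 10 * 1024 (interpreted as GiB)
# call directly, or queue it with .delay() as Examples 5 and 8 do
VDiskController.set_config_params(vdisk_guid='<some-vdisk-guid>', new_config_params=new_config_params)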

Example 11: remove_node

# Required module import: from ovs.dal.lists.storagerouterlist import StorageRouterList [as alias]
# Or: from ovs.dal.lists.storagerouterlist.StorageRouterList import get_by_ip [as alias]
    def remove_node(node_ip, silent=None):
        """
        Remove the node with specified IP from the cluster
        :param node_ip: IP of the node to remove
        :type node_ip: str
        :param silent: If silent == '--force-yes' no question will be asked to confirm the removal
        :type silent: str
        :return: None
        """
        from ovs.lib.storagedriver import StorageDriverController
        from ovs.lib.storagerouter import StorageRouterController
        from ovs.dal.lists.storagerouterlist import StorageRouterList

        Toolbox.log(logger=NodeRemovalController._logger, messages="Remove node", boxed=True)
        Toolbox.log(
            logger=NodeRemovalController._logger,
            messages="WARNING: Some of these steps may take a very long time, please check the logs for more information\n\n",
        )

        ###############
        # VALIDATIONS #
        ###############
        try:
            node_ip = node_ip.strip()
            if not isinstance(node_ip, str):
                raise ValueError("Node IP must be a string")
            if not re.match(SSHClient.IP_REGEX, node_ip):
                raise ValueError("Invalid IP {0} specified".format(node_ip))

            storage_router_all = StorageRouterList.get_storagerouters()
            storage_router_masters = StorageRouterList.get_masters()
            storage_router_all_ips = set([storage_router.ip for storage_router in storage_router_all])
            storage_router_master_ips = set([storage_router.ip for storage_router in storage_router_masters])
            storage_router_to_remove = StorageRouterList.get_by_ip(node_ip)

            if node_ip not in storage_router_all_ips:
                raise ValueError(
                    "Unknown IP specified\nKnown in model:\n - {0}\nSpecified for removal:\n - {1}".format(
                        "\n - ".join(storage_router_all_ips), node_ip
                    )
                )

            if len(storage_router_all_ips) == 1:
                raise RuntimeError("Removing the only node is not possible")

            if node_ip in storage_router_master_ips and len(storage_router_master_ips) == 1:
                raise RuntimeError("Removing the only master node is not possible")

            if System.get_my_storagerouter() == storage_router_to_remove:
                raise RuntimeError(
                    "The node to be removed cannot be identical to the node on which the removal is initiated"
                )

            Toolbox.log(
                logger=NodeRemovalController._logger, messages="Creating SSH connections to remaining master nodes"
            )
            master_ip = None
            ip_client_map = {}
            storage_routers_offline = []
            storage_router_to_remove_online = True
            for storage_router in storage_router_all:
                try:
                    client = SSHClient(storage_router, username="root")
                    if client.run(["pwd"]):
                        Toolbox.log(
                            logger=NodeRemovalController._logger,
                            messages="  Node with IP {0:<15} successfully connected to".format(storage_router.ip),
                        )
                        ip_client_map[storage_router.ip] = client
                        if storage_router != storage_router_to_remove and storage_router.node_type == "MASTER":
                            master_ip = storage_router.ip
                except UnableToConnectException:
                    Toolbox.log(
                        logger=NodeRemovalController._logger,
                        messages="  Node with IP {0:<15} is unreachable".format(storage_router.ip),
                    )
                    storage_routers_offline.append(storage_router)
                    if storage_router == storage_router_to_remove:
                        storage_router_to_remove_online = False

            if len(ip_client_map) == 0 or master_ip is None:
                raise RuntimeError("Could not connect to any master node in the cluster")

            storage_router_to_remove.invalidate_dynamics("vdisks_guids")
            if (
                len(storage_router_to_remove.vdisks_guids) > 0
            ):  # vDisks are supposed to be moved away manually before removing a node
                raise RuntimeError("Still vDisks attached to Storage Router {0}".format(storage_router_to_remove.name))

            internal_memcached = Toolbox.is_service_internally_managed(service="memcached")
            internal_rabbit_mq = Toolbox.is_service_internally_managed(service="rabbitmq")
            memcached_endpoints = Configuration.get(key="/ovs/framework/memcache|endpoints")
            rabbit_mq_endpoints = Configuration.get(key="/ovs/framework/messagequeue|endpoints")
            copy_memcached_endpoints = list(memcached_endpoints)
            copy_rabbit_mq_endpoints = list(rabbit_mq_endpoints)
            for endpoint in memcached_endpoints:
                if endpoint.startswith(storage_router_to_remove.ip):
                    copy_memcached_endpoints.remove(endpoint)
            for endpoint in rabbit_mq_endpoints:
                if endpoint.startswith(storage_router_to_remove.ip):
#......... remainder of the code omitted .........
Developer: openvstorage | Project: framework | Lines: 103 | Source file: noderemoval.py

Example 12: get_package_information_core

# Required module import: from ovs.dal.lists.storagerouterlist import StorageRouterList [as alias]
# Or: from ovs.dal.lists.storagerouterlist.StorageRouterList import get_by_ip [as alias]
    def get_package_information_core(client, package_info):
        """
        Called by GenericController.refresh_package_information() every hour

        Retrieve information about the currently installed versions of the core packages
        Retrieve information about the versions to which each package can potentially be updated
        If installed version is different from candidate version --> store this information in model

        Additionally check the services with a 'run' file
        Verify whether the running version is up-to-date with the candidate version
        If different --> store this information in the model

        Result: Every package with updates or which requires services to be restarted is stored in the model

        :param client: Client on which to collect the version information
        :type client: SSHClient
        :param package_info: Dictionary passed in by the thread calling this function
        :type package_info: dict
        :return: Package information
        :rtype: dict
        """
        try:
            if client.username != 'root':
                raise RuntimeError('Only the "root" user can retrieve the package information')

            binaries = PackageManager.get_binary_versions(client=client, package_names=UpdateController.core_packages_with_binaries)
            installed = PackageManager.get_installed_versions(client=client, package_names=UpdateController.all_core_packages)
            candidate = PackageManager.get_candidate_versions(client=client, package_names=UpdateController.all_core_packages)
            if set(installed.keys()) != set(UpdateController.all_core_packages) or set(candidate.keys()) != set(UpdateController.all_core_packages):
                raise RuntimeError('Failed to retrieve the installed and candidate versions for packages: {0}'.format(', '.join(UpdateController.all_core_packages)))

            # Retrieve Arakoon information
            framework_arakoons = []
            storagedriver_arakoons = []
            for cluster, arakoon_list in {'cacc': framework_arakoons,
                                          'ovsdb': framework_arakoons,
                                          'voldrv': storagedriver_arakoons}.iteritems():
                cluster_name = ArakoonClusterConfig.get_cluster_name(cluster)
                if cluster_name is None:
                    continue

                if cluster == 'cacc':
                    arakoon_metadata = ArakoonInstaller.get_arakoon_metadata_by_cluster_name(cluster_name=cluster_name, filesystem=True, ip=client.ip)
                else:
                    arakoon_metadata = ArakoonInstaller.get_arakoon_metadata_by_cluster_name(cluster_name=cluster_name)

                if arakoon_metadata['internal'] is True:
                    arakoon_list.append(ArakoonInstaller.get_service_name_for_cluster(cluster_name=arakoon_metadata['cluster_name']))

            storagerouter = StorageRouterList.get_by_ip(client.ip)
            alba_proxies = []
            for service in storagerouter.services:
                if service.type.name == ServiceType.SERVICE_TYPES.ALBA_PROXY:
                    alba_proxies.append(service.name)

            storagedriver_services = []
            for sd in storagerouter.storagedrivers:
                storagedriver_services.append('ovs-dtl_{0}'.format(sd.vpool.name))
                storagedriver_services.append('ovs-volumedriver_{0}'.format(sd.vpool.name))

            default_entry = {'candidate': None,
                             'installed': None,
                             'services_to_restart': []}

            #                       component:   package_name: services_with_run_file
            for component, info in {'framework': {'arakoon': framework_arakoons,
                                                  'openvstorage': []},
                                    'storagedriver': {'alba': alba_proxies,
                                                      'arakoon': storagedriver_arakoons,
                                                      'volumedriver-no-dedup-base': [],
                                                      'volumedriver-no-dedup-server': storagedriver_services}}.iteritems():
                component_info = {}
                for package, services in info.iteritems():
                    for service in services:
                        service = ExtensionToolbox.remove_prefix(service, 'ovs-')
                        version_file = '/opt/OpenvStorage/run/{0}.version'.format(service)
                        if not client.file_exists(version_file):
                            UpdateController._logger.warning('{0}: Failed to find a version file in /opt/OpenvStorage/run for service {1}'.format(client.ip, service))
                            continue
                        package_name = package
                        running_versions = client.file_read(version_file).strip()
                        for version in running_versions.split(';'):
                            version = version.strip()
                            running_version = None
                            if '=' in version:
                                package_name = version.split('=')[0]
                                running_version = version.split('=')[1]
                            elif version:
                                running_version = version

                            if package_name not in UpdateController.all_core_packages:
                                raise ValueError('Unknown package dependency found in {0}'.format(version_file))
                            if package_name not in binaries:
                                raise RuntimeError('Binary version for package {0} was not retrieved'.format(package_name))

                            if running_version is not None and running_version != binaries[package_name]:
                                if package_name not in component_info:
                                    component_info[package_name] = copy.deepcopy(default_entry)
                                component_info[package_name]['installed'] = running_version
                                component_info[package_name]['candidate'] = binaries[package_name]
#......... remainder of the code omitted .........
Developer: openvstorage | Project: framework | Lines: 103 | Source file: update.py


Note: The ovs.dal.lists.storagerouterlist.StorageRouterList.get_by_ip examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce it without permission.