

Python VPoolList.get_vpools Method Code Examples

This article collects typical usage examples of the Python method ovs.dal.lists.vpoollist.VPoolList.get_vpools. If you are unsure what VPoolList.get_vpools does, how to call it, or where to find real-world usage, the curated code examples below should help. You can also explore further usage examples of the containing class, ovs.dal.lists.vpoollist.VPoolList.


The following presents 15 code examples of the VPoolList.get_vpools method, ordered by popularity by default.
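Before the project examples, a minimal usage sketch may help frame what they all have in common. This is an assumption-laden sketch: it presumes it runs on an Open vStorage node where the ovs DAL packages are importable, and it only touches the name and guid attributes that the examples below also rely on.

# Minimal sketch: enumerate the modelled vPools (assumes the ovs DAL is available on this node)
from ovs.dal.lists.vpoollist import VPoolList

vpools = VPoolList.get_vpools()  # returns a list of VPool objects from the OVS data abstraction layer
if len(vpools) == 0:
    print 'No vPools found'  # the surrounding code base is Python 2, hence the print statement
else:
    for vpool in vpools:
        # each VPool exposes attributes such as name and guid, as used throughout the examples below
        print '{0} ({1})'.format(vpool.name, vpool.guid)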

Example 1: get_vpool_stats

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]
    def get_vpool_stats():
        """
        Send Vpool statistics to InfluxDB
        """
        points = []
        vpools = VPoolList.get_vpools()
        if len(vpools) == 0:
            StatsmonkeyScheduledTaskController._logger.info("No vpools found")
            return

        for vpool in vpools:
            try:
                metrics = StatsmonkeyScheduledTaskController._pop_realtime_info(vpool.statistics)
                vpool_name = vpool.name

                entry = {
                    'measurement': 'vpool_stats',
                    'tags': {
                        'vpool_name': vpool_name
                    },
                    'fields': metrics
                }
                points.append(entry)
            except Exception as ex:
                StatsmonkeyScheduledTaskController._logger.error(ex.message)

        if len(points) == 0:
            StatsmonkeyScheduledTaskController._logger.info("No statistics found")
            return

        StatsmonkeyScheduledTaskController._send_stats(points)
        return points
Developer: paperandsoap | Project: openvstorage-monitoring | Lines: 34 | Source: statsmonkeyscheduledtask.py

Example 2: check_filedrivers

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]
 def check_filedrivers(result_handler):
     """
     Checks if the file drivers work on a local machine (compatible with multiple vPools)
     :param result_handler: logging object
     :type result_handler: ovs.extensions.healthcheck.result.HCResults
     """
     result_handler.info('Checking file drivers.', add_to_result=False)
     vpools = VPoolList.get_vpools()
     # perform tests
     if len(vpools) == 0:
         result_handler.skip('No vPools found!')
         return
     for vp in vpools:
         name = 'ovs-healthcheck-test-{0}'.format(VolumedriverHealthCheck.LOCAL_ID)
         if vp.guid not in VolumedriverHealthCheck.LOCAL_SR.vpools_guids:
             result_handler.skip('Skipping vPool {0} because it is not living here.'.format(vp.name))
             continue
         try:
             VolumedriverHealthCheck._check_filedriver(vp.name, name)
             if os.path.exists('/mnt/{0}/{1}.xml'.format(vp.name, name)):
                 # working
                 VolumedriverHealthCheck._check_filedriver_remove(vp.name)
                 result_handler.success('Filedriver for vPool {0} is working fine!'.format(vp.name))
             else:
                 # not working
                 result_handler.failure('Filedriver for vPool {0} seems to have problems!'.format(vp.name))
         except TimeoutError:
             # timeout occurred, action took too long
             result_handler.warning('Filedriver of vPool {0} seems to have `timeout` problems'.format(vp.name))
         except subprocess.CalledProcessError:
             # can be input/output error by filedriver
             result_handler.failure('Filedriver of vPool {0} seems to have `input/output` problems'.format(vp.name))
Developer: openvstorage | Project: openvstorage-health-check | Lines: 34 | Source: volumedriver.py

Example 3: check_volumedrivers

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]
    def check_volumedrivers(result_handler):
        """
        Checks if the VOLUMEDRIVERS work on a local machine (compatible with multiple vPools)
        :param result_handler: logging object
        :type result_handler: ovs.extensions.healthcheck.result.HCResults
        :return: None
        :rtype: NoneType
        """
        result_handler.info('Checking volumedrivers.', add_to_result=False)
        vpools = VPoolList.get_vpools()
        if len(vpools) == 0:
            result_handler.skip('No vPools found!')
            return
        for vp in vpools:
            name = 'ovs-healthcheck-test-{0}.raw'.format(VolumedriverHealthCheck.LOCAL_ID)
            if vp.guid not in VolumedriverHealthCheck.LOCAL_SR.vpools_guids:
                result_handler.skip('Skipping vPool {0} because it is not living here.'.format(vp.name))
                continue
            try:
                # delete if previous vdisk with this name exists
                storagedriver_guid = next((storagedriver.guid for storagedriver in vp.storagedrivers
                                           if storagedriver.storagedriver_id == vp.name +
                                           VolumedriverHealthCheck.LOCAL_ID))
                # create a new one
                volume = VolumedriverHealthCheck._check_volumedriver(name, storagedriver_guid, result_handler)

                if volume is True:
                    # delete the recently created
                    try:
                        VolumedriverHealthCheck._check_volumedriver_remove(vpool_name=vp.name, vdisk_name=name)
                    except Exception as ex:
                        raise RuntimeError('Could not delete the created volume. Got {0}'.format(str(ex)))
                    # Working at this point
                    result_handler.success('Volumedriver of vPool {0} is working fine!'.format(vp.name))
                else:
                    # not working
                    result_handler.failure('Something went wrong during vdisk creation on vpool {0}.'.format(vp.name))

            except TimeoutError:
                # timeout occurred, action took too long
                result_handler.warning('Volumedriver of vPool {0} seems to timeout.'.format(vp.name))
            except IOError as ex:
                # can be input/output error by volumedriver
                result_handler.failure('Volumedriver of vPool {0} seems to have IO problems. Got `{1}` while executing.'.format(vp.name, ex.message))
            except RuntimeError as ex:
                result_handler.failure('Volumedriver of vPool {0} seems to have problems. Got `{1}` while executing.'.format(vp.name, ex))
            except VDiskNotFoundError:
                result_handler.warning('Volume on vPool {0} was not found, please retry again'.format(vp.name))
            except Exception as ex:
                result_handler.failure('Uncaught exception for Volumedriver of vPool {0}. Got {1} while executing.'.format(vp.name, ex))
            finally:
                # Attempt to delete the created vdisk
                try:
                    VolumedriverHealthCheck._check_volumedriver_remove(vpool_name=vp.name, vdisk_name=name, present=False)
                except:
                    pass
Developer: openvstorage | Project: openvstorage-health-check | Lines: 58 | Source: volumedriver.py

Example 4: check_model_consistency

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]
    def check_model_consistency(result_handler):
        """
        Checks if the model consistency of OVSDB vs. VOLUMEDRIVER and does a preliminary check on RABBITMQ
        :param result_handler: logging object
        :type result_handler: ovs.extensions.healthcheck.result.HCResults
        :return: None
        :rtype: NoneType
        """
        result_handler.info('Checking model consistency: ')

        # Checking consistency of volumedriver vs. ovsdb and backwards
        for vp in VPoolList.get_vpools():
            if vp.guid not in OpenvStorageHealthCheck.LOCAL_SR.vpools_guids:
                result_handler.skip('Skipping vPool {0} because it is not living here.'.format(vp.name))
                continue
            result_handler.info('Checking consistency of volumedriver vs. ovsdb for {0}: '.format(vp.name), add_to_result=False)
            missing_in_volumedriver = []
            missing_in_model = []
            try:
                # noinspection PyArgumentList
                voldrv_volume_list = vp.storagedriver_client.list_volumes()
            except (ClusterNotReachableException, RuntimeError) as ex:
                result_handler.warning('Seems like the volumedriver {0} is not running. Got {1}'.format(vp.name, str(ex)),
                                       code=ErrorCodes.voldrv_connection_problem)
                continue

            vdisk_volume_ids = []
            # Cross-reference model vs. volumedriver
            for vdisk in vp.vdisks:
                vdisk_volume_ids.append(vdisk.volume_id)
                if vdisk.volume_id not in voldrv_volume_list:
                    missing_in_volumedriver.append(vdisk.guid)
                else:
                    voldrv_volume_list.remove(vdisk.volume_id)
            # Cross-reference volumedriver vs. model
            for voldrv_id in voldrv_volume_list:
                if voldrv_id not in vdisk_volume_ids:
                    missing_in_model.append(voldrv_id)

            # Display discrepancies for vPool
            if len(missing_in_volumedriver) != 0:
                result_handler.warning('Detected volumes that are MISSING in volumedriver but are in ovsdb in vpool: {0} - vdisk guid(s):{1}.'
                                       .format(vp.name, ' '.join(missing_in_volumedriver)),
                                       code=ErrorCodes.missing_volumedriver)
            else:
                result_handler.success('No discrepancies found for ovsdb in vPool {0}'.format(vp.name), code=ErrorCodes.missing_volumedriver)

            if len(missing_in_model) != 0:
                result_handler.warning('Detected volumes that are AVAILABLE in volumedriver but are not in ovsdb in vpool: {0} - vdisk volume id(s):{1}'
                                       .format(vp.name, ', '.join(missing_in_model)),
                                       code=ErrorCodes.missing_ovsdb)
            else:
                result_handler.success('No discrepancies found for voldrv in vpool {0}'.format(vp.name), code=ErrorCodes.missing_ovsdb)
Developer: openvstorage | Project: openvstorage-health-check | Lines: 55 | Source: generic.py

Example 5: _ns_statistics

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]
    def _ns_statistics(self):
        """
        Return a dict with the ASD namespace statistics, aggregated globally and per vPool
        """
        # Collect ALBA related statistics
        alba_dataset = {}
        for namespace in self.ns_data:
            if namespace['namespace']['state'] != 'active':
                continue
            alba_dataset[namespace['name']] = namespace['statistics']

        # Collect vPool/vDisk data
        vdisk_dataset = {}
        for vpool in VPoolList.get_vpools():
            vdisk_dataset[vpool] = vpool.storagedriver_client.list_volumes()

        # Collect global usage
        global_usage = {'size': 0,
                        'used': 0}
        for stats in self.asd_statistics.values():
            global_usage['size'] += stats['capacity']
            global_usage['used'] += stats['disk_usage']

        # Cross merge
        dataset = {'global': {'size': global_usage['size'],
                              'used': global_usage['used']},
                   'vpools': {},
                   'overhead': 0,
                   'unknown': {'storage': 0,
                               'logical': 0}}
        for vpool in vdisk_dataset:
            for namespace in vdisk_dataset[vpool]:
                if namespace in alba_dataset:
                    if vpool.guid not in dataset['vpools']:
                        dataset['vpools'][vpool.guid] = {'storage': 0,
                                                         'logical': 0}
                    dataset['vpools'][vpool.guid]['storage'] += alba_dataset[namespace]['storage']
                    dataset['vpools'][vpool.guid]['logical'] += alba_dataset[namespace]['logical']
                    del alba_dataset[namespace]
            fd_namespace = 'fd-{0}-{1}'.format(vpool.name, vpool.guid)
            if fd_namespace in alba_dataset:
                if vpool.guid not in dataset['vpools']:
                    dataset['vpools'][vpool.guid] = {'storage': 0,
                                                     'logical': 0}
                dataset['vpools'][vpool.guid]['storage'] += alba_dataset[fd_namespace]['storage']
                dataset['vpools'][vpool.guid]['logical'] += alba_dataset[fd_namespace]['logical']
                del alba_dataset[fd_namespace]
        for namespace in alba_dataset:
            dataset['unknown']['storage'] += alba_dataset[namespace]['storage']
            dataset['unknown']['logical'] += alba_dataset[namespace]['logical']
        dataset['overhead'] = max(0, dataset['global']['used'] - dataset['unknown']['storage'] - sum(usage['storage'] for usage in dataset['vpools'].values()))
        return dataset
Developer: DarumasLegs | Project: framework-alba-plugin | Lines: 54 | Source: albabackend.py

Example 6: mds_checkup

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]
 def mds_checkup():
     """
     Validates the current MDS setup/configuration and takes actions where required
     """
     mds_dict = {}
     for vpool in VPoolList.get_vpools():
         for mds_service in vpool.mds_services:
             storagerouter = mds_service.service.storagerouter
             if vpool not in mds_dict:
                 mds_dict[vpool] = {}
             if storagerouter not in mds_dict[vpool]:
                 mds_dict[vpool][storagerouter] = {'client': SSHClient(storagerouter, username='root'),
                                                   'services': []}
             mds_dict[vpool][storagerouter]['services'].append(mds_service)
     for vpool, storagerouter_info in mds_dict.iteritems():
         # 1. First, make sure there's at least one MDS on every StorageRouter that's not overloaded
         # If not, create an extra MDS for that StorageRouter
         for storagerouter in storagerouter_info:
             client = mds_dict[vpool][storagerouter]['client']
             mds_services = mds_dict[vpool][storagerouter]['services']
             has_room = False
             for mds_service in mds_services[:]:
                 if mds_service.capacity == 0 and len(mds_service.vdisks_guids) == 0:
                     client = SSHClient(storagerouter)
                     MDSServiceController.remove_mds_service(mds_service, client, storagerouter, vpool, reload_config=True)
                     mds_services.remove(mds_service)
             for mds_service in mds_services:
                 _, load = MDSServiceController.get_mds_load(mds_service)
                 if load < Configuration.get('ovs.storagedriver.mds.maxload'):
                     has_room = True
                     break
             if has_room is False:
                 mds_service = MDSServiceController.prepare_mds_service(client, storagerouter, vpool,
                                                                        fresh_only=False, reload_config=True)
                 if mds_service is None:
                     raise RuntimeError('Could not add MDS node')
                 mds_services.append(mds_service)
         mds_config_set = MDSServiceController.get_mds_storagedriver_config_set(vpool)
         for storagerouter in mds_dict[vpool]:
             client = mds_dict[vpool][storagerouter]['client']
             storagedriver_config = StorageDriverConfiguration('storagedriver', vpool.name)
             storagedriver_config.load(client)
             if storagedriver_config.is_new is False:
                 storagedriver_config.clean()  # Clean out obsolete values
                 storagedriver_config.configure_filesystem(
                     fs_metadata_backend_mds_nodes=mds_config_set[storagerouter.guid]
                 )
                 storagedriver_config.save(client)
         # 2. Per VPool, execute a safety check, making sure the master/slave configuration is optimal.
         for vdisk in vpool.vdisks:
             MDSServiceController.ensure_safety(vdisk)
Developer: jianyongchen | Project: openvstorage | Lines: 53 | Source: mdsservice.py

Example 7: list_volumes

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]
 def list_volumes(vpool_guid=None):
     """
     List all known volumes on a specific vpool or on all
     """
     if vpool_guid is not None:
         vpool = VPool(vpool_guid)
         storagedriver_client = StorageDriverClient.load(vpool)
         response = storagedriver_client.list_volumes()
     else:
         response = []
         for vpool in VPoolList.get_vpools():
             storagedriver_client = StorageDriverClient.load(vpool)
             response.extend(storagedriver_client.list_volumes())
     return response
Developer: tcpcloud | Project: openvstorage | Lines: 16 | Source: vdisk.py

Example 8: checkForHaltedVolumes

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]
    def checkForHaltedVolumes(self):

        self.utility.logger("Checking for halted volumes: ", self.module, 3, 'checkHaltedVolumes', False)

        vpools = VPoolList.get_vpools()

        if len(vpools) != 0:

            for vp in vpools:

                haltedVolumes = []

                self.utility.logger("Checking vPool '{0}': ".format(vp.name), self.module, 3,
                                    'checkVPOOL_{0}'.format(vp.name), False)

                config_file = self.utility.fetchConfigFilePath(vp.name, self.machine_id, 1, vp.guid)
                voldrv_client = src.LocalStorageRouterClient(config_file)

                for volume in voldrv_client.list_volumes():
                    # check if volume is halted, returns: 0 or 1
                    try:
                        if int(self.utility.parseXMLtoJSON(voldrv_client.info_volume(volume))
                                   ["boost_serialization"]["XMLRPCVolumeInfo"]["halted"]):
                            haltedVolumes.append(volume)
                    except ObjectNotFoundException:
                        # ignore ovsdb invalid entrees
                        # model consistency will handle it.
                        continue
                    except MaxRedirectsExceededException:
                        # this means the volume is not halted but detached or unreachable for the volumedriver
                        haltedVolumes.append(volume)

                # print all results
                if len(haltedVolumes) > 0:
                    self.utility.logger("Detected volumes that are HALTED in volumedriver in vPool '{0}': {1}"
                                        .format(vp.name, ', '.join(haltedVolumes)), self.module, 0,
                                        'halted')
                else:
                    self.utility.logger("No halted volumes detected in vPool '{0}'"
                                        .format(vp.name), self.module, 1,
                                        'halted')

        else:
            self.utility.logger("No vPools found!".format(len(vpools)), self.module, 5, 'halted')
Developer: DarumasLegs | Project: openvstorage-health-check | Lines: 46 | Source: openvstoragecluster_health_check.py

Example 9: checkFileDriver

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]
    def checkFileDriver(self):
        filedriversNotWorking = []
        name = "ovs-healthcheck-test-{0}".format(self.machine_id)

        self.utility.logger("Checking filedrivers: ", self.module, 3, 'checkFileDrivers', False)

        vpools = VPoolList.get_vpools()

        # perform tests
        if len(vpools) != 0:

            for vp in vpools:

                # check filedriver
                t = threading.Thread(target=self._checkFiledriver, args=(1, vp.name, name))
                t.daemon = True
                t.start()

                time.sleep(5)

                # if thread is still alive after x seconds or got exception, something is wrong
                if t.isAlive() or not os.path.exists("/mnt/{0}/{1}.xml".format(vp.name, name)):
                    filedriversNotWorking.append(vp.name)

                # clean-up
                if len(filedriversNotWorking) == 0:
                    self.utility.executeBashCommand("rm -f /mnt/{0}/{1}.xml".format(vp.name, name))


            # check if filedrivers are OK!
            if len(filedriversNotWorking) == 0:
                self.utility.logger("All filedrivers seem to be working fine!", self.module, 1, 'filedrivers')
            else:
                self.utility.logger("Some filedrivers seem to have some problems: {0}"
                                    .format(', '.join(filedriversNotWorking)), self.module, 0, 'filedrivers')

        else:
            self.utility.logger("No vPools found!", self.module, 5, 'filedrivers')
Developer: jtorreke | Project: openvstorage-health-check | Lines: 40 | Source: openvstoragecluster_health_check.py

Example 10: _bootstrap_dal_models

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]

#......... part of the code omitted here .........
                    self._register_dal_model(1, vd, 'statistics', "2.15", key = "metadata_store_misses", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.16", key = "backend_data_written", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.17", key = "data_read_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.18", key = "read_operations", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.19", key = "cluster_cache_hits", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.20", key = "cluster_cache_misses_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.21", key = "cluster_cache_hits_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.22", key = "cache_hits_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.23", key = "timestamp", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.24", key = "metadata_store_misses_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.25", key = "backend_data_written_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.26", key = "backend_read_operations", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.27", key = "data_written", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.28", key = "metadata_store_hits", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.29", key = "backend_data_read_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.30", key = "operations_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.31", key = "backend_read_operations_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.32", key = "data_transferred_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.33", key = "write_operations_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.34", key = "data_transferred", atype = int)
                    self._register_dal_model(1, vd, 'info', "3", key = 'stored', atype = int)
                    self._register_dal_model(1, vd, 'info', "4", key = 'failover_mode', atype = int)
                    self._register_dal_model(1, vd, 'snapshots', "5", atype = int)
                    self.instance_oid += 1

            for pm in PMachineList.get_pmachines():
                _guids.add(pm.guid)
                if not self._check_added(pm):
                    self._register_dal_model(2, pm, 'guid', "0")
                    self._register_dal_model(2, pm, 'name', "1")
                    self._register_dal_model(2, pm, 'host_status', "2")
                    self.instance_oid += 1

            for vp in VPoolList.get_vpools():
                _guids.add(vp.guid)
                if not self._check_added(vp):
                    self._register_dal_model(3, vp, 'guid', "0")
                    self._register_dal_model(3, vp, 'name', "1")
                    self._register_dal_model(3, vp, 'statistics', "2.0", key = "operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.1", key = "cluster_cache_misses_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.2", key = "data_read", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.3", key = "sco_cache_misses", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.4", key = "sco_cache_hits_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.5", key = "sco_cache_hits", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.6", key = "write_operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.7", key = "cluster_cache_misses", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.8", key = "read_operations_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.9", key = "sco_cache_misses_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.10", key = "backend_write_operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.11", key = "backend_data_read", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.12", key = "cache_hits", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.13", key = "backend_write_operations_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.14", key = "metadata_store_hits_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.15", key = "metadata_store_misses", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.16", key = "backend_data_written", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.17", key = "data_read_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.18", key = "read_operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.19", key = "cluster_cache_hits", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.20", key = "data_written_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.21", key = "cluster_cache_hits_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.22", key = "cache_hits_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.23", key = "timestamp", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.24", key = "metadata_store_misses_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.25", key = "backend_data_written_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.26", key = "backend_read_operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.27", key = "data_written", atype = int)
Developer: DarumasLegs | Project: framework | Lines: 70 | Source: ovssnmpserver.py

Example 11: migrate

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]

#......... part of the code omitted here .........

        # Version 6 introduced:
        # - Distributed scrubbing
        if working_version < 6:
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.lists.storagedriverlist import StorageDriverList
            from ovs.extensions.generic.sshclient import SSHClient
            for storage_driver in StorageDriverList.get_storagedrivers():
                root_client = SSHClient(storage_driver.storagerouter, username='root')
                for partition in storage_driver.partitions:
                    if partition.role == DiskPartition.ROLES.SCRUB:
                        old_path = partition.path
                        partition.sub_role = None
                        partition.save()
                        partition.invalidate_dynamics(['folder', 'path'])
                        if root_client.dir_exists(partition.path):
                            continue  # New directory already exists
                        if '_mds_' in old_path:
                            if root_client.dir_exists(old_path):
                                root_client.symlink({partition.path: old_path})
                        if not root_client.dir_exists(partition.path):
                            root_client.dir_create(partition.path)
                        root_client.dir_chmod(partition.path, 0777)

            working_version = 6

        # Version 7 introduced:
        # - vPool status
        if working_version < 7:
            from ovs.dal.hybrids import vpool
            reload(vpool)
            from ovs.dal.hybrids.vpool import VPool
            from ovs.dal.lists.vpoollist import VPoolList
            for _vpool in VPoolList.get_vpools():
                vpool = VPool(_vpool.guid)
                if hasattr(vpool, 'status') and vpool.status is None:
                    vpool.status = VPool.STATUSES.RUNNING
                    vpool.save()

            working_version = 7

        # Version 10 introduced:
        # - Reverse indexes are stored in persistent store
        # - Store more non-changing metadata on disk iso using a dynamic property
        if working_version < 10:
            from ovs.dal.helpers import HybridRunner, Descriptor
            from ovs.dal.datalist import DataList
            from ovs.extensions.storage.persistentfactory import PersistentFactory
            from ovs.extensions.storage.volatilefactory import VolatileFactory
            persistent = PersistentFactory.get_client()
            for prefix in ['ovs_listcache', 'ovs_reverseindex']:
                for key in persistent.prefix(prefix):
                    persistent.delete(key)
            for key in persistent.prefix('ovs_data_'):
                persistent.set(key, persistent.get(key))
            base_reverse_key = 'ovs_reverseindex_{0}_{1}|{2}|{3}'
            hybrid_structure = HybridRunner.get_hybrids()
            for class_descriptor in hybrid_structure.values():
                cls = Descriptor().load(class_descriptor).get_object()
                all_objects = DataList(cls, {'type': DataList.where_operator.AND,
                                             'items': []})
                for item in all_objects:
                    guid = item.guid
                    for relation in item._relations:
                        if relation.foreign_type is None:
                            rcls = cls
Developer: DarumasLegs | Project: framework | Lines: 70 | Source: ovsmigrator.py

Example 12: migrate

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]

#......... part of the code omitted here .........
                        for property_name in uniques:
                            ukey = '{0}{1}'.format(unique_key.format(property_name), hashlib.sha1(str(data[property_name])).hexdigest())
                            client.set(ukey, key)

            # Complete rework of the way we detect devices to assign roles or use as ASD
            # Allow loop-, raid-, nvme-, ??-devices and logical volumes as ASD (https://github.com/openvstorage/framework/issues/792)
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            from ovs.extensions.generic.sshclient import SSHClient, UnableToConnectException
            from ovs.lib.disk import DiskController

            for storagerouter in StorageRouterList.get_storagerouters():
                try:
                    client = SSHClient(storagerouter, username='root')
                except UnableToConnectException:
                    raise

                # Retrieve all symlinks for all devices
                # Example of name_alias_mapping:
                # {'/dev/md0': ['/dev/disk/by-id/md-uuid-ad2de634:26d97253:5eda0a23:96986b76', '/dev/disk/by-id/md-name-OVS-1:0'],
                #  '/dev/sda': ['/dev/disk/by-path/pci-0000:03:00.0-sas-0x5000c295fe2ff771-lun-0'],
                #  '/dev/sda1': ['/dev/disk/by-uuid/e3e0bc62-4edc-4c6b-a6ce-1f39e8f27e41', '/dev/disk/by-path/pci-0000:03:00.0-sas-0x5000c295fe2ff771-lun-0-part1']}
                name_alias_mapping = {}
                alias_name_mapping = {}
                for path_type in client.dir_list(directory='/dev/disk'):
                    if path_type in ['by-uuid', 'by-partuuid']:  # UUIDs can change after creating a filesystem on a partition
                        continue
                    directory = '/dev/disk/{0}'.format(path_type)
                    for symlink in client.dir_list(directory=directory):
                        symlink_path = '{0}/{1}'.format(directory, symlink)
                        link = client.file_read_link(symlink_path)
                        if link not in name_alias_mapping:
                            name_alias_mapping[link] = []
                        name_alias_mapping[link].append(symlink_path)
                        alias_name_mapping[symlink_path] = link

                for disk in storagerouter.disks:
                    if disk.aliases is None:
                        # noinspection PyProtectedMember
                        device_path = '/dev/{0}'.format(disk.name)
                        disk.aliases = name_alias_mapping.get(device_path, [device_path])
                        disk.save()
                    for partition in disk.partitions:
                        if partition.aliases is None:
                            # noinspection PyProtectedMember
                            partition_device = alias_name_mapping.get(partition._data.get('path'))
                            if partition_device is None:
                                partition.aliases = []
                                partition.save()
                                continue
                            partition.aliases = name_alias_mapping.get(partition_device, [])
                            partition.save()

                DiskController.sync_with_reality(storagerouter_guid=storagerouter.guid)

            # Only support ALBA backend type
            from ovs.dal.lists.backendtypelist import BackendTypeList
            for backend_type in BackendTypeList.get_backend_types():
                if backend_type.code != 'alba':
                    backend_type.delete()

            # Reformat the vpool.metadata information
            from ovs.dal.lists.vpoollist import VPoolList
            for vpool in VPoolList.get_vpools():
                new_metadata = {}
                for metadata_key, value in vpool.metadata.items():
                    new_info = {}
                    storagerouter_guids = [key for key in vpool.metadata.keys() if not key.startswith('backend')]
                    if isinstance(value, dict):
                        read_cache = value.get('backend_info', {}).get('fragment_cache_on_read', True)
                        write_cache = value.get('backend_info', {}).get('fragment_cache_on_write', False)
                        new_info['backend_info'] = {'alba_backend_guid': value.get('backend_guid'),
                                                    'backend_guid': None,
                                                    'frag_size': value.get('backend_info', {}).get('frag_size'),
                                                    'name': value.get('name'),
                                                    'policies': value.get('backend_info', {}).get('policies'),
                                                    'preset': value.get('preset'),
                                                    'sco_size': value.get('backend_info', {}).get('sco_size'),
                                                    'total_size': value.get('backend_info', {}).get('total_size')}
                        new_info['arakoon_config'] = value.get('arakoon_config')
                        new_info['connection_info'] = {'host': value.get('connection', {}).get('host', ''),
                                                       'port': value.get('connection', {}).get('port', ''),
                                                       'local': value.get('connection', {}).get('local', ''),
                                                       'client_id': value.get('connection', {}).get('client_id', ''),
                                                       'client_secret': value.get('connection', {}).get('client_secret', '')}
                        if metadata_key == 'backend':
                            new_info['caching_info'] = dict((sr_guid, {'fragment_cache_on_read': read_cache, 'fragment_cache_on_write': write_cache}) for sr_guid in storagerouter_guids)
                    if metadata_key in storagerouter_guids:
                        metadata_key = 'backend_aa_{0}'.format(metadata_key)
                    new_metadata[metadata_key] = new_info
                vpool.metadata = new_metadata
                vpool.save()

            # Removal of READ role
            from ovs.dal.lists.diskpartitionlist import DiskPartitionList
            for partition in DiskPartitionList.get_partitions():
                if 'READ' in partition.roles:
                    partition.roles.remove('READ')
                    partition.save()

        return OVSMigrator.THIS_VERSION
Developer: grimpy | Project: openvstorage | Lines: 104 | Source: ovsmigrator.py

Example 13: cluster_registry_checkup

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]
    def cluster_registry_checkup():
        """
        Verify whether changes have occurred in the cluster registry for each vPool
        :return: Information whether changes occurred
        :rtype: dict
        """
        changed_vpools = {}
        for vpool in VPoolList.get_vpools():
            changed_vpools[vpool.guid] = {'changes': False,
                                          'success': True}
            try:
                StorageDriverController._logger.info('Validating cluster registry settings for Vpool {0}'.format(vpool.guid))

                current_configs = vpool.clusterregistry_client.get_node_configs()
                changes = len(current_configs) == 0
                node_configs = []
                for sd in vpool.storagedrivers:
                    sd.invalidate_dynamics(['cluster_node_config'])
                    new_config = sd.cluster_node_config
                    node_configs.append(ClusterNodeConfig(**new_config))
                    if changes is False:
                        current_node_configs = [config for config in current_configs if config.vrouter_id == sd.storagedriver_id]
                        if len(current_node_configs) == 1:
                            current_node_config = current_node_configs[0]
                            for key in new_config:
                                if getattr(current_node_config, key) != new_config[key]:
                                    changes = True
                                    break
                changed_vpools[vpool.guid]['changes'] = changes

                if changes is True:
                    StorageDriverController._logger.info('Cluster registry settings for Vpool {0} needs to be updated'.format(vpool.guid))
                    available_storagedrivers = []
                    for sd in vpool.storagedrivers:
                        storagerouter = sd.storagerouter
                        try:
                            SSHClient(storagerouter, username='root')
                            with remote(storagerouter.ip, [LocalStorageRouterClient]) as rem:
                                sd_key = '/ovs/vpools/{0}/hosts/{1}/config'.format(vpool.guid, sd.storagedriver_id)
                                if Configuration.exists(sd_key) is True:
                                    path = Configuration.get_configuration_path(sd_key)
                                    lsrc = rem.LocalStorageRouterClient(path)
                                    lsrc.server_revision()  # 'Cheap' call to verify whether volumedriver is responsive
                                    available_storagedrivers.append(sd)
                        except UnableToConnectException:
                            StorageDriverController._logger.warning('StorageRouter {0} not available.'.format(storagerouter.name))
                        except Exception as ex:
                            if 'ClusterNotReachableException' in str(ex):
                                StorageDriverController._logger.warning('StorageDriver {0} on StorageRouter {1} not available.'.format(
                                    sd.guid, storagerouter.name
                                ))
                            else:
                                StorageDriverController._logger.exception('Got exception when validating StorageDriver {0} on StorageRouter {1}.'.format(
                                    sd.guid, storagerouter.name
                                ))

                    StorageDriverController._logger.info('Updating cluster node configs for VPool {0}'.format(vpool.guid))
                    vpool.clusterregistry_client.set_node_configs(node_configs)
                    for sd in available_storagedrivers:
                        StorageDriverController._logger.info('Trigger config reload for StorageDriver {0}'.format(sd.guid))
                        vpool.storagedriver_client.update_cluster_node_configs(str(sd.storagedriver_id), req_timeout_secs=10)
                    StorageDriverController._logger.info('Updating cluster node configs for Vpool {0} completed'.format(vpool.guid))
                else:
                    StorageDriverController._logger.info('Cluster registry settings for Vpool {0} is up to date'.format(vpool.guid))
            except Exception as ex:
                StorageDriverController._logger.exception('Got exception when validating cluster registry settings for Vpool {0}.'.format(vpool.name))
                changed_vpools[vpool.guid]['success'] = False
                changed_vpools[vpool.guid]['error'] = ex.message
        return changed_vpools
Developer: openvstorage | Project: framework | Lines: 71 | Source: storagedriver.py

Example 14: checkModelConsistency

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]
    def checkModelConsistency(self):

        self.utility.logger("Checking model consistency: ", self.module, 3, 'checkModelConsistency', False)

        #
        # RabbitMQ check: cluster verification
        #

        self.utility.logger("Precheck: verification of RabbitMQ cluster: ", self.module, 3,
                                'checkRabbitMQcluster', False)

        if self.utility.node_type == "MASTER":

            cluster_status = self.utility.executeBashCommand("rabbitmqctl cluster_status")

            if "Error" not in cluster_status[1]:

                # this can happen
                if len(cluster_status) <= 3:
                    partition_status = cluster_status[2]
                else:
                    partition_status = cluster_status[3]

                # check partition status
                if '@' in partition_status:
                    self.utility.logger("Seems like the RabbitMQ cluster has 'partition' problems, please check this...",
                                        self.module, 0, 'process_rabbitmq', False)
                else:
                    self.utility.logger("RabbitMQ does not seem to have 'partition' problems!", self.module, 1,
                                        'process_rabbitmq', False)
            else:
                self.utility.logger("Seems like the RabbitMQ cluster has errors, maybe it is offline?", self.module, 0,
                                    'process_rabbitmq', False)

        else:
            self.utility.logger("RabbitMQ is not running/active on this server!", self.module, 5,
                                'process_rabbitmq', False)

        #
        # Checking consistency of volumedriver vs. ovsdb and backwards
        #

        for vp in VPoolList.get_vpools():

            self.utility.logger("Checking consistency of volumedriver vs. ovsdb for vPool '{0}': ".format(vp.name),
                                self.module, 3, 'checkDiscrepanciesVoldrvOvsdb', False)

            # list of vdisks that are in model but are not in volumedriver
            missingInVolumedriver = []

            # list of volumes that are in volumedriver but are not in model
            missingInModel = []

            # fetch configfile of vpool for the volumedriver
            config_file = self.utility.fetchConfigFilePath(vp.name, self.machine_id, 1, vp.guid)
            voldrv_client = src.LocalStorageRouterClient(config_file)

            # collect data from volumedriver
            voldrv_volume_list = voldrv_client.list_volumes()

            # collect data from model
            model_vdisk_list = vp.vdisks
            vol_ids = [vdisk.volume_id for vdisk in vp.vdisks]

            # crossreference model vs. volumedriver
            for vdisk in vol_ids:
                if vdisk not in voldrv_volume_list:
                    missingInVolumedriver.append(vdisk)

            # crossreference volumedriver vs. model
            for voldrv_id in voldrv_volume_list:
                if voldrv_id not in vol_ids:
                    missingInModel.append(voldrv_id)

            # display discrepancies for vPool
            if len(missingInVolumedriver) != 0:
                self.utility.logger("Detected volumes that are MISSING in volumedriver but ARE in ovsdb in vPool "
                                    "'{0}': {1}".format(vp.name, ', '.join(missingInVolumedriver)), self.module, 0,
                                    'discrepancies_ovsdb_{0}'.format(vp.name))
            else:
                self.utility.logger("NO discrepancies found for ovsdb in vPool '{0}'".format(vp.name), self.module, 1,
                                    'discrepancies_ovsdb_{0}'.format(vp.name))

            if len(missingInModel) != 0:
                self.utility.logger("Detected volumes that are AVAILABLE in volumedriver but ARE NOT in ovsdb in vPool "
                                    "'{0}': {1}".format(vp.name, ', '.join(missingInModel)), self.module, 0,
                                    'discrepancies_voldrv_{0}'.format(vp.name))
            else:
                self.utility.logger("NO discrepancies found for voldrv in vPool '{0}'".format(vp.name), self.module, 1,
                                    'discrepancies_voldrv_{0}'.format(vp.name))
Developer: jtorreke | Project: openvstorage-health-check | Lines: 92 | Source: openvstoragecluster_health_check.py

Example 15: _ns_statistics

# Required import: from ovs.dal.lists.vpoollist import VPoolList [as alias]
# Or: from ovs.dal.lists.vpoollist.VPoolList import get_vpools [as alias]
    def _ns_statistics(self):
        """
        Return a dict with the ASD namespace statistics, aggregated globally and per vPool
        """
        # Collect ALBA related statistics
        alba_dataset = {}
        for namespace in self.ns_data:
            if namespace['namespace']['state'] != 'active':
                continue
            alba_dataset[namespace['name']] = namespace['statistics']
        # Collect vPool/vDisk data
        vdisk_dataset = {}
        for vpool in VPoolList.get_vpools():
            if vpool not in vdisk_dataset:
                vdisk_dataset[vpool] = []
            for vdisk in vpool.vdisks:
                vdisk_dataset[vpool].append(vdisk.volume_id)

        # Load disk statistics
        def _load_disks(_node, _dict):
            for _asd in _node.all_disks:
                if 'asd_id' in _asd and _asd['asd_id'] in asds and 'usage' in _asd:
                    _dict['size'] += _asd['usage']['size']
                    _dict['used'] += _asd['usage']['used']

        global_usage = {'size': 0,
                        'used': 0}
        nodes = set()
        asds = []
        for asd in self.asds:
            asds.append(asd.asd_id)
            if asd.alba_node not in nodes:
                nodes.add(asd.alba_node)
        threads = []
        for node in nodes:
            thread = Thread(target=_load_disks, args=(node, global_usage))
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()

        # Cross merge
        dataset = {'global': {'size': global_usage['size'],
                              'used': global_usage['used']},
                   'vpools': {},
                   'unknown': {'storage': 0,
                               'logical': 0}}
        for vpool in vdisk_dataset:
            for namespace in vdisk_dataset[vpool]:
                if namespace in alba_dataset:
                    if vpool.guid not in dataset['vpools']:
                        dataset['vpools'][vpool.guid] = {'storage': 0,
                                                         'logical': 0}
                    dataset['vpools'][vpool.guid]['storage'] += alba_dataset[namespace]['storage']
                    dataset['vpools'][vpool.guid]['logical'] += alba_dataset[namespace]['logical']
                    del alba_dataset[namespace]
            fd_namespace = 'fd-{0}-{1}'.format(vpool.name, vpool.guid)
            if fd_namespace in alba_dataset:
                if vpool.guid not in dataset['vpools']:
                    dataset['vpools'][vpool.guid] = {'storage': 0,
                                                     'logical': 0}
                dataset['vpools'][vpool.guid]['storage'] += alba_dataset[fd_namespace]['storage']
                dataset['vpools'][vpool.guid]['logical'] += alba_dataset[fd_namespace]['logical']
                del alba_dataset[fd_namespace]
        for namespace in alba_dataset:
            dataset['unknown']['storage'] += alba_dataset[namespace]['storage']
            dataset['unknown']['logical'] += alba_dataset[namespace]['logical']
        return dataset
Developer: tbogaert | Project: framework-alba-plugin | Lines: 70 | Source: albabackend.py


Note: The ovs.dal.lists.vpoollist.VPoolList.get_vpools examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult the corresponding project's license before redistributing or reusing the code; do not reproduce without permission.