This article collects typical usage examples of the Python class cafe.drivers.unittest.datasets.DatasetList. If you are wondering what the DatasetList class is for, how to use it, or what it looks like in real code, the selected examples below should help.
Fifteen code examples of the DatasetList class are shown below, sorted by popularity by default.
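Before the examples, here is a minimal sketch of the pattern most of them feed into: build a DatasetList, add named datasets with append_new_dataset, and hand the list to a data-driven test class. The class name, base class, and size values below are placeholders, not taken from any of the examples; in the real code the base class is a project fixture such as ObjectStorageFixture.

import unittest

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, data_driven_test

volume_sizes = DatasetList()
for size in (1, 50, 100):  # illustrative sizes only
    volume_sizes.append_new_dataset("size_{0}".format(size),
                                    {'volume_size': size})


@DataDrivenFixture
class VolumeSizeExample(unittest.TestCase):  # placeholder base class

    @data_driven_test(volume_sizes)
    def ddtest_create_volume(self, volume_size=None):
        # One test is generated per dataset; the dataset's data dict is
        # passed to the generated test as keyword arguments.
        self.assertGreater(volume_size, 0)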
Example 1: images_by_volume_type
def images_by_volume_type(
        cls, max_datasets=None, randomize=False, image_filter=None,
        volume_type_filter=None):
    """Returns a DatasetList of permutations of Volume Types and Images.
    Requests all available images and volume types from the API, and applies
    image_filter and volume_type_filter if provided.
    Filters should be dictionaries with model attributes as keys and
    lists of attributes as key values
    """
    image_list = cls._filter_model_list(cls._images(), image_filter)
    volume_type_list = cls._filter_model_list(
        cls._volume_types(), volume_type_filter)

    # Create dataset from all combinations of all images and volume types
    dataset_list = DatasetList()
    for vol_type in volume_type_list:
        for img in image_list:
            data = {'volume_type': vol_type,
                    'image': img}
            testname = "{0}_volume_from_{1}_image".format(
                vol_type.name,
                str(img.name).replace(" ", "_"))
            dataset_list.append_new_dataset(testname, data)

    # Apply modifiers
    if randomize:
        shuffle(dataset_list)
    if max_datasets:
        dataset_list = dataset_list[:max_datasets]
    return dataset_list
Example 2: images_by_volume_type
def images_by_volume_type(
        cls, max_datasets=None, randomize=False,
        image_filter=None, volume_type_filter=None,
        image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
        volume_type_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
    """Returns a DatasetList of all combinations of Images and
    Volume Types.
    Filters should be dictionaries with model attributes as keys and
    lists of attributes as key values
    """
    image_list = cls._get_images()
    image_list = cls._filter_model_list(
        image_list, model_filter=image_filter,
        filter_mode=image_filter_mode)

    volume_type_list = cls._get_volume_types()
    volume_type_list = cls._filter_model_list(
        volume_type_list, model_filter=volume_type_filter,
        filter_mode=volume_type_filter_mode)

    # Create dataset from all combinations of all images and volume types
    dataset_list = DatasetList()
    for vtype in volume_type_list:
        for image in image_list:
            data = {'volume_type': vtype,
                    'image': image}
            testname = \
                "{0}_and_{1}".format(
                    str(vtype.name).replace(" ", "_"),
                    str(image.name).replace(" ", "_"))
            dataset_list.append_new_dataset(testname, data)

    # Apply modifiers
    return cls._modify_dataset_list(
        dataset_list, max_datasets=max_datasets, randomize=randomize)
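The image_filter and volume_type_filter arguments are plain dictionaries keyed by model attribute, each mapping to a list of acceptable values, and the *_filter_mode arguments control how those matches are applied. A hedged call sketch, where ComputeDatasets is a stand-in for whatever class exposes this classmethod and the attribute values are invented:

dataset_list = ComputeDatasets.images_by_volume_type(
    max_datasets=10,
    image_filter={'name': ['Ubuntu 14.04 LTS', 'CentOS 7']},   # made-up image names
    volume_type_filter={'name': ['SATA', 'SSD']})              # made-up volume type names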
Example 3: volume_types_with_restore_control
def volume_types_with_restore_control(
        cls, max_datasets=None, randomize=False, model_filter=None,
        filter_mode=BlockstorageDatasets.INCLUSION_MODE):
    """Returns a DatasetList of all VolumeTypes
    Filters should be dictionaries with model attributes as keys and
    lists of attributes as key values.
    """
    volume_type_list = cls._get_volume_types()
    volume_type_list = cls._filter_model_list(
        volume_type_list, model_filter=model_filter,
        filter_mode=filter_mode)

    dataset_list = DatasetList()
    is_enabled = \
        cls._volumes.config.allow_snapshot_restore_to_different_type
    for vol_type in volume_type_list:
        data = {'volume_type_name': vol_type.name,
                'volume_type_id': vol_type.id_,
                'restore_to_different_type_enabled': is_enabled}
        test_name = "{0}_to_other_is_{1}".format(
            vol_type.name, "allowed" if is_enabled else "disabled")
        dataset_list.append_new_dataset(test_name, data)

    # Apply modifiers
    return cls._modify_dataset_list(
        dataset_list, max_datasets=max_datasets, randomize=randomize)
Example 4: images_by_flavor
def images_by_flavor(
        cls, max_datasets=None, randomize=False,
        image_filter=None, flavor_filter=None,
        image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
        flavor_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
    """Returns a DatasetList of all combinations of Flavors and Images.
    Filters should be dictionaries with model attributes as keys and
    lists of attributes as key values
    """
    image_list = cls._get_images()
    image_list = cls._filter_model_list(
        image_list, model_filter=image_filter,
        filter_mode=image_filter_mode)

    flavor_list = cls._get_flavors()
    flavor_list = cls._filter_model_list(
        flavor_list, model_filter=flavor_filter,
        filter_mode=flavor_filter_mode)

    dataset_list = DatasetList()
    for image in image_list:
        for flavor in flavor_list:
            data = {'flavor': flavor,
                    'image': image}
            testname = \
                "image_{0}_and_flavor_{1}".format(
                    str(image.name).replace(" ", "_").replace("/", "-"),
                    str(flavor.name).replace(" ", "_").replace("/", "-"))
            dataset_list.append_new_dataset(testname, data)

    # Apply modifiers
    return cls._modify_dataset_list(
        dataset_list, max_datasets=max_datasets, randomize=randomize)
Example 5: volume_types
def volume_types(
        cls, max_datasets=None, randomize=None, model_filter=None,
        filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE, tags=None):
    """Returns a DatasetList of all VolumeTypes
    Filters should be dictionaries with model attributes as keys and
    lists of attributes as key values
    """
    volume_type_list = cls._get_volume_types()
    volume_type_list = cls._filter_model_list(
        volume_type_list, model_filter=model_filter,
        filter_mode=filter_mode)

    dataset_list = DatasetList()
    for vol_type in volume_type_list:
        data = {'volume_type_name': vol_type.name,
                'volume_type_id': vol_type.id_}
        dataset_list.append_new_dataset(vol_type.name, data)

    # Apply modifiers
    dataset_list = cls._modify_dataset_list(
        dataset_list, max_datasets=max_datasets, randomize=randomize)

    # Apply Tags
    if tags:
        dataset_list.apply_test_tags(*tags)
    return dataset_list
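A short usage sketch for the tags argument; VolumeTypesDatasets is a stand-in name for the class holding this method and 'smoke' is an arbitrary tag. apply_test_tags attaches the tags to every dataset in the list, presumably so the test runner can later select tests by tag:

smoke_volume_types = VolumeTypesDatasets.volume_types(
    max_datasets=5, randomize=True, tags=['smoke'])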
Example 6: default_volume_type
def default_volume_type(cls):
    vol_type = cls.default_volume_type_model()
    dataset = _Dataset(
        name=vol_type.name,
        data_dict={
            'volume_type_name': vol_type.name,
            'volume_type_id': vol_type.id_})
    dataset_list = DatasetList()
    dataset_list.append(dataset)
    return dataset_list
Example 7: volume_types
def volume_types(cls):
    """Returns a DatasetList of Volume Type names and id's"""
    cinder_cli = CinderCLI_Composite()
    volume_type_list = cinder_cli.behaviors.list_volume_types()
    dataset_list = DatasetList()
    for vol_type in volume_type_list:
        data = {'volume_type_name': vol_type.name,
                'volume_type_id': vol_type.id_}
        dataset_list.append_new_dataset(vol_type.name, data)
    return dataset_list
Example 8: volume_types
def volume_types(
        cls, max_datasets=None, randomize=False, volume_type_filter=None):
    """Returns a DatasetList of Volume Type names and id's"""
    volume_type_list = cls._filter_model_list(
        cls._volume_types(), volume_type_filter)
    dataset_list = DatasetList()
    for vol_type in volume_type_list:
        data = {'volume_type_name': vol_type.name,
                'volume_type_id': vol_type.id_}
        dataset_list.append_new_dataset(vol_type.name, data)
    return dataset_list
Example 9: valid_quota_names
def valid_quota_names(cls):
    """Creates a list of expected resource names"""
    quota_test_dataset = DatasetList()
    resources = ["snapshots", "volumes", "gigabytes"]
    vol_types = cls._get_volume_type_names()
    for resource in resources:
        quota_test_dataset.append_new_dataset(resource, {"quota_name": resource})
        for vol_name in vol_types:
            resource_key = "{resource}_{vol_name}".format(
                resource=resource, vol_name=vol_name)
            quota_test_dataset.append_new_dataset(
                resource_key, {"quota_name": resource_key})
    return quota_test_dataset
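To make the output concrete: with two hypothetical volume type names, the loop above yields one dataset per global resource plus one per resource-and-type pair, mirroring Cinder's per-volume-type quota keys:

# Assuming cls._get_volume_type_names() returned ['SATA', 'SSD'] (made-up names),
# the generated quota_name values would be:
#   snapshots, snapshots_SATA, snapshots_SSD,
#   volumes,   volumes_SATA,   volumes_SSD,
#   gigabytes, gigabytes_SATA, gigabytes_SSD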
Example 10: flavors_by_images_by_volume_type
def flavors_by_images_by_volume_type(
        cls, max_datasets=None, randomize=None,
        flavor_filter=None, volume_type_filter=None, image_filter=None,
        flavor_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
        volume_type_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
        image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
    """Returns a DatasetList of all combinations of Flavors, Images, and
    Volume Types.
    Filters should be dictionaries with model attributes as keys and
    lists of attributes as key values
    """
    image_list = cls._get_images()
    image_list = cls._filter_model_list(
        image_list, model_filter=image_filter,
        filter_mode=image_filter_mode)

    flavor_list = cls._get_flavors()
    flavor_list = cls._filter_model_list(
        flavor_list, model_filter=flavor_filter,
        filter_mode=flavor_filter_mode)

    volume_type_list = cls._get_volume_types()
    volume_type_list = cls._filter_model_list(
        volume_type_list, model_filter=volume_type_filter,
        filter_mode=volume_type_filter_mode)

    # Create dataset from all combinations of all images, flavors, and
    # volume types
    dataset_list = DatasetList()
    for vtype in volume_type_list:
        for flavor in flavor_list:
            for image in image_list:
                data = {'volume_type': vtype,
                        'flavor': flavor,
                        'image': image}
                testname = \
                    "{flavor}_{image}_on_{vtype}".format(
                        flavor=str(flavor.name), image=str(image.name),
                        vtype=str(vtype.name)).replace(' ', '_').replace(
                        '.', '_').replace('(', '').replace(
                        ')', '').replace('/', '-')
                dataset_list.append_new_dataset(testname, data)

    # Apply modifiers
    return cls._modify_dataset_list(
        dataset_list, max_datasets=max_datasets, randomize=randomize)
Example 11: build_basic_dataset
def build_basic_dataset(data_dict, name):
    """
    @summary: Builds a dataset list from a dictionary of key-value pairs
    @param data_dict: Url amendments and values for the dataset list
    @type data_dict: Dictionary
    @param name: Name of the test parameter
    @type name: String
    @return: Dataset_List
    @rtype: DatasetList
    """
    dataset_list = DatasetList()
    for key, value in data_dict.iteritems():
        dataset_list.append_new_dataset(key, {name: value})
    return dataset_list
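A quick usage sketch; the URL amendments and parameter name are invented for illustration:

marker_datasets = build_basic_dataset(
    {'with_marker': '?marker=0', 'no_marker': ''}, 'url_amendment')
# Produces two datasets named 'with_marker' and 'no_marker', each carrying a
# single 'url_amendment' value for the generated test.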
Example 12: images
def images(
        cls, max_datasets=None, randomize=False, model_filter=None,
        filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
    """Returns a DatasetList of all Images.
    Filters should be dictionaries with model attributes as keys and
    lists of attributes as key values
    """
    image_list = cls._get_images()
    image_list = cls._filter_model_list(
        image_list, model_filter=model_filter, filter_mode=filter_mode)

    dataset_list = DatasetList()
    for img in image_list:
        data = {'image': img}
        dataset_list.append_new_dataset(
            str(img.name).replace(" ", "_").replace("/", "-"), data)

    # Apply modifiers
    return cls._modify_dataset_list(
        dataset_list, max_datasets=max_datasets, randomize=randomize)
Example 13: flavors
def flavors(
        cls, max_datasets=None, randomize=False, model_filter=None,
        filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
    """Returns a DatasetList of all Flavors
    Filters should be dictionaries with model attributes as keys and
    lists of attributes as key values
    """
    flavor_list = cls._get_flavors()
    flavor_list = cls._filter_model_list(
        flavor_list, model_filter=model_filter, filter_mode=filter_mode)

    dataset_list = DatasetList()
    for flavor in flavor_list:
        data = {'flavor': flavor}
        dataset_list.append_new_dataset(
            str(flavor.name).replace(" ", "_").replace("/", "-"), data)

    # Apply modifiers
    return cls._modify_dataset_list(
        dataset_list, max_datasets=max_datasets, randomize=randomize)
Example 14: import
import os
import json

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import (
    DataDrivenFixture, data_driven_test)
from cafe.engine.config import EngineConfig
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
from cloudcafe.common.tools import randomstring as randstring

BASE_NAME = "extract_archive"
HTTP_OK = 200

supported_formats = ['tar', 'tar.gz', 'tar.bz2']

archive_formats = DatasetList()
for archive_format in supported_formats:
    for wrong_format in supported_formats:
        if archive_format == wrong_format:
            continue
        name = '{}-{}'.format(archive_format, wrong_format)
        archive_formats.append_new_dataset(
            name, {'name': name,
                   'archive_format': archive_format,
                   'wrong_format': wrong_format})


@DataDrivenFixture
class ExtractArchiveFormatParameterTest(ObjectStorageFixture):
    """
    Tests Swift expand archive operations:
    """
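The nested loop above pairs every supported format with each of the others, so archive_formats ends up holding six datasets (tar-tar.gz, tar-tar.bz2, tar.gz-tar, tar.gz-tar.bz2, tar.bz2-tar, tar.bz2-tar.gz), each recording the format the archive is actually built in alongside a mismatched format, presumably passed as the extract-archive format parameter to exercise the error path.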
Example 15: DatasetList
import unittest

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, \
    data_driven_test, tags
from cloudcafe.networking.networks.config import NetworkingSecondUserConfig
from cloudroast.networking.networks.fixtures \
    import NetworkingSecurityGroupsFixture
from cloudcafe.networking.networks.extensions.security_groups_api.constants \
    import SecurityGroupsErrorTypes, SecurityGroupsResponseCodes

# Creating data sets for data driven testing
data_set_list = DatasetList()
data_set_list.append_new_dataset(
    name='',
    data_dict={},
    tags=['sdn', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_protocol_icmp',
    data_dict={'protocol': 'icmp'},
    tags=['periodic', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_protocol_tcp',
    data_dict={'protocol': 'tcp'},
    tags=['periodic', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
    name='w_protocol_udp',
    data_dict={'protocol': 'udp'},
    tags=['periodic', 'post', 'positive', 'rbac_creator'])