This article collects typical usage examples of the Python method cafe.drivers.unittest.datasets.DatasetList.append_new_dataset. If you are wondering what DatasetList.append_new_dataset does, how to call it, or where to find real-world uses of it, the curated code samples below should help. You can also read more about its containing class, cafe.drivers.unittest.datasets.DatasetList.
The following presents 15 code examples of DatasetList.append_new_dataset, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
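Before the examples, here is a minimal, self-contained sketch of the pattern these snippets follow, assuming an OpenCAFE-based test project; the fixture class, test method name, and data values are hypothetical and only illustrate how append_new_dataset feeds data-driven tests (the decorator imports match those shown in Example 14 below).

import unittest

from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, data_driven_test

# Build a dataset list: each entry is a named dict of keyword arguments
# for the tests that will be generated from it.
dataset_list = DatasetList()
dataset_list.append_new_dataset('small_volume', {'size': 1})
dataset_list.append_new_dataset('large_volume', {'size': 100})


@DataDrivenFixture
class VolumeSizeExampleTest(unittest.TestCase):  # hypothetical fixture; real
                                                 # suites subclass project fixtures

    @data_driven_test(dataset_list)
    def ddtest_size_is_positive(self, size=None):
        # One test method is generated per dataset; the dataset's data
        # dict is passed in as keyword arguments.
        self.assertGreater(size, 0)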
Example 1: volume_types_with_restore_control
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
def volume_types_with_restore_control(
cls, max_datasets=None, randomize=False, model_filter=None,
filter_mode=BlockstorageDatasets.INCLUSION_MODE):
"""Returns a DatasetList of all VolumeTypes
Filters should be dictionaries with model attributes as keys and
lists of attributes as key values.
"""
volume_type_list = cls._get_volume_types()
volume_type_list = cls._filter_model_list(
volume_type_list, model_filter=model_filter,
filter_mode=filter_mode)
dataset_list = DatasetList()
is_enabled = \
cls._volumes.config.allow_snapshot_restore_to_different_type
for vol_type in volume_type_list:
data = {'volume_type_name': vol_type.name,
'volume_type_id': vol_type.id_,
'restore_to_different_type_enabled': is_enabled}
test_name = "{0}_to_other_is_{1}".format(
vol_type.name, "allowed" if is_enabled else "disabled")
dataset_list.append_new_dataset(test_name, data)
# Apply modifiers
return cls._modify_dataset_list(
dataset_list, max_datasets=max_datasets, randomize=randomize)
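The filter arguments used throughout these generators are plain dictionaries keyed by model attribute, each mapping to a list of acceptable values. A hedged invocation sketch of the generator above, assuming it is a classmethod of BlockstorageDatasets (as the default filter_mode suggests), that dataset objects expose name and data attributes, and that the volume type names are made up for illustration:

# Only generate datasets for volume types named "SATA" or "SSD"
# (hypothetical names); cap and shuffle the result.
model_filter = {'name': ['SATA', 'SSD']}

dataset_list = BlockstorageDatasets.volume_types_with_restore_control(
    max_datasets=5, randomize=True, model_filter=model_filter,
    filter_mode=BlockstorageDatasets.INCLUSION_MODE)

for dataset in dataset_list:
    # Each dataset carries a generated test name and a data dict.
    print("{0}: {1}".format(dataset.name, dataset.data))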
Example 2: images_by_volume_type
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
def images_by_volume_type(
cls, max_datasets=None, randomize=False, image_filter=None,
volume_type_filter=None):
"""Returns a DatasetList of permuations of Volume Types and Images.
Requests all available images and volume types from API, and applies
image_filter and volume_type_filter if provided.
Filters should be dictionaries with model attributes as keys and
lists of attributes as key values
"""
image_list = cls._filter_model_list(cls._images(), image_filter)
volume_type_list = cls._filter_model_list(
cls._volume_types(), volume_type_filter)
# Create dataset from all combinations of all images and volume types
dataset_list = DatasetList()
for vol_type in volume_type_list:
for img in image_list:
data = {'volume_type': vol_type,
'image': img}
testname = "{0}_volume_from_{1}_image".format(
vol_type.name,
str(img.name).replace(" ", "_"))
dataset_list.append_new_dataset(testname, data)
# Apply modifiers
if randomize:
shuffle(dataset_list)
if max_datasets:
dataset_list = dataset_list[:max_datasets]
return dataset_list
Example 3: images_by_flavor
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
def images_by_flavor(
cls, max_datasets=None, randomize=False,
image_filter=None, flavor_filter=None,
image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
flavor_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
"""Returns a DatasetList of all combinations of Flavors and Images.
Filters should be dictionaries with model attributes as keys and
lists of attributes as key values
"""
image_list = cls._get_images()
image_list = cls._filter_model_list(
image_list, model_filter=image_filter,
filter_mode=image_filter_mode)
flavor_list = cls._get_flavors()
flavor_list = cls._filter_model_list(
flavor_list, model_filter=flavor_filter,
filter_mode=flavor_filter_mode)
dataset_list = DatasetList()
for image in image_list:
for flavor in flavor_list:
data = {'flavor': flavor,
'image': image}
testname = \
"image_{0}_and_flavor_{1}".format(
str(image.name).replace(" ", "_").replace("/", "-"),
str(flavor.name).replace(" ", "_").replace("/", "-"))
dataset_list.append_new_dataset(testname, data)
# Apply modifiers
return cls._modify_dataset_list(
dataset_list, max_datasets=max_datasets, randomize=randomize)
Example 4: images_by_volume_type
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
def images_by_volume_type(
cls, max_datasets=None, randomize=False,
image_filter=None, volume_type_filter=None,
image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
volume_type_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
"""Returns a DatasetList of all combinations of Images and
Volume Types.
Filters should be dictionaries with model attributes as keys and
lists of attributes as key values
"""
image_list = cls._get_images()
image_list = cls._filter_model_list(
image_list, model_filter=image_filter,
filter_mode=image_filter_mode)
volume_type_list = cls._get_volume_types()
volume_type_list = cls._filter_model_list(
volume_type_list, model_filter=volume_type_filter,
filter_mode=volume_type_filter_mode)
# Create dataset from all combinations of all images and volume types
dataset_list = DatasetList()
for vtype in volume_type_list:
for image in image_list:
data = {'volume_type': vtype,
'image': image}
testname = \
"{0}_and_{1}".format(
str(vtype.name).replace(" ", "_"),
str(image.name).replace(" ", "_"))
dataset_list.append_new_dataset(testname, data)
# Apply modifiers
return cls._modify_dataset_list(
dataset_list, max_datasets=max_datasets, randomize=randomize)
Example 5: volume_types
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
def volume_types(
cls, max_datasets=None, randomize=None, model_filter=None,
filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE, tags=None):
"""Returns a DatasetList of all VolumeTypes
Filters should be dictionaries with model attributes as keys and
lists of attributes as key values
"""
volume_type_list = cls._get_volume_types()
volume_type_list = cls._filter_model_list(
volume_type_list, model_filter=model_filter,
filter_mode=filter_mode)
dataset_list = DatasetList()
for vol_type in volume_type_list:
data = {'volume_type_name': vol_type.name,
'volume_type_id': vol_type.id_}
dataset_list.append_new_dataset(vol_type.name, data)
# Apply modifiers
dataset_list = cls._modify_dataset_list(
dataset_list, max_datasets=max_datasets, randomize=randomize)
# Apply Tags
if tags:
dataset_list.apply_test_tags(*tags)
return dataset_list
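Compared with Example 1, this variant also accepts a tags argument; apply_test_tags attaches the given tags to every dataset in the list so the generated tests can later be selected by tag. A hedged invocation sketch, assuming the classmethod lives on a datasets class such as BlockstorageDatasets (the owning class is not shown in this excerpt) and using made-up volume type and tag names:

# Hypothetical call: SSD-backed types only, at most three datasets,
# shuffled, and tagged for later selection by the test runner.
dataset_list = BlockstorageDatasets.volume_types(
    max_datasets=3, randomize=True,
    model_filter={'name': ['SSD']},
    tags=['smoke', 'volume_types'])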
Example 6: volume_types
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
def volume_types(cls):
"""Returns a DatasetList of Volume Type names and id's"""
cinder_cli = CinderCLI_Composite()
volume_type_list = cinder_cli.behaviors.list_volume_types()
dataset_list = DatasetList()
for vol_type in volume_type_list:
data = {'volume_type_name': vol_type.name,
'volume_type_id': vol_type.id_}
dataset_list.append_new_dataset(vol_type.name, data)
return dataset_list
Example 7: volume_types
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
def volume_types(
cls, max_datasets=None, randomize=False, volume_type_filter=None):
"""Returns a DatasetList of Volume Type names and id's"""
volume_type_list = cls._filter_model_list(
cls._volume_types(), volume_type_filter)
dataset_list = DatasetList()
for vol_type in volume_type_list:
data = {'volume_type_name': vol_type.name,
'volume_type_id': vol_type.id_}
dataset_list.append_new_dataset(vol_type.name, data)
return dataset_list
Example 8: valid_quota_names
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
def valid_quota_names(cls):
"""Creates a list of expected resource names"""
quota_test_dataset = DatasetList()
resources = ["snapshots", "volumes", "gigabytes"]
vol_types = cls._get_volume_type_names()
for resource in resources:
quota_test_dataset.append_new_dataset(resource, {"quota_name": resource})
for vol_name in vol_types:
resource_key = "{resource}_{vol_name}".format(resource=resource, vol_name=vol_name)
quota_test_dataset.append_new_dataset(resource_key, {"quota_name": resource_key})
return quota_test_dataset
Example 9: flavors_by_images_by_volume_type
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
def flavors_by_images_by_volume_type(
cls, max_datasets=None, randomize=None,
flavor_filter=None, volume_type_filter=None, image_filter=None,
flavor_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
volume_type_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,
image_filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE,):
"""Returns a DatasetList of all combinations of Flavors and
Volume Types.
Filters should be dictionaries with model attributes as keys and
lists of attributes as key values
"""
image_list = cls._get_images()
image_list = cls._filter_model_list(
image_list, model_filter=image_filter,
filter_mode=image_filter_mode)
flavor_list = cls._get_flavors()
flavor_list = cls._filter_model_list(
flavor_list, model_filter=flavor_filter,
filter_mode=flavor_filter_mode)
volume_type_list = cls._get_volume_types()
volume_type_list = cls._filter_model_list(
volume_type_list, model_filter=volume_type_filter,
filter_mode=volume_type_filter_mode)
# Create dataset from all combinations of all images, flavors, and
# volume types
dataset_list = DatasetList()
for vtype in volume_type_list:
for flavor in flavor_list:
for image in image_list:
data = {'volume_type': vtype,
'flavor': flavor,
'image': image}
testname = \
"{flavor}_{image}_on_{vtype}".format(
flavor=str(flavor.name), image=str(image.name),
vtype=str(vtype.name)).replace(' ', '_').replace(
'.', '_').replace('(', '').replace(
')', '').replace('/', '-')
dataset_list.append_new_dataset(testname, data)
# Apply modifiers
return cls._modify_dataset_list(
dataset_list, max_datasets=max_datasets, randomize=randomize)
Example 10: build_basic_dataset
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
def build_basic_dataset(data_dict, name):
"""
@summary: Builds a dataset list from a dictionary of key-value pairs
@param data_dict: Url amendments and values for the dataset list
@type data_dict: Dictionary
@param name: Name of the test parameter
@type name: String
@return: Dataset_List
@rtype: DatasetList
"""
dataset_list = DatasetList()
for key, value in data_dict.iteritems():
dataset_list.append_new_dataset(key, {name: value})
return dataset_list
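Note that the excerpt above targets Python 2 (dict.iteritems); under Python 3 the equivalent call is data_dict.items(). A hedged usage sketch, with made-up URL amendments and a hypothetical parameter name, showing the shape of the resulting datasets:

# Each key becomes a dataset name; each value is stored under the given
# parameter name ('url_addition' is a hypothetical parameter).
data_dict = {'with_limit': '?limit=2', 'with_marker': '?marker=abc123'}
dataset_list = build_basic_dataset(data_dict, 'url_addition')
# Yields datasets named 'with_limit' and 'with_marker', carrying
# {'url_addition': '?limit=2'} and {'url_addition': '?marker=abc123'}.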
Example 11: images
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
def images(
cls, max_datasets=None, randomize=False, model_filter=None,
filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
"""Returns a DatasetList of all Images.
Filters should be dictionaries with model attributes as keys and
lists of attributes as key values
"""
image_list = cls._get_images()
image_list = cls._filter_model_list(
image_list, model_filter=model_filter, filter_mode=filter_mode)
dataset_list = DatasetList()
for img in image_list:
data = {'image': img}
dataset_list.append_new_dataset(
str(img.name).replace(" ", "_").replace("/", "-"), data)
# Apply modifiers
return cls._modify_dataset_list(
dataset_list, max_datasets=max_datasets, randomize=randomize)
Example 12: flavors
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
def flavors(
cls, max_datasets=None, randomize=False, model_filter=None,
filter_mode=ModelBasedDatasetToolkit.INCLUSION_MODE):
"""Returns a DatasetList of all Flavors
Filters should be dictionaries with model attributes as keys and
lists of attributes as key values
"""
flavor_list = cls._get_flavors()
flavor_list = cls._filter_model_list(
flavor_list, model_filter=model_filter, filter_mode=filter_mode)
dataset_list = DatasetList()
for flavor in flavor_list:
data = {'flavor': flavor}
dataset_list.append_new_dataset(
str(flavor.name).replace(" ", "_").replace("/", "-"), data)
# Apply modifiers
return cls._modify_dataset_list(
dataset_list, max_datasets=max_datasets, randomize=randomize)
Example 13: DatasetList
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
from cloudcafe.common.tools import randomstring as randstring
BASE_NAME = "extract_archive"
HTTP_OK = 200
supported_formats = ['tar', 'tar.gz', 'tar.bz2']
archive_formats = DatasetList()
for archive_format in supported_formats:
for wrong_format in supported_formats:
if archive_format == wrong_format:
continue
name = '{}-{}'.format(archive_format, wrong_format)
archive_formats.append_new_dataset(
name, {'name': name,
'archive_format': archive_format,
'wrong_format': wrong_format})
@DataDrivenFixture
class ExtractArchiveFormatParameterTest(ObjectStorageFixture):
"""
Tests Swift expand archive operations:
"""
@classmethod
def setUpClass(cls):
super(ExtractArchiveFormatParameterTest, cls).setUpClass()
cls.default_obj_name = cls.behaviors.VALID_OBJECT_NAME
cls.data_dir = EngineConfig().data_directory
cls.no_compression = None
cls.storage_url = cls.client.storage_url
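The module-level archive_formats list built above is typically consumed by a data-driven test method on the fixture; a hedged sketch follows (class name, method name, and assertion are assumptions, not part of the original excerpt, and the decorator imports are those shown in Example 14):

@DataDrivenFixture
class ExtractArchiveFormatSketch(ObjectStorageFixture):  # hypothetical class

    @data_driven_test(archive_formats)
    def ddtest_mismatched_format_is_rejected(
            self, name=None, archive_format=None, wrong_format=None):
        # One test is generated per dataset; the keyword arguments come
        # from the data dict passed to append_new_dataset above.
        self.assertNotEqual(archive_format, wrong_format)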
Example 14: import
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
limitations under the License.
"""
import os
from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import (
DataDrivenFixture, data_driven_test)
from cafe.engine.config import EngineConfig
from cloudcafe.common.tools.md5hash import get_md5_hash
from cloudroast.objectstorage.fixtures import ObjectStorageFixture
BASE_NAME = "extract_archive"
HTTP_OK = 200
archive_formats = DatasetList()
archive_formats.append_new_dataset(
'tar', {'name': 'tar', 'archive_format': 'tar'})
archive_formats.append_new_dataset(
'tar.gz', {'name': 'tar.gz', 'archive_format': 'tar.gz'})
archive_formats.append_new_dataset(
'tar.bz2', {'name': 'tar.bz2', 'archive_format': 'tar.bz2'})
@DataDrivenFixture
class ExtractArchiveTest(ObjectStorageFixture):
"""
Tests Swift expand archive operations
Notes:
The initial response status code is for the initial request.
The object extraction status code is sent in the body of the
response.
"""
Example 15: DatasetList
# Required import: from cafe.drivers.unittest.datasets import DatasetList [as alias]
# Or: from cafe.drivers.unittest.datasets.DatasetList import append_new_dataset [as alias]
import unittest
from cafe.drivers.unittest.datasets import DatasetList
from cafe.drivers.unittest.decorators import DataDrivenFixture, \
data_driven_test, tags
from cloudcafe.networking.networks.config import NetworkingSecondUserConfig
from cloudroast.networking.networks.fixtures \
import NetworkingSecurityGroupsFixture
from cloudcafe.networking.networks.extensions.security_groups_api.constants \
import SecurityGroupsErrorTypes, SecurityGroupsResponseCodes
# Creating data sets for data driven testing
data_set_list = DatasetList()
data_set_list.append_new_dataset(
name='',
data_dict={},
tags=['sdn', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
name='w_protocol_icmp',
data_dict={'protocol': 'icmp'},
tags=['periodic', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
name='w_protocol_tcp',
data_dict={'protocol': 'tcp'},
tags=['periodic', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
name='w_protocol_udp',
data_dict={'protocol': 'udp'},
tags=['periodic', 'post', 'positive', 'rbac_creator'])
data_set_list.append_new_dataset(
name='w_ethertype_ipv4',