本文整理汇总了Python中tests.Test类的典型用法代码示例。如果您正苦于以下问题:Python Test类的具体用法?Python Test怎么用?Python Test使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Test类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_get
def test_get(self):
    """ Smoke test: a plain GET against the local person API should pass with HTTP 200. """
    get_test = Test()
    get_test.url = self.prefix + "/api/person/"
    outcome = resttest.run_test(get_test)
    self.assertTrue(outcome.passed)
    self.assertEqual(200, outcome.response_code)
示例2: test_get_validators
def test_get_validators(self):
    """ Test that exists/not_exists and compare validators all pass against the person API.

    FIX: the original used Python-2-only `print` statements for failure
    diagnostics; the near-identical sibling example already uses the
    print() function, so this is made consistent (and Python 3 compatible).
    """
    test = Test()
    test.url = self.prefix + '/api/person/'
    # Validators need library calls to configure them
    test.validators = list()
    cfg_exists = {'jsonpath_mini': "objects.0", 'test': 'exists'}
    test.validators.append(validators.parse_validator('extract_test', cfg_exists))
    cfg_exists_0 = {'jsonpath_mini': "meta.offset", 'test': 'exists'}
    test.validators.append(validators.parse_validator('extract_test', cfg_exists_0))
    cfg_not_exists = {'jsonpath_mini': "objects.100", 'test': 'not_exists'}
    test.validators.append(validators.parse_validator('extract_test', cfg_not_exists))
    cfg_compare_login = {'jsonpath_mini': 'objects.0.login', 'expected': 'gbaltar'}
    test.validators.append(validators.parse_validator('compare', cfg_compare_login))
    cfg_compare_id = {'jsonpath_mini': 'objects.1.id', 'comparator': 'gt', 'expected': -1}
    test.validators.append(validators.parse_validator('compare', cfg_compare_id))
    test_response = resttest.run_test(test)
    # Dump diagnostics for any validator failure before asserting
    for failure in test_response.failures:
        print("REAL FAILURE")
        print("Test Failure, failure type: {0}, Reason: {1}".format(failure.failure_type, failure.message))
        if failure.details:
            print("Validator/Error details: " + str(failure.details))
    self.assertFalse(test_response.failures)
    self.assertTrue(test_response.passed)
示例3: test_get_validators
def test_get_validators(self):
    """ Test that validators work correctly: exists, not_exists, and compare
    validators configured against the person API should all pass. """
    test = Test()
    test.url = self.prefix + "/api/person/"
    # Validators need library calls to configure them; build them table-driven
    validator_specs = [
        ("extract_test", {"jsonpath_mini": "objects.0", "test": "exists"}),
        ("extract_test", {"jsonpath_mini": "meta.offset", "test": "exists"}),
        ("extract_test", {"jsonpath_mini": "objects.100", "test": "not_exists"}),
        ("compare", {"jsonpath_mini": "objects.0.login", "expected": "gbaltar"}),
        ("compare", {"jsonpath_mini": "objects.1.id", "comparator": "gt", "expected": -1}),
    ]
    test.validators = list()
    for vtype, cfg in validator_specs:
        test.validators.append(validators.parse_validator(vtype, cfg))
    test_response = resttest.run_test(test)
    # Print diagnostics for any failures before asserting, to ease debugging
    for failure in test_response.failures:
        print("REAL FAILURE")
        print("Test Failure, failure type: {0}, Reason: {1}".format(failure.failure_type, failure.message))
        if failure.details:
            print("Validator/Error details: " + str(failure.details))
    self.assertFalse(test_response.failures)
    self.assertTrue(test_response.passed)
示例4: main
def main():
    """
    Main program procedure: read a multiple-choice test from a CSV file,
    run it interactively, and print the final score.

    TODO: infinite loop letting the user choose another test (or the same one)
    when the current one finishes.

    FIX: the original duplicated the open/IOError handling twice (once before
    the retry loop and once inside it) via a `correct_path` flag; collapsed
    into a single retry loop. The final print is also written in the
    single-argument form that behaves the same on Python 2 and 3.
    """
    # Hard-coded path for now; interactive prompt kept below for reference.
    # path = raw_input('Introduzca el nombre (o ruta) de su fichero de test\'.csv\':')
    # path = 'testPOO.csv'
    path = 'testEDA.csv'
    while True:
        try:
            io = Read(path)
            break  # file opened successfully
        except IOError:
            # Keep prompting until a readable file path is supplied
            path = raw_input("Introduzca la direccion del fichero: ")
    info, answers = io.getContent()
    test = Test(info, answers)
    run(test)
    print(test.finalResult())
    return 0
示例5: test_failed_get
def test_failed_get(self):
    """ A GET for a nonexistent person should fail the test with a 404. """
    missing_test = Test()
    missing_test.url = self.prefix + "/api/person/500/"
    outcome = resttest.run_test(missing_test)
    self.assertEqual(False, outcome.passed)
    self.assertEqual(404, outcome.response_code)
示例6: test_get_redirect
def test_get_redirect(self):
    """ GET without trailing slash, following the redirect via curl FOLLOWLOCATION. """
    redirect_test = Test()
    redirect_test.curl_options = {"FOLLOWLOCATION": True}
    redirect_test.url = self.prefix + "/api/person"
    outcome = resttest.run_test(redirect_test)
    self.assertTrue(outcome.passed)
    self.assertEqual(200, outcome.response_code)
示例7: parse_testsets
def parse_testsets(base_url, test_structure, test_files=None, working_directory=None):
    """ Convert a Python datastructure read from validated YAML to a set of structured testsets
    The data stucture is assumed to be a list of dictionaries, each of which describes:
        - a tests (test structure)
        - a simple test (just a URL, and a minimal test is created)
        - or overall test configuration for this testset
        - an import (load another set of tests into this one, from a separate file)
            - For imports, these are recursive, and will use the parent config if none is present
    Note: test_files is used to track tests that import other tests, to avoid recursive loops
    This returns a list of testsets, corresponding to imported testsets and in-line multi-document sets
    """
    # FIX: the original declared `test_files=set()` as a default argument. Mutable
    # defaults are evaluated once and shared across calls, so files imported during
    # one top-level parse would be silently skipped in every later, unrelated parse.
    # Use a None sentinel and create a fresh set per call instead.
    if test_files is None:
        test_files = set()
    tests_out = list()
    test_config = TestConfig()
    testsets = list()
    benchmarks = list()
    if working_directory is None:
        working_directory = os.path.abspath(os.getcwd())
    # Returns a testconfig and collection of tests
    for node in test_structure:  # Iterate through lists of test and configuration elements
        if isinstance(node, dict):  # Each config element is a miniature key-value dictionary
            node = lowercase_keys(node)
            for key in node:
                if key == u'import':
                    importfile = node[key]  # Import another test file
                    if importfile not in test_files:
                        logger.debug("Importing test sets: " + importfile)
                        test_files.add(importfile)
                        import_test_structure = read_test_file(importfile)
                        # Resolve relative paths in the imported file against its own directory
                        with cd(os.path.dirname(os.path.realpath(importfile))):
                            import_testsets = parse_testsets(base_url, import_test_structure, test_files)
                            testsets.extend(import_testsets)
                elif key == u'url':  # Simple test, just a GET to a URL
                    mytest = Test()
                    val = node[key]
                    assert isinstance(val, str) or isinstance(val, unicode)
                    mytest.url = base_url + val
                    tests_out.append(mytest)
                elif key == u'test':  # Complex test with additional parameters
                    with cd(working_directory):
                        child = node[key]
                        mytest = Test.parse_test(base_url, child)
                        tests_out.append(mytest)
                elif key == u'benchmark':
                    benchmark = parse_benchmark(base_url, node[key])
                    benchmarks.append(benchmark)
                elif key == u'config' or key == u'configuration':
                    test_config = parse_configuration(node[key])
    # Wrap the in-line tests/config/benchmarks into one testset and append it
    testset = TestSet()
    testset.tests = tests_out
    testset.config = test_config
    testset.benchmarks = benchmarks
    testsets.append(testset)
    return testsets
示例8: test_put_inplace
def test_put_inplace(self):
    """ PUT to an existing person record should update in place and return 200. """
    put_test = Test()
    put_test.url = self.prefix + "/api/person/1/"
    put_test.method = u"PUT"
    put_test.body = '{"first_name": "Gaius","id": 1,"last_name": "Baltar","login": "gbaltar"}'
    put_test.headers = {u"Content-Type": u"application/json"}
    outcome = resttest.run_test(put_test)
    self.assertEqual(True, outcome.passed)
    self.assertEqual(200, outcome.response_code)
示例9: test_header_validators
def test_header_validators(self):
    """ The 'server' response header should contain 'WSGI' per the comparator validator. """
    header_test = Test()
    header_test.url = self.prefix + "/api/person/1/"
    header_cfg = {"header": "server", "comparator": "contains", "expected": "WSGI"}
    header_test.validators = [validators.parse_validator("comparator", header_cfg)]
    result = resttest.run_test(header_test)
    if result.failures:
        # Show what went wrong before the assertion fires
        for fail in result.failures:
            print(fail)
    self.assertTrue(result.passed)
示例10: test_get_validators_fail
def test_get_validators_fail(self):
    """ Each configured validator targets data that does NOT satisfy it,
    so the test must fail with exactly three recorded failures. """
    test = Test()
    test.url = self.prefix + "/api/person/"
    failing_specs = (
        ("extract_test", {"jsonpath_mini": "objects.500", "test": "exists"}),
        ("extract_test", {"jsonpath_mini": "objects.1", "test": "not_exists"}),
        ("compare", {"jsonpath_mini": "objects.1.last_name", "expected": "NotJenkins"}),
    )
    test.validators = [validators.parse_validator(name, cfg) for name, cfg in failing_specs]
    test_response = resttest.run_test(test)
    self.assertFalse(test_response.passed)
    self.assertTrue(test_response.failures)
    self.assertEqual(3, len(test_response.failures))
示例11: test_get_validators_fail
def test_get_validators_fail(self):
    """ Test validators that should fail: every validator below is configured
    against data that violates it, yielding exactly three failures. """
    test = Test()
    test.url = self.prefix + '/api/person/'
    test.validators = list()
    for vtype, cfg in [
            ('extract_test', {'jsonpath_mini': "objects.500", 'test': 'exists'}),
            ('extract_test', {'jsonpath_mini': "objects.1", 'test': 'not_exists'}),
            ('compare', {'jsonpath_mini': "objects.1.last_name", 'expected': 'NotJenkins'})]:
        test.validators.append(validators.parse_validator(vtype, cfg))
    test_response = resttest.run_test(test)
    self.assertFalse(test_response.passed)
    self.assertTrue(test_response.failures)
    self.assertEqual(3, len(test_response.failures))
示例12: _runPlainR
def _runPlainR(self):
    """ Runs the tester in plain R mode, where it outputs the plain R version of the tests to a special file, rather than running the tests. """
    # Plain R mode only supports a single test root
    if (len(self._testRoots)!=1):
        error("When using --plainr mode, only one root can be selected");
    root = self._testRoots[0][0]
    lastFilename = ""   # filename of the previously-seen test; a change triggers a new output file
    outfile = None      # currently open output file handle (None until the first test is seen)
    fileTests = 0       # count of tests written to the current output file
    print("Creating R-compatible raw tests. The following is a list of test file entered")
    print("and number of tests generated:\n")
    for t in Test.enumerate(self._testRoots, self._recursive):
        if (t.filename() != lastFilename):
            # Source file changed: finish the previous output file (if any)...
            if (outfile != None):
                print("["+str(fileTests)+"]")
                outfile.close()
                fileTests = 0
            # ...and open a new output file mirroring the source path under the
            # plain-R output directory (strip the root prefix plus its separator)
            fname = os.path.join(self._plainROutput, t.filename()[len(root)+1:])
            dirname, filename = os.path.split(fname)
            print(strFormat(fname), end="")
            os.makedirs(dirname, exist_ok = True)
            outfile = open(fname, "w")
            lastFilename = t.filename()
        for c in t.commands():
            if (c.find("#! ") == 0):
                # "#! " command lines are replaced by the test's name
                outfile.write("#! "+t.name()+"\n")
            elif (c.find("#!g") == 0):
                # "#!g" command lines are dropped from the plain R output
                pass
            else:
                # Un-escape embedded quotes before emitting the command
                outfile.write(c.replace("\\\"",'"')+"\n")
        outfile.write(t.code()+"\n\n")
        fileTests += 1
    # Flush the trailing output file after the loop ends
    if (outfile != None):
        print("["+str(fileTests)+"]")
        outfile.close()
示例13: test_header_validators
def test_header_validators(self):
    """ Header comparator validator: the 'server' header must contain 'WSGI'. """
    test = Test()
    test.url = self.prefix + '/api/person/1/'
    server_header_cfg = {'header': 'server',
                         'comparator': 'contains',
                         'expected': 'WSGI'}
    test.validators = list()
    test.validators.append(validators.parse_validator('comparator', server_header_cfg))
    result = resttest.run_test(test)
    if result.failures:
        for fail in result.failures:
            print(fail)
    self.assertTrue(result.passed)
示例14: test_header_extraction
def test_header_extraction(self):
    """ Extract the 'server' response header into the context under two keys,
    verifying that header-name lookup is case-insensitive. """
    test = Test()
    test.url = self.prefix + "/api/person/1/"
    lower_key = "server-header"
    mixed_key = "server-header-mixedcase"
    test.extract_binds = {
        lower_key: validators.HeaderExtractor.parse("server"),
        # Verify case-insensitive behavior
        mixed_key: validators.HeaderExtractor.parse("sErVer"),
    }
    my_context = Context()
    test_response = resttest.run_test(test, context=my_context)
    lower_val = my_context.get_value(lower_key)
    mixed_val = my_context.get_value(mixed_key)
    self.assertEqual(lower_val, mixed_val)
    self.assertTrue("wsgi" in lower_val.lower())
    self.assertTrue("wsgi" in mixed_val.lower())
示例15: parse_benchmark
def parse_benchmark(base_url, node):
    """ Try building a benchmark configuration from deserialized configuration root node """
    # NOTE(review): this function is Python 2 specific (`unicode`, `basestring`,
    # `unicode(x, 'UTF-8')`) -- porting to Python 3 would require replacing those.
    node = lowercase_keys(flatten_dictionaries(node))  # Make it usable
    benchmark = Benchmark()
    # Read & set basic test parameters (Benchmark extends a Test here)
    benchmark = Test.parse_test(base_url, node, benchmark)
    # Complex parsing because of list/dictionary/singleton legal cases
    for key, value in node.items():
        if key == u'warmup_runs':
            benchmark.warmup_runs = int(value)
        elif key == u'benchmark_runs':
            benchmark.benchmark_runs = int(value)
        elif key == u'output_format':
            # Only accept formats from the known whitelist (case-insensitive)
            format = value.lower()
            if format in OUTPUT_FORMATS:
                benchmark.output_format = format
            else:
                raise Exception('Invalid benchmark output format: ' + format)
        elif key == u'output_file':
            if not isinstance(value, basestring):
                raise Exception("Invalid output file format")
            benchmark.output_file = value
        elif key == u'metrics':
            # Metrics may be a single name, a list (of names or {metric: aggregate}
            # dicts), or a flat {metric: aggregate} dictionary
            if isinstance(value, unicode) or isinstance(value,str):
                # Single value
                benchmark.add_metric(unicode(value, 'UTF-8'))
            elif isinstance(value, list) or isinstance(value, set):
                # List of single values or list of {metric:aggregate, ...}
                for metric in value:
                    if isinstance(metric, dict):
                        for metricname, aggregate in metric.items():
                            if not isinstance(metricname, basestring):
                                raise Exception("Invalid metric input: non-string metric name")
                            if not isinstance(aggregate, basestring):
                                raise Exception("Invalid aggregate input: non-string aggregate name")
                            # TODO unicode-safe this
                            benchmark.add_metric(unicode(metricname,'UTF-8'), unicode(aggregate,'UTF-8'))
                    elif isinstance(metric, unicode) or isinstance(metric, str):
                        benchmark.add_metric(unicode(metric,'UTF-8'))
            elif isinstance(value, dict):
                # Dictionary of metric-aggregate pairs
                for metricname, aggregate in value.items():
                    if not isinstance(metricname, basestring):
                        raise Exception("Invalid metric input: non-string metric name")
                    if not isinstance(aggregate, basestring):
                        raise Exception("Invalid aggregate input: non-string aggregate name")
                    benchmark.add_metric(unicode(metricname,'UTF-8'), unicode(aggregate,'UTF-8'))
            else:
                raise Exception("Invalid benchmark metric datatype: "+str(value))
    return benchmark