This article collects typical usage examples of the Python method tests.mocks.EngineEmul.eval_env. If you are wondering what EngineEmul.eval_env does and how to use it, the curated examples below should help. You can also explore further usage examples of the containing class, tests.mocks.EngineEmul.
Two code examples of the EngineEmul.eval_env method are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
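Before the full test classes, here is a minimal sketch of what the method does. It assumes, based on the env-eval test in Example 2 below, that eval_env() walks the already-loaded config and substitutes ${VAR} references with values from the process environment; EngineEmul is the engine stand-in used throughout the bzt test suite, and the BZT_DEMO variable and "greeting" key here are purely illustrative:

import os

from tests.mocks import EngineEmul

os.environ["BZT_DEMO"] = "demo-value"  # illustrative variable, not part of the suite
engine = EngineEmul()
engine.config.merge({"settings": {"greeting": "${BZT_DEMO}"}})
engine.eval_env()  # substitutes environment variables inside the config
assert engine.config["settings"]["greeting"] == "demo-value"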
Example 1: TestScenarioExecutor
# Required import: from tests.mocks import EngineEmul [as alias]
# Alternatively: from tests.mocks.EngineEmul import eval_env [as alias]
# The imports below follow the bzt test-suite layout; exact module paths may vary between bzt versions.
from bzt import TaurusConfigError
from bzt.engine import ScenarioExecutor
from bzt.six import string_types
from bzt.utils import is_windows
from tests import BZTestCase, RESOURCES_DIR
from tests.mocks import EngineEmul


class TestScenarioExecutor(BZTestCase):
    def setUp(self):
        super(TestScenarioExecutor, self).setUp()
        # wire a real ScenarioExecutor to the emulated engine
        self.engine = EngineEmul()
        self.executor = ScenarioExecutor()
        self.executor.engine = self.engine
        self.executor.env = self.executor.engine.env

    def test_scenario_extraction_script(self):
        self.engine.config.merge({
            "execution": [{
                "scenario": {
                    "script": "tests/resources/selenium/python/test_blazemeter_fail.py",
                    "param": "value"
                }}]})
        self.executor.execution = self.engine.config.get('execution')[0]
        self.executor.get_scenario()
        config = self.engine.config
        # the inline scenario is extracted: execution now refers to it by name
        self.assertEqual(config['execution'][0]['scenario'], 'test_blazemeter_fail.py')
        self.assertIn('test_blazemeter_fail.py', config['scenarios'])

    def test_body_files(self):
        body_file1 = RESOURCES_DIR + "jmeter/body-file.dat"
        body_file2 = RESOURCES_DIR + "jmeter/jmx/http.jmx"
        self.engine.config.merge({
            'execution': [{
                'iterations': 1,
                'executor': 'siege',
                'scenario': 'bf'}],
            'scenarios': {
                'bf': {
                    "requests": [{
                        'url': 'http://first.com',
                        'body-file': body_file1
                    }, {
                        'url': 'http://second.com',
                        'body': 'body2',
                        'body-file': body_file2}]}}})
        self.executor.execution = self.engine.config.get('execution')[0]
        scenario = self.executor.get_scenario()

        # check body fields in get_requests() results
        reqs = list(scenario.get_requests())
        body_fields = [req.body for req in reqs]
        self.assertIn('sample of body', body_fields[0])
        self.assertIn('body2', body_fields[1])

        # check body and body-file fields after get_requests()
        scenario = self.executor.get_scenario()
        body_files = [req.get('body-file') for req in scenario.get('requests')]
        body_fields = [req.get('body') for req in scenario.get('requests')]
        self.assertTrue(all(body_files))
        self.assertFalse(body_fields[0])
        self.assertIn('body2', body_fields[1])

    def test_scenario_is_script(self):
        self.engine.config.merge({
            "execution": [{
                "scenario": "tests/resources/selenium/python/test_blazemeter_fail.py"
            }]})
        self.executor.execution = self.engine.config.get('execution')[0]
        self.executor.get_scenario()
        config = self.engine.config
        self.assertEqual(config['execution'][0]['scenario'], 'test_blazemeter_fail.py')
        self.assertIn('test_blazemeter_fail.py', config['scenarios'])

    def test_scenario_extraction_request(self):
        self.engine.config.merge({
            "execution": [{
                "scenario": {
                    "requests": [{"url": "url.example"}],
                    "param": "value"
                }}]})
        self.executor.execution = self.engine.config.get('execution')[0]
        self.executor.get_scenario()
        config = self.engine.config
        scenario = config['execution'][0]['scenario']
        # an anonymous inline scenario is extracted under a generated name
        self.assertTrue(isinstance(scenario, string_types))
        self.assertIn(scenario, config['scenarios'])

    def test_scenario_not_found(self):
        self.engine.config.merge({
            "execution": [{
                "scenario": "non-existent"
            }]})
        self.executor.execution = self.engine.config.get('execution')[0]
        self.assertRaises(TaurusConfigError, self.executor.get_scenario)

    def test_scenario_no_requests(self):
        self.engine.config.merge({
            "execution": [{
                "scenario": ["url1", "url2"]
            }]})
        self.executor.execution = self.engine.config.get('execution')[0]
        self.assertRaises(TaurusConfigError, self.executor.get_scenario)

    def test_passes_artifacts_dir(self):
        cmdline = "echo %TAURUS_ARTIFACTS_DIR%" if is_windows() else "echo $TAURUS_ARTIFACTS_DIR"
        self.engine.eval_env()
# ......... part of the code omitted here .........
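Example 1 breaks off inside test_passes_artifacts_dir, but the test name and the echo command suggest the second job of eval_env(): making TAURUS_ARTIFACTS_DIR visible to commands the engine launches. A hedged sketch of that idea, assuming eval_env() exports the artifacts directory to the process environment (which the truncated test appears to verify):

import os

from tests.mocks import EngineEmul

engine = EngineEmul()
engine.eval_env()
# If the assumption holds, a shell command such as `echo $TAURUS_ARTIFACTS_DIR`
# (or `echo %TAURUS_ARTIFACTS_DIR%` on Windows) would print this value:
print(os.environ.get("TAURUS_ARTIFACTS_DIR"))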
Example 2: TestEngine
# Required import: from tests.mocks import EngineEmul [as alias]
# Alternatively: from tests.mocks.EngineEmul import eval_env [as alias]
# This fragment additionally relies on: import os; from bzt import TaurusConfigError;
# from bzt.utils import BetterDict; from tests import RESOURCES_DIR (per the bzt test-suite layout).
# ......... part of the code omitted here .........
        ]
        self.obj.configure(configs)
        self.obj.config["provisioning"] = "unknown"
        self.obj.config["modules"]["unknown"] = BetterDict()
        self.assertRaises(TaurusConfigError, self.obj.prepare)

    def test_null_aggregator(self):
        self.obj.config.merge({
            "execution": [{
                "scenario": {
                    "requests": [{"url": "http://example.com/"}],
                }}],
            "settings": {
                "aggregator": None,
                "default-executor": "jmeter",
            },
            "modules": {
                "local": "bzt.modules.provisioning.Local",
                "jmeter": {"class": "tests.modules.jmeter.MockJMeterExecutor",
                           "protocol-handlers": {"http": "bzt.jmx.http.HTTPProtocolHandler"}},
            }})
        self.obj.unify_config()
        self.obj.prepare()

    def test_yaml_multi_docs(self):
        configs = [
            RESOURCES_DIR + "yaml/multi-docs.yml",
            self.paths
        ]
        self.obj.configure(configs)
        self.obj.prepare()
        self.assertEqual(len(self.obj.config["execution"]), 2)

    def test_json_format_regression(self):
        configs = [
            RESOURCES_DIR + "json/json-but-not-yaml.json"
        ]
        self.obj.configure(configs)
        self.obj.prepare()

    def test_invalid_format(self):
        configs = [
            RESOURCES_DIR + "jmeter-dist-3.0.zip"
        ]
        self.assertRaises(TaurusConfigError, lambda: self.obj.configure(configs))

    def test_included_configs(self):
        configs = [
            RESOURCES_DIR + "yaml/included-level1.yml",
        ]
        self.obj.configure(configs)
        self.assertTrue(self.obj.config["level1"])
        self.assertTrue(self.obj.config["level2"])
        self.assertTrue(self.obj.config["level3"])
        self.assertListEqual(['included-level2.yml', 'included-level3.yml'],
                             self.obj.config["included-configs"])

    def test_included_configs_cycle(self):
        configs = [
            RESOURCES_DIR + "yaml/included-circular1.yml",
        ]
        self.obj.configure(configs)
        self.assertTrue(self.obj.config["level1"])
        self.assertTrue(self.obj.config["level2"])
        self.assertListEqual(['included-circular2.yml', 'included-circular1.yml', 'included-circular2.yml'],
                             self.obj.config["included-configs"])

    def test_env_eval(self):
        configs = [
            RESOURCES_DIR + "yaml/env-eval.yml",
        ]
        os.environ["BZT_ENV_TEST_UNSET"] = "set"
        try:
            self.obj.configure(configs)
            self.obj.eval_env()  # resolves ${VAR} references in the loaded config
            self.assertEqual("success/top", self.obj.config["toplevel"])
            self.assertEqual("success/test/", self.obj.config["settings"]["artifacts-dir"])
            self.assertEqual("http://success/", self.obj.config["scenarios"]["scen1"]["default-address"])
            self.assertEqual("/success/", self.obj.config["scenarios"]["scen1"]["requests"][0])
            self.assertNotEqual("/${PATH}/", self.obj.config["scenarios"]["scen1"]["requests"][1])
            self.assertEqual("/${TEMP}/", self.obj.config["scenarios"]["scen1"]["requests"][2])
            self.assertEqual("/" + self.obj.artifacts_dir + "/",
                             self.obj.config["scenarios"]["scen1"]["requests"][3])
        finally:
            if "BZT_ENV_TEST" in os.environ:
                os.environ.pop("BZT_ENV_TEST")
            if "BZT_ENV_TEST_UNSET" in os.environ:
                os.environ.pop("BZT_ENV_TEST_UNSET")

    def test_singletone_service(self):
        configs = [
            RESOURCES_DIR + "yaml/singletone-service.yml",
        ]
        self.obj.configure(configs, read_config_files=False)
        self.obj.prepare()
        self.assertEqual(2, len(self.obj.services))
        self.assertEqual(None, self.obj.services[0].parameters['run-at'])
        self.assertEqual("mock", self.obj.services[1].parameters['run-at'])
        self.assertEqual(2, len(self.obj.reporters))
        self.assertEqual("mock", self.obj.reporters[0].parameters['run-at'])
        self.assertEqual(None, self.obj.reporters[1].parameters['run-at'])