本文整理汇总了Python中shakedown.deployment_wait函数的典型用法代码示例。如果您正苦于以下问题:Python deployment_wait函数的具体用法?Python deployment_wait怎么用?Python deployment_wait使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了deployment_wait函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_event_channel
def test_event_channel():
    """Verify Marathon's event channel end to end.

    Events are streamed into an ``events.txt`` file on the master (a fixture
    is expected to create and remove that file). The test deploys an app and
    checks for attach/deployment events, then removes the app and checks for
    the kill event.
    """
    app_def = apps.mesos_app()
    app_id = app_def['id']

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    master_ip = shakedown.master_ip()

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def assert_deployment_events_logged():
        # Retried: the event stream is written asynchronously.
        status, stdout = shakedown.run_command(master_ip, 'cat events.txt')
        assert 'event_stream_attached' in stdout, "event_stream_attached event has not been found"
        assert 'deployment_info' in stdout, "deployment_info event has not been found"
        assert 'deployment_step_success' in stdout, "deployment_step_success has not been found"

    assert_deployment_events_logged()

    client.remove_app(app_id, True)
    shakedown.deployment_wait()

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def assert_kill_event_logged():
        status, stdout = shakedown.run_command(master_ip, 'cat events.txt')
        assert 'KILLED' in stdout, "KILLED event has not been found"

    assert_kill_event_logged()
示例2: test_incremental_groups_scale
def test_incremental_groups_scale():
    """Scale the number of groups.

    Groups are added in exponentially decaying batches until a deployment
    fails or times out; each batch holds one app per group.
    """
    client = marathon.create_client()
    next_batch_size = exponential_decay(start=40, decay=0.01)

    created_total = 0
    for step in itertools.count(start=0):
        count = next_batch_size(step)
        created_total += count
        shakedown.echo("Add {} groups totaling {}".format(count, created_total))

        group_ids = ("/group-{0:0>4}".format(step * count + i) for i in range(count))
        app_ids = ("{}/app-1".format(gid) for gid in group_ids)
        app_definitions = [app_def(aid) for aid in app_ids]

        # There is no app id. We simply PUT /v2/apps to create groups in
        # batches.
        client.update_app('', app_definitions)
        shakedown.deployment_wait(
            timeout=timedelta(minutes=15).total_seconds())

        shakedown.echo("done.")
示例3: test_incremental_group_nesting
def test_incremental_group_nesting():
    """Scale the depth of nested groups.

    Growth is fast at the beginning and then slows down, driven by an
    exponentially decaying batch size.
    """
    client = marathon.create_client()
    next_batch_size = exponential_decay(start=5, decay=0.1)

    depth = 0
    for step in itertools.count(start=0):
        depth += next_batch_size(step)
        shakedown.echo("Create a group with a nesting of {}".format(depth))

        nested_groups = '/'.join(
            "group-{0:0>3}".format(g) for g in range(depth))

        # Note: We always deploy into the same nested groups.
        app_id = '/{0}/app-1'.format(nested_groups)
        client.add_app(app_def(app_id))
        shakedown.deployment_wait(
            timeout=timedelta(minutes=15).total_seconds())

        shakedown.echo("done.")
示例4: test_incremental_apps_per_group_scale
def test_incremental_apps_per_group_scale():
    """Try to reach the maximum number of apps.

    Apps are created in per-group batches whose size decays exponentially.
    """
    client = marathon.create_client()
    next_batch_size = exponential_decay(start=500, decay=0.3)

    for step in itertools.count(start=0):
        count = next_batch_size(step)
        shakedown.echo("Add {} apps".format(count))

        next_batch = {
            "id": "/batch-{0:0>3}".format(step),
            "dependencies": [],
            "apps": [app_def("app-{0:0>4}".format(i)) for i in range(count)],
        }

        client.create_group(next_batch)
        shakedown.deployment_wait(
            timeout=timedelta(minutes=15).total_seconds())

        shakedown.echo("done.")
示例5: test_lock
def test_lock():
    """Verify that a second scheduler fails to start while one is running.

    Without locking, the scheduler would fail during registration, but only
    after writing its config to ZK. To prove the failure happens immediately,
    we assert that the ZK config state is unmodified.
    """
    marathon_client = dcos.marathon.create_client()

    # Snapshot the framework's ZK config before provoking the race.
    zk_path = "dcos-service-{}/ConfigTarget".format(PACKAGE_NAME)
    zk_config_old = shakedown.get_zk_node_data(zk_path)

    # Record the scheduler app's last task-failure timestamp, if any.
    app_id = "/{}".format(PACKAGE_NAME)
    app = marathon_client.get_app(app_id)
    old_timestamp = app.get("lastTaskFailure", {}).get("timestamp", None)

    # Drop the single-instance constraint, then scale to 2 instances.
    labels = app["labels"]
    labels.pop("MARATHON_SINGLE_INSTANCE_APP")
    marathon_client.update_app(app_id, {"labels": labels})
    shakedown.deployment_wait()
    marathon_client.update_app(app_id, {"instances": 2})

    # Wait for the second scheduler to fail (new failure timestamp appears).
    def second_scheduler_failed():
        timestamp = marathon_client.get_app(app_id).get("lastTaskFailure", {}).get("timestamp", None)
        return timestamp != old_timestamp
    spin.time_wait_noisy(lambda: second_scheduler_failed())

    # Verify ZK is unchanged.
    zk_config_new = shakedown.get_zk_node_data(zk_path)
    assert zk_config_old == zk_config_new
示例6: test_mom_when_mom_process_killed
def test_mom_when_mom_process_killed():
    """Launch a task from MoM, kill the MoM process, and verify the task survives."""
    app_def = apps.sleep_app()
    app_id = app_def["id"]

    host = common.ip_other_than_mom()
    common.pin_to_host(app_def, host)

    with shakedown.marathon_on_marathon():
        client = marathon.create_client()
        client.add_app(app_def)
        shakedown.deployment_wait()
        original_task_id = client.get_tasks(app_id)[0]['id']

        # Kill MoM and wait until it is scheduled and serving again.
        shakedown.kill_process_on_host(common.ip_of_mom(), 'marathon-assembly')
        shakedown.wait_for_task('marathon', 'marathon-user', 300)
        shakedown.wait_for_service_endpoint('marathon-user')

        @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
        def assert_task_survived():
            tasks = client.get_tasks(app_id)
            assert tasks[0]['id'] == original_task_id, "The task ID has changed"

        assert_task_survived()
示例7: test_pod_file_based_secret
def test_pod_file_based_secret(secret_fixture):
    """Verify a pod can mount a secret as a file and expose its contents.

    The pod mounts the secret at ``secret_name``, copies it into a
    ``<name>_file``, and serves it over HTTP. The test then curls that file
    from the master and compares the body against the expected secret value.
    """
    secret_name, secret_value = secret_fixture
    # Strip slashes so the secret name is usable as a plain filename.
    secret_normalized_name = secret_name.replace('/', '')

    pod_id = '/{}'.format(uuid.uuid4().hex)

    pod_def = {
        "id": pod_id,
        "containers": [{
            "name": "container-1",
            "resources": {
                "cpus": 0.1,
                "mem": 64
            },
            "endpoints": [{
                "name": "http",
                "hostPort": 0,
                "protocol": [
                    "tcp"
                ]}
            ],
            "exec": {
                "command": {
                    # Copy the mounted secret into <name>_file, then serve it
                    # on the Mesos-allocated host port ($ENDPOINT_HTTP).
                    "shell": "cat {} >> {}_file && /opt/mesosphere/bin/python -m http.server $ENDPOINT_HTTP".format(
                        secret_normalized_name, secret_normalized_name),
                }
            },
            "volumeMounts": [{
                "name": "vol",
                "mountPath": secret_name
            }],
        }],
        "networks": [{
            "mode": "host"
        }],
        "volumes": [{
            "name": "vol",
            "secret": "secret1"
        }],
        "secrets": {
            "secret1": {
                "source": secret_name
            }
        }
    }

    client = marathon.create_client()
    client.add_pod(pod_def)
    shakedown.deployment_wait()

    instances = client.show_pod(pod_id)['instances']
    assert len(instances) == 1, 'Failed to start the file based secret pod'

    # Fetch the served secret file and compare it to the expected value.
    port = instances[0]['containers'][0]['endpoints'][0]['allocatedHostPort']
    host = instances[0]['networks'][0]['addresses'][0]
    cmd = "curl {}:{}/{}_file".format(host, port, secret_normalized_name)
    status, data = shakedown.run_command_on_master(cmd)
    assert status, "{} did not succeed".format(cmd)
    assert data.rstrip() == secret_value, "Got an unexpected secret data"
示例8: test_scale_app_in_group
def test_scale_app_in_group():
    """Scales an individual app in a group; the sibling app is unaffected."""
    group_def = groups.sleep_group()
    groups_id = group_def["groups"][0]["id"]

    client = marathon.create_client()
    client.create_group(group_def)
    shakedown.deployment_wait()

    deployed_apps = client.get_group(groups_id)['apps']
    assert len(deployed_apps) == 2, "The number of apps is {}, but 2 was expected".format(len(deployed_apps))

    app1_id, app2_id = (a["id"] for a in group_def["groups"][0]["apps"])
    tasks1 = client.get_tasks(app1_id)
    tasks2 = client.get_tasks(app2_id)
    assert len(tasks1) == 1, "The number of tasks #1 is {} after deployment, but 1 was expected".format(len(tasks1))
    assert len(tasks2) == 1, "The number of tasks #2 is {} after deployment, but 1 was expected".format(len(tasks2))

    # scaling just one app in the group
    client.scale_app(app1_id, 2)
    shakedown.deployment_wait()

    tasks1 = client.get_tasks(app1_id)
    tasks2 = client.get_tasks(app2_id)
    assert len(tasks1) == 2, "The number of tasks #1 is {} after scale, but 2 was expected".format(len(tasks1))
    assert len(tasks2) == 1, "The number of tasks #2 is {} after scale, but 1 was expected".format(len(tasks2))
示例9: test_marathon_when_disconnected_from_zk
def test_marathon_when_disconnected_from_zk():
    """Launch an app from Marathon, then knock out access to ZK from the MoM.

    After ZK access is restored, the task must still be running with the
    same task ID.
    """
    app_def = app('zk-failure')
    host = ip_other_than_mom()
    pin_to_host(app_def, host)

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()
    tasks = client.get_tasks('/zk-failure')
    original_task_id = tasks[0]['id']

    with shakedown.iptable_rules(host):
        block_port(host, 2181)
        # time of the zk block
        time.sleep(10)

    # after access to zk is restored.
    @retrying.retry(wait_fixed=1000, stop_max_delay=3000)
    def check_task_is_back():
        tasks = client.get_tasks('/zk-failure')
        # BUG FIX: the comparison result was previously discarded (bare
        # expression); it must be asserted, otherwise the retry loop never
        # actually verifies anything.
        assert tasks[0]['id'] == original_task_id, "The task ID has changed"

    check_task_is_back()
示例10: test_marathon_with_master_process_failure
def test_marathon_with_master_process_failure(marathon_service_name):
    """Launch an app and restart the master.

    The service endpoint is expected to eventually come back and the task ID
    is expected to stay the same.
    """
    app_def = apps.sleep_app()
    host = common.ip_other_than_mom()
    common.pin_to_host(app_def, host)

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()
    original_task_id = client.get_tasks(app_def["id"])[0]['id']

    # Restart the master and wait for Marathon to be reachable again.
    common.systemctl_master('restart')
    shakedown.wait_for_service_endpoint(marathon_service_name)

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def assert_task_recovered():
        tasks = client.get_tasks(app_def["id"])
        assert len(tasks) == 1, "The number of tasks is {} after master restart, but 1 was expected".format(len(tasks))
        assert tasks[0]['id'] == original_task_id, \
            "Task {} has not recovered, it got replaced with another one: {}".format(original_task_id, tasks[0]['id'])

    assert_task_recovered()
示例11: test_pinned_task_does_not_scale_to_unpinned_host
def test_pinned_task_does_not_scale_to_unpinned_host():
    """A pinned task that barely fits its node must not scale onto other nodes.

    When asked to scale past the resources of the pinned node, no tasks may
    be launched anywhere else, so the deployment stays in progress.
    """
    definition = apps.sleep_app()
    sleep_app_id = definition['id']

    target_host = common.ip_other_than_mom()
    print('Constraint set to host: {}'.format(target_host))
    # the size of cpus is designed to be greater than 1/2 of a node
    # such that only 1 task can land on the node.
    agent_cores = common.cpus_on_agent(target_host)
    definition['cpus'] = max(0.6, agent_cores - 0.5)
    common.pin_to_host(definition, target_host)

    marathon_client = marathon.create_client()
    marathon_client.add_app(definition)
    shakedown.deployment_wait(app_id=sleep_app_id)

    marathon_client.scale_app(sleep_app_id, 2)
    time.sleep(5)

    active_deployments = marathon_client.get_deployments(app_id=sleep_app_id)
    running_tasks = marathon_client.get_tasks(sleep_app_id)

    # still deploying
    assert len(active_deployments) == 1, "The number of deployments is {}, but 1 was expected".format(len(active_deployments))
    assert len(running_tasks) == 1, "The number of tasks is {}, but 1 was expected".format(len(running_tasks))
示例12: test_launch_mesos_grace_period
def test_launch_mesos_grace_period(marathon_service_name):
    """Tests 'taskKillGracePeriodSeconds' option using a Mesos container in a Marathon environment.
    Read more details about this test in `test_root_marathon.py::test_launch_mesos_root_marathon_grace_period`
    """
    default_grace_period = 3
    grace_period = 20

    app_def = apps.mesos_app()
    app_def['fetch'] = [{"uri": "https://downloads.mesosphere.com/testing/test.py"}]
    app_def['cmd'] = '/opt/mesosphere/bin/python test.py'
    app_def['taskKillGracePeriodSeconds'] = grace_period
    app_id = app_def['id'].lstrip('/')

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait(app_id=app_id)

    assert shakedown.get_service_task(marathon_service_name, app_id) is not None

    # Scale to zero: the task enters its kill grace period.
    client.scale_app(app_id, 0)
    assert shakedown.get_service_task(marathon_service_name, app_id) is not None

    # tasks should still be here after the default_grace_period
    time.sleep(default_grace_period + 1)
    assert shakedown.get_service_task(marathon_service_name, app_id) is not None

    # but not after the set grace_period
    time.sleep(grace_period)
    assert shakedown.get_service_task(marathon_service_name, app_id) is None
示例13: test_launch_and_scale_group
def test_launch_and_scale_group():
    """Launches a two-app group, then scales the entire group by 2."""
    group_def = groups.sleep_group()
    groups_id = group_def["groups"][0]["id"]

    client = marathon.create_client()
    client.create_group(group_def)
    shakedown.deployment_wait()

    deployed_apps = client.get_group(groups_id)['apps']
    assert len(deployed_apps) == 2, "The number of apps is {}, but 2 was expected".format(len(deployed_apps))

    app1_id, app2_id = (a["id"] for a in group_def["groups"][0]["apps"])
    tasks1 = client.get_tasks(app1_id)
    tasks2 = client.get_tasks(app2_id)
    assert len(tasks1) == 1, "The number of tasks #1 is {} after deployment, but 1 was expected".format(len(tasks1))
    assert len(tasks2) == 1, "The number of tasks #2 is {} after deployment, but 1 was expected".format(len(tasks2))

    # scale by 2 for the entire group
    client.scale_group(groups_id, 2)
    shakedown.deployment_wait()

    tasks1 = client.get_tasks(app1_id)
    tasks2 = client.get_tasks(app2_id)
    assert len(tasks1) == 2, "The number of tasks #1 is {} after scale, but 2 was expected".format(len(tasks1))
    assert len(tasks2) == 2, "The number of tasks #2 is {} after scale, but 2 was expected".format(len(tasks2))
示例14: test_event_channel
def test_event_channel():
    """Tests the Marathon event channel specific to pod events.

    Events are expected to be streamed into ``test.txt`` on the master by a
    fixture. The test creates a pod and checks for creation events, then
    scales the pod and checks for the update event.
    """
    client = marathon.create_client()
    pod_id = "/pod-create"

    pod_json = _pods_json()
    pod_json["id"] = pod_id
    client.add_pod(pod_json)
    shakedown.deployment_wait()

    # look for created
    @retrying.retry(stop_max_delay=10000)
    def check_deployment_message():
        status, stdout = shakedown.run_command_on_master('cat test.txt')
        assert 'event_stream_attached' in stdout
        assert 'pod_created_event' in stdout
        assert 'deployment_step_success' in stdout

    # BUG FIX: the retrying-decorated checks were defined but never invoked,
    # so their assertions never ran and the test could not fail on missing
    # events. Invoke them, matching the app-based event-channel test.
    check_deployment_message()

    pod_json["scaling"]["instances"] = 3
    client.update_pod(pod_id, pod_json)
    shakedown.deployment_wait()

    # look for updated
    @retrying.retry(stop_max_delay=10000)
    def check_update_message():
        status, stdout = shakedown.run_command_on_master('cat test.txt')
        assert 'pod_updated_event' in stdout

    check_update_message()
示例15: test_vip_docker_bridge_mode
def test_vip_docker_bridge_mode(marathon_service_name):
    """Create a VIP from a Docker HTTP server app in bridge mode.

    Validates that an app carrying the VIP label deploys and that the
    service is reachable through the VIP's L4LB fully-qualified name.
    """
    app_def = apps.docker_http_server()
    vip_name = app_def["id"].lstrip("/")
    fqn = '{}.{}.l4lb.thisdcos.directory'.format(vip_name, marathon_service_name)

    app_def['id'] = vip_name
    # Bridge-mode port mapping with the VIP label on the container port.
    app_def['container']['docker']['portMappings'] = [{
        "containerPort": 8080,
        "hostPort": 0,
        "labels": {
            "VIP_0": "/{}:10000".format(vip_name)
        },
        "protocol": "tcp",
        "name": "{}".format(vip_name)
    }]

    client = marathon.create_client()
    client.add_app(app_def)
    shakedown.deployment_wait()

    @retrying.retry(wait_fixed=1000, stop_max_attempt_number=30, retry_on_exception=common.ignore_exception)
    def assert_vip_reachable():
        # Retried: the L4LB entry takes a moment to propagate.
        time.sleep(1)
        common.assert_http_code('{}:{}'.format(fqn, 10000))

    assert_vip_reachable()