This article collects typical usage examples of the Python method marathon.MarathonClient.list_tasks. If you are wondering how exactly to use MarathonClient.list_tasks in Python, or are looking for working samples, the curated code examples below may help. You can also read more about the containing class, marathon.MarathonClient.
Seven code examples of the MarathonClient.list_tasks method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
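Before the examples, here is a minimal usage sketch of the two calling patterns that appear below, with and without an application id. The endpoint URL and the app id '/my-app' are placeholders, not values taken from the examples.

from marathon import MarathonClient

client = MarathonClient('http://marathon.example.com:8080')   # placeholder endpoint
all_tasks = client.list_tasks()             # tasks of every application
app_tasks = client.list_tasks('/my-app')    # tasks of a single application (placeholder app id)
for task in app_tasks:
    print(task.id, task.host, task.ports)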
Example 1: enable_logstash
# Required import: from marathon import MarathonClient [as alias]
# Or: from marathon.MarathonClient import list_tasks [as alias]
def enable_logstash():
    print "Checking ELK entries\n"
    endpoint = os.getenv('MARATHON_ENDPOINT')
    username = os.getenv('MARATHON_HTTP_USER')
    password = os.getenv('MARATHON_HTTP_PASSWORD')
    elk_host = None
    if endpoint:
        try:
            print 'Discovering configuration from %s\n' % endpoint
            c = MarathonClient('https://%s' % endpoint, username=username, password=password)
            tasks = c.list_tasks('yroblaelk')
            for task in tasks:
                if task.started_at:
                    elk_host = task.host
                    break
        except:
            pass
    # check entries in wsrep_cluster_address
    if elk_host:
        print 'Found ELK address %s\n' % elk_host
        for line in fileinput.input(LOGSTASH_CONF_FILE, inplace=True):
            line_content = line
            sys.stdout.write(line.replace("ELK_HOST", elk_host))
        # reboot logstash
        subprocess.call(["service", "logstash-forwarder", "restart"])
Example 2: test_list_tasks_without_app_id
# Required import: from marathon import MarathonClient [as alias]
# Or: from marathon.MarathonClient import list_tasks [as alias]
def test_list_tasks_without_app_id(m):
    fake_response = '{ "tasks": [ { "appId": "/anapp", "healthCheckResults": ' \
        '[ { "alive": true, "consecutiveFailures": 0, "firstSuccess": "2014-10-03T22:57:02.246Z", "lastFailure": null, ' \
        '"lastSuccess": "2014-10-03T22:57:41.643Z", "taskId": "bridged-webapp.eb76c51f-4b4a-11e4-ae49-56847afe9799" } ],' \
        ' "host": "10.141.141.10", "id": "bridged-webapp.eb76c51f-4b4a-11e4-ae49-56847afe9799", "ports": [ 31000 ], ' \
        '"servicePorts": [ 9000 ], "stagedAt": "2014-10-03T22:16:27.811Z", "startedAt": "2014-10-03T22:57:41.587Z", ' \
        '"version": "2014-10-03T22:16:23.634Z" }, { "appId": "/anotherapp", ' \
        '"healthCheckResults": [ { "alive": true, "consecutiveFailures": 0, "firstSuccess": "2014-10-03T22:57:02.246Z", ' \
        '"lastFailure": null, "lastSuccess": "2014-10-03T22:57:41.649Z", "taskId": "bridged-webapp.ef0b5d91-4b4a-11e4-ae49-56847afe9799" } ], ' \
        '"host": "10.141.141.10", "id": "bridged-webapp.ef0b5d91-4b4a-11e4-ae49-56847afe9799", "ports": [ 31001 ], "servicePorts": [ 9000 ], ' \
        '"stagedAt": "2014-10-03T22:16:33.814Z", "startedAt": "2014-10-03T22:57:41.593Z", "version": "2014-10-03T22:16:23.634Z" } ] }'
    m.get('http://fake_server/v2/tasks', text=fake_response)
    mock_client = MarathonClient(servers='http://fake_server')
    actual_deployments = mock_client.list_tasks()
    expected_deployments = [
        models.task.MarathonTask(
            app_id="/anapp",
            health_check_results=[
                models.task.MarathonHealthCheckResult(
                    alive=True,
                    consecutive_failures=0,
                    first_success="2014-10-03T22:57:02.246Z",
                    last_failure=None,
                    last_success="2014-10-03T22:57:41.643Z",
                    task_id="bridged-webapp.eb76c51f-4b4a-11e4-ae49-56847afe9799"
                )
            ],
            host="10.141.141.10",
            id="bridged-webapp.eb76c51f-4b4a-11e4-ae49-56847afe9799",
            ports=[31000],
            service_ports=[9000],
            staged_at="2014-10-03T22:16:27.811Z",
            started_at="2014-10-03T22:57:41.587Z",
            version="2014-10-03T22:16:23.634Z"
        ),
        models.task.MarathonTask(
            app_id="/anotherapp",
            health_check_results=[
                models.task.MarathonHealthCheckResult(
                    alive=True,
                    consecutive_failures=0,
                    first_success="2014-10-03T22:57:02.246Z",
                    last_failure=None,
                    last_success="2014-10-03T22:57:41.649Z",
                    task_id="bridged-webapp.ef0b5d91-4b4a-11e4-ae49-56847afe9799"
                )
            ],
            host="10.141.141.10",
            id="bridged-webapp.ef0b5d91-4b4a-11e4-ae49-56847afe9799",
            ports=[31001],
            service_ports=[9000],
            staged_at="2014-10-03T22:16:33.814Z",
            started_at="2014-10-03T22:57:41.593Z",
            version="2014-10-03T22:16:23.634Z"
        )]
    assert actual_deployments == expected_deployments
Example 3: _get_hosts_with_container
# Required import: from marathon import MarathonClient [as alias]
# Or: from marathon.MarathonClient import list_tasks [as alias]
def _get_hosts_with_container(self, context, cluster):
    marathon_client = MarathonClient(
        'http://' + cluster.api_address + '/marathon/')
    hosts = set()
    for task in marathon_client.list_tasks():
        hosts.add(task.host)
    return hosts
Example 4: ports_used
# Required import: from marathon import MarathonClient [as alias]
# Or: from marathon.MarathonClient import list_tasks [as alias]
def ports_used(request):
    mc = MarathonClient('http://{}:{}'.format(settings.MARATHON['host'], settings.MARATHON['port']))
    apps = mc.list_apps()
    used_ports = {}
    for app in apps:
        tasks = mc.list_tasks(app.id)
        for task in tasks:
            if task.host in used_ports.keys():
                used_ports[task.host].extend(task.ports)
            else:
                used_ports[task.host] = task.ports
    list_host_ports = []
    for key in sorted(used_ports.keys()):
        list_host_ports.append([key, sorted(used_ports[key])])
    data = {}
    data['used_ports'] = list_host_ports
    return render(request, 'marathon_mgmt/ports_used.html', data)
Example 5: open
# Required import: from marathon import MarathonClient [as alias]
# Or: from marathon.MarathonClient import list_tasks [as alias]
with open('/etc/rabbitmq/rabbitmq-env.conf', 'a') as file:
    file.write('NODENAME=rabbit@%s\n' % current_host)  # key name assumed; this line was garbled in the source page
    file.write('NODE_PORT=31672\n')
    file.write('DIST_PORT=31673\n')
# start rabbit
print "Starting cluster"
endpoint = os.getenv('MARATHON_ENDPOINT')
username = os.getenv('MARATHON_HTTP_USER')
password = os.getenv('MARATHON_HTTP_PASSWORD')
peers = []
if endpoint:
    try:
        print 'Discovering configuration from %s' % endpoint
        c = MarathonClient('http://%s' % endpoint, username=username, password=password)
        tasks = c.list_tasks(APP_ID)
        for task in tasks:
            if task.started_at and task.host != host:
                peers.append(task.host)
    except:
        pass
cluster = None
if len(peers) > 0:
    cluster = peers[0]
    print 'Found cluster %s' % cluster
if not cluster:
    # set ha policy
    subprocess.call(['sudo', '-E', 'service', 'rabbitmq-server', 'start'])
    time.sleep(10)
Example 6: __init__
# Required import: from marathon import MarathonClient [as alias]
# Or: from marathon.MarathonClient import list_tasks [as alias]
class Scaler:
    """Class for Scaling"""

    def __init__(self, app_name, config):
        self.logger = logging.getLogger("autoscaling")
        self.logger.setLevel(logging.DEBUG)
        self.logger.debug("Init object scaler...")
        self.config = config
        self.logger.debug("Connect RESTful mariadb and get policies...")
        conn = http.client.HTTPConnection(config["MARIA_RESTFUL"]['host'], config["MARIA_RESTFUL"]['port'])
        conn.request("GET", "/app/name/" + app_name)
        json_app = conn.getresponse().read().decode("utf-8")
        self.app = json.loads(json_app)
        conn.request("GET", "/app/name/" + app_name + "/policies")
        json_policies = conn.getresponse().read().decode("utf-8")
        self.app["policies"] = json.loads(json_policies)
        self.logger.debug("Connect influxdb and marathon...")
        self.influx_client = InfluxDBClient(config["INFLUXDB"]["host"], config["INFLUXDB"]["port"], config["INFLUXDB"]["username"], config["INFLUXDB"]["password"], config["INFLUXDB"]["db_name"])
        self.marathon_client = MarathonClient('http://' + config["MARATHON"]['host'] + ':' + config["MARATHON"]['port'])
        self.app["instance"] = self.marathon_client.get_app(app_name).instances
        self.app["mem"] = self.marathon_client.get_app(app_name).mem
        self.app["cpus"] = self.marathon_client.get_app(app_name).cpus
        self.logger.debug("Reconfig haproxy.cfg...")
        os.system("sudo ./servicerouter.py --marathon http://" + config["MARATHON"]["host"] + ":" + config["MARATHON"]["port"] + " --haproxy-config /etc/haproxy/haproxy.cfg")

    def setup_logging(self, log_file="autoscaling.log", level=logging.INFO, formatter=None):
        if formatter is None:
            formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh = logging.FileHandler(log_file)
        fh.setLevel(level)
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)

    def get_cpu_usage(self, container_name):
        """Return the CPU usage of container_name.
        @param string container_name container name
        """
        query = "select DERIVATIVE(cpu_cumulative_usage) as cpu_usage from stats where container_name = '" + container_name + "' and time > now()-5m group by time(2s) "
        result = self.influx_client.query(query)
        points = result[0]["points"]
        return (points[0][1] / 1000000000 / self.app["cpus"]) * 100

    def get_container_name(self, mesos_task_id):
        """Return the container name mapped to mesos_task_id in Mesos.
        @param string mesos_task_id
        """
        query = "select container_name from " + self.config["INFLUXDB"]["ts_mapping"] + " where time>now() - 5m and mesos_task_id = '" + mesos_task_id + "' limit 1"
        result = self.influx_client.query(query)
        points = result[0]["points"]
        return points[0][2]

    def get_containers_name(self):
        """Return the names of all containers belonging to the application self.app["name"].
        @return list of container names
        """
        tasks = self.marathon_client.list_tasks(self.app["name"])
        containers_name = []
        for task in tasks:
            containers_name.append(self.get_container_name(task.id))
        return containers_name

    def avg_mem_usage(self, containers_name):
        """Return the average memory usage of all containers in containers_name.
        @param list containers_name list of container names
        @return float average memory usage
        """
        number_container = len(containers_name)
        containers_name = ["'" + x + "'" for x in containers_name]
        containers_name = ",".join(containers_name)
        query = "select memory_usage,container_name from stats where time > now()-5m and container_name in (" + containers_name + ") limit " + str(number_container * 2)
        result = self.influx_client.query(query)
        points = result[0]["points"]
        sum_memory_usage = 0
        for point in points:
            if point[3] is not None:
                sum_memory_usage += point[3] / (self.app["mem"] * 1048576) * 100
        return sum_memory_usage / number_container

    def avg_cpu_usage(self, containers_name):
        """Return the average CPU usage of all containers in containers_name.
        @param list containers_name list of container names
        @return float average CPU usage
        """
        number_container = len(containers_name)
        containers_name = ["'" + x + "'" for x in containers_name]
        containers_name = ",".join(containers_name)
        query = "select DERIVATIVE(cpu_cumulative_usage) as cpu_usage,container_name from stats where time > now()-5m and container_name in (" + containers_name + ") group by time(10s),container_name limit " + str(number_container)
        result = self.influx_client.query(query)
        points = result[0]["points"]
        # ......... remainder of the code omitted .........
Example 7: HealthCheckBencher
# Required import: from marathon import MarathonClient [as alias]
# Or: from marathon.MarathonClient import list_tasks [as alias]
class HealthCheckBencher(object):
    def __init__(self, marathon_url, image, tasks):
        self.concurrency = 20
        self.docker_image = image
        self.app_base_name = 'health-check-test-'
        self.total_tasks_cout = int(tasks)
        self.instances_per_app = 50
        if tasks < self.instances_per_app:
            self.instances_per_app = self.total_tasks_cout
            self.app_count = 1
        else:
            self.app_count = self.total_tasks_cout / self.instances_per_app
        self.heath_check_interval = 30
        self.test_duration = 20
        self.marathon_cluster = MarathonClient(marathon_url, timeout=240)
        self.work_queue = Queue()
        self.result_queue = Queue()
        self.app_list_queue = Queue()
        self.action_list = [self.start_collect,
                            'sleep={}'.format(self.test_duration),
                            self.get_stats]

    def remove_apps(self):
        apps = self.marathon_cluster.list_apps()
        for app in apps:
            if app.id.startswith("/" + self.app_base_name):
                self.marathon_cluster.delete_app(app.id)
        active = 0
        while True:
            apps = self.marathon_cluster.list_apps()
            for app in apps:
                if app.id.startswith(self.app_base_name):
                    active += 1
            if active == 0:
                break

    def create_app(self, id):
        port_mapping = MarathonContainerPortMapping(container_port=80,
                                                    protocol="tcp")
        app_docker = MarathonDockerContainer(
            image=self.docker_image,
            network="BRIDGE",
            force_pull_image=True,
            port_mappings=[port_mapping])
        app_container = MarathonContainer(docker=app_docker)
        http_health_check = MarathonHealthCheck(
            protocol="HTTP",
            path="/status",
            grace_period_seconds=300,
            interval_seconds=self.heath_check_interval,
            timeout_seconds=20,
            max_consecutive_failures=0
        )
        app_suffix = str(md5(str(random())).hexdigest())
        app_name = self.app_base_name + app_suffix
        new_app = MarathonApp(cpus=CPUS, mem=MEM, disk=DISK,
                              container=app_container,
                              health_checks=[http_health_check],
                              instances=self.instances_per_app,
                              max_launch_delay_seconds=5)
        print("Creating {}".format(app_name))
        self.marathon_cluster.create_app(app_id=app_name, app=new_app)
        self.app_list_queue.put(app_name)
        return None

    def wait_instances(self, app_name):
        health_ok = 0
        while health_ok < self.instances_per_app:
            health_ok = 0
            tasks = self.marathon_cluster.list_tasks(app_name)
            for task in tasks:
                if task.health_check_results:
                    health_ok += 1

    def start_collect(self, task):
        url = 'http://' + task['host'] + ':' + str(task['port']) + '/start_collect'
        res = urlopen(url)
        if res.getcode() == 200:
            print(task['id'] + ': collecter was started')
        else:
            print(task['id'] + ': failed to start collecter')

    def stop_collect(self, task):
        url = 'http://' + task['host'] + ':' + str(task['port']) + '/stop_collect'
        res = urlopen(url)
        if res.getcode() == 200:
            print(task['id'] + ': collecter was stopped')
        else:
            print(task['id'] + ': failed to stop collecter')

    def clear_stats(self, task):
        url = 'http://' + task['host'] + ':' + str(task['port']) + '/clear_stats'
        res = urlopen(url)
        if res.getcode() == 200:
            print(task['id'] + ': stats was dropped')
        else:
            print(task['id'] + ': failed to drop stats')

    def get_stats(self, task):
        # ......... remainder of the code omitted .........