This article collects typical usage examples of the Python method boto.ec2.autoscale.AutoScaleConnection.get_all_launch_configurations. If you have been wondering how AutoScaleConnection.get_all_launch_configurations is actually used, the hand-picked examples below should help. You can also explore further usage examples for the containing class, boto.ec2.autoscale.AutoScaleConnection.
The following presents 11 code examples of the AutoScaleConnection.get_all_launch_configurations method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
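Before the examples, here is a minimal, self-contained sketch of the call itself. It is only an illustration: it assumes boto credentials are already configured (environment variables or a boto config file), and the region name and launch-configuration name are placeholders.
# Minimal usage sketch (assumes configured boto credentials; region and names are placeholders).
import boto.ec2.autoscale

# Open a connection to the Auto Scaling service in one region.
conn = boto.ec2.autoscale.connect_to_region("us-east-1")

# Without arguments, every launch configuration in the account is returned.
for lc in conn.get_all_launch_configurations():
    print lc.name, lc.image_id, lc.instance_type

# Passing names=[...] restricts the result to matching configurations;
# max_records/next_token can be used to page through large result sets.
named = conn.get_all_launch_configurations(names=["my-launch-config"])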
Example 1: _is_up_to_date
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import get_all_launch_configurations [as alias]
def _is_up_to_date():
    """
    Returns True if this instance is up to date.
    """
    # Retrieve instance information.
    conn = AutoScaleConnection()
    pool = conn.get_all_groups(["LSDA Worker Pool"])[0]
    config = conn.get_all_launch_configurations(
        names=[pool.launch_config_name])[0]
    # Retrieve the AMI for this instance and for others.
    config_ami = config.image_id
    my_ami = urllib.urlopen("http://169.254.169.254/latest/"
                            "meta-data/ami-id").read()
    return config_ami == my_ami
Example 2: main
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import get_all_launch_configurations [as alias]
def main():
    parser = optparse.OptionParser()
    parser.add_option("-c", "--config", dest="config_file", help="AutoScale config INI", metavar="FILE")
    (options, args) = parser.parse_args()
    logging.info("Using config file [%s]" % options.config_file)
    config = parse_config(options.config_file)
    aws_access = config.get("AWS", 'access')
    aws_secret = config.get("AWS", 'secret')
    logging.debug("Connecting to AWS with access [%s] and secret [%s]" % (aws_access, aws_secret))
    aws_connection = AutoScaleConnection(aws_access, aws_secret)
    print "AutoScalingGroups:"
    lcs = aws_connection.get_all_launch_configurations()
    for lc in lcs:
        print "%s -> %s" % (lc, lc.__dict__)
Example 3: BotoScaleInterface
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import get_all_launch_configurations [as alias]
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        self.access_id = access_id
        self.secret_key = secret_key
        self.token = token
        self.set_endpoint(clc_host)

    def set_endpoint(self, endpoint):
        #boto.set_stream_logger('scale')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
        port = 8773
        if endpoint[len(endpoint)-13:] == 'amazonaws.com':
            endpoint = endpoint.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = RegionInfo(endpoint=endpoint)
            port = 443
        self.conn = AutoScaleConnection(self.access_id, self.secret_key, region=reg,
                                        port=port, path=path,
                                        is_secure=True, security_token=self.token, debug=0)
        self.conn.APIVersion = '2011-01-01'
        if not(endpoint[len(endpoint)-13:] == 'amazonaws.com'):
            self.conn.auth_region_name = 'Eucalyptus'
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)
        f.close()

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # notice, honor_cooldown not supported.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj

    # policy related
    def delete_policy(self, policy_name, autoscale_group=None):
        return self.conn.delete_policy(policy_name, autoscale_group)

    def get_all_policies(self, as_group=None, policy_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_policies(as_group, policy_names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Policies.json")
        return obj

    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
        return self.conn.execute_policy(policy_name, as_group, honor_cooldown)

    def create_scaling_policy(self, scaling_policy):
        return self.conn.create_scaling_policy(scaling_policy)
# ... (rest of the code omitted) ...
Example 4: __init__
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import get_all_launch_configurations [as alias]
# ... (some code omitted here) ...
            if "Name" in instance.tags and instance.tags["Name"] == name and instance.state == "running":
                data_centers_intance_ids.append(instance.id)
        if data_centers_intance_ids:
            self.remove_instances(data_centers_intance_ids)

    def create_elb(self, name, zone, project_tag_value, security_group_id, instance_ids=None):
        lbs = [l for l in self.elb_conn.get_all_load_balancers() if l.name == name]
        lb = lbs[0] if lbs else None
        if not lb:
            hc = HealthCheck(
                timeout=50, interval=60, healthy_threshold=2, unhealthy_threshold=8, target="HTTP:80/heartbeat"
            )
            ports = [(80, 80, "http")]
            zones = [zone]
            lb = self.elb_conn.create_load_balancer(name, zones, ports)
            self.elb_conn.apply_security_groups_to_lb(name, [security_group_id])
            lb.configure_health_check(hc)
            if instance_ids:
                lb.register_instances(instance_ids)
            params = {
                "LoadBalancerNames.member.1": lb.name,
                "Tags.member.1.Key": "15619project",
                "Tags.member.1.Value": project_tag_value,
            }
            lb.connection.get_status("AddTags", params, verb="POST")
        return lb

    def remove_elb(self, name):
        self.elb_conn.delete_load_balancer(name)

    def create_launch_configuration(self, name, image, key_name, security_groups, instance_type):
        lcs = [l for l in self.auto_scale_conn.get_all_launch_configurations() if l.name == name]
        lc = lcs[0] if lcs else None
        if not lc:
            lc = LaunchConfiguration(
                name=name,
                image_id=image,
                key_name=key_name,
                security_groups=[security_groups],
                instance_type=instance_type,
            )
            self.auto_scale_conn.create_launch_configuration(lc)
        return lc

    def remove_launch_configuration(self, name):
        self.auto_scale_conn.delete_launch_configuration(name)

    def create_autoscaling_group(self, name, lb_name, zone, tags, instance_ids=None):
        lc = self.create_launch_configuration()
        as_groups = [a for a in self.auto_scale_conn.get_all_groups() if a.name == name]
        as_group = as_groups[0] if as_groups else None
        if not as_group:
            as_group = AutoScalingGroup(
                group_name=name,
                load_balancers=[lb_name],
                availability_zones=[zone],
                launch_config=lc,
                min_size=4,
                max_size=4,
                health_check_type="ELB",
                health_check_period=120,
                connection=self.auto_scale_conn,
                default_cooldown=self.default_cooldown,
                desired_capacity=4,
Example 5: BotoScaleInterface
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import get_all_launch_configurations [as alias]
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/AutoScaling'
        port = 8773
        if clc_host[len(clc_host)-13:] == 'amazonaws.com':
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = None
            port = 443
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        self.conn = AutoScaleConnection(access_id, secret_key, region=reg,
                                        port=port, path=path,
                                        is_secure=True, security_token=token, debug=0)
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)
        f.close()

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # notice, honor_cooldown not supported.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names, max_records, next_token):
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records, next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj
Example 6: test_basic
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import get_all_launch_configurations [as alias]
def test_basic(self):
    # NB: as it says on the tin these are really basic tests that only
    # (lightly) exercise read-only behaviour - and that's only if you
    # have any autoscale groups to introspect. It's useful, however, to
    # catch simple errors
    print('--- running %s tests ---' % self.__class__.__name__)
    c = AutoScaleConnection()
    self.assertTrue(repr(c).startswith('AutoScaleConnection'))
    groups = c.get_all_groups()
    for group in groups:
        self.assertIsInstance(group, AutoScalingGroup)
        # get activities
        activities = group.get_activities()
        for activity in activities:
            self.assertIsInstance(activity, Activity)
    # get launch configs
    configs = c.get_all_launch_configurations()
    for config in configs:
        self.assertIsInstance(config, LaunchConfiguration)
    # get policies
    policies = c.get_all_policies()
    for policy in policies:
        self.assertIsInstance(policy, ScalingPolicy)
    # get scheduled actions
    actions = c.get_all_scheduled_actions()
    for action in actions:
        self.assertIsInstance(action, ScheduledUpdateGroupAction)
    # get instances
    instances = c.get_all_autoscaling_instances()
    for instance in instances:
        self.assertIsInstance(instance, Instance)
    # get all scaling process types
    ptypes = c.get_all_scaling_process_types()
    for ptype in ptypes:
        self.assertTrue(ptype, ProcessType)
    # get adjustment types
    adjustments = c.get_all_adjustment_types()
    for adjustment in adjustments:
        self.assertIsInstance(adjustment, AdjustmentType)
    # get metrics collection types
    types = c.get_all_metric_collection_types()
    self.assertIsInstance(types, MetricCollectionTypes)
    # create the simplest possible AutoScale group
    # first create the launch configuration
    time_string = '%d' % int(time.time())
    lc_name = 'lc-%s' % time_string
    lc = LaunchConfiguration(name=lc_name, image_id='ami-2272864b',
                             instance_type='t1.micro')
    c.create_launch_configuration(lc)
    found = False
    lcs = c.get_all_launch_configurations()
    for lc in lcs:
        if lc.name == lc_name:
            found = True
            break
    assert found
    # now create autoscaling group
    group_name = 'group-%s' % time_string
    group = AutoScalingGroup(name=group_name, launch_config=lc,
                             availability_zones=['us-east-1a'],
                             min_size=1, max_size=1)
    c.create_auto_scaling_group(group)
    found = False
    groups = c.get_all_groups()
    for group in groups:
        if group.name == group_name:
            found = True
            break
    assert found
    # now create a tag
    tag = Tag(key='foo', value='bar', resource_id=group_name,
              propagate_at_launch=True)
    c.create_or_update_tags([tag])
    found = False
    tags = c.get_all_tags()
    for tag in tags:
        if tag.resource_id == group_name and tag.key == 'foo':
            found = True
            break
    assert found
    c.delete_tags([tag])
    # shutdown instances and wait for them to disappear
# ... (rest of the code omitted) ...
Example 7: main
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import get_all_launch_configurations [as alias]
def main():
    parser = argparse.ArgumentParser(
        description="triggers a full LSDA rollout")
    parser.add_argument("--inspect", action="store_true",
        help="pause before baking AMI", default=False)
    parser.add_argument("--clean", action="store_true",
        help="reset from clean Ubuntu 12.04 image", default=False)
    parser.add_argument("--no-restart", action="store_true",
        dest="no_restart", help="don't restart all nodes in ASG",
        default=False)
    options = parser.parse_args()

    logging.info("Starting rollout.")

    conn_ec2 = boto.ec2.connect_to_region("us-east-1")
    conn_ec2_as = AutoScaleConnection()

    if not options.clean:
        logging.info("Searching for existing images...")
        group = conn_ec2_as.get_all_groups(['LSDA Worker Pool'])[0]
        launch_config = conn_ec2_as.get_all_launch_configurations(
            names=[group.launch_config_name])[0]
        existing_images = conn_ec2.get_all_images(owners=["self"])[0]
        ami_id = launch_config.image_id
        logging.info("Using existing image {0}".format(ami_id))
    else:
        ami_id = 'ami-59a4a230'  # Clean Ubuntu 12.04.
        logging.info("Using base image {0}".format(ami_id))

    reservation = conn_ec2.run_instances(
        image_id=ami_id,
        key_name='jeremy-aws-key',
        instance_type='t1.micro',
        security_groups=['Worker Nodes'],
    )

    try:
        instance = reservation.instances[0]
        logging.info("Waiting for instance {} to start...".format(instance.id))
        instance.update()
        while instance.ip_address is None:
            logging.info("Not ready. Retrying in 10 seconds...")
            time.sleep(10)
            instance.update()
        while True:
            result = subprocess.call(["ssh", "-o",
                "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no",
                "ubuntu@{}".format(instance.ip_address), "uname -r"])
            if result != 0:
                logging.info("Not ready for SSH. Retrying in 10 seconds...")
                time.sleep(10)
            else:
                break
        logging.info("Instance has started; running setup script.")
        logging.info("(IP address is {})".format(instance.ip_address))
        subprocess.check_call(["ssh", "-o", "UserKnownHostsFile=/dev/null",
            "-o", "StrictHostKeyChecking=no",
            "ubuntu@{}".format(instance.ip_address),
            "sudo stop lsda; sleep 20; sudo rm worker.sh;"
            "wget https://raw.github.com/fatlotus/lsda-infrastructure/"
            "master/servers/worker.sh; sudo bash worker.sh"])
        if options.inspect:
            logging.info("Connect to ubuntu@{} to inspect the image."
                         .format(instance.ip_address))
            logging.info("When you're done, press CTRL-C.")
            try:
                while True:
                    time.sleep(3600)
            except KeyboardInterrupt:
                pass
        logging.info("Creating AMI from existing image.")
        new_image = instance.create_image(
            name=('Latest-{:%Y-%m-%d--%H-%M-%S}'.
                  format(datetime.datetime.now())),
            description="(automatically generated)"
        )
        time.sleep(10)
        image_object = conn_ec2.get_image(new_image)
        while image_object.state == "pending":
            logging.info("State is still pending. Retrying in 10 seconds.")
            time.sleep(10)
            image_object.update()
    finally:
        # ... (rest of the code omitted) ...
Example 8: add_ingress_rule
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import get_all_launch_configurations [as alias]
def add_ingress_rule(dry_run, go_agent_security_group, go_agent_security_group_owner, go_agent_security_group_name):
    """
    For each ASG (app) in each VPC, add a rule to each SG associated with the ASG's launch configuration
    that allows SSH ingress from the GoCD agents' SG.

    BEFORE RUNNING THIS SCRIPT!:
    - Use the assume_role bash script to assume the role in the proper account/VPC (edx, edge, mckinsey, etc.)
    - If you don't know what this is, ask someone in DevOps.
    - THEN run this script.
    """
    asg_conn = AutoScaleConnection()
    ec2_conn = boto.ec2.connect_to_region('us-east-1')
    asgs = []
    launch_configs = {}
    security_groups = {}

    logging.debug('All ASGs:')
    for group in asg_conn.get_all_groups():
        logging.debug(' {}'.format(group))
        asgs.append(group)

    logging.debug('All launch configurations:')
    for launch_config in asg_conn.get_all_launch_configurations():
        logging.debug(' {}'.format(launch_config))
        launch_configs[launch_config.name] = launch_config

    logging.debug('All security groups:')
    for sec_group in ec2_conn.get_all_security_groups():
        logging.debug(' {}'.format(sec_group))
        security_groups[sec_group.id] = sec_group

    # Validate that each ASG has a launch configuration.
    for group in asgs:
        try:
            logging.info("Launch configuration for ASG '{}' is '{}'.".format(
                group.name, launch_configs[group.launch_config_name]
            ))
        except KeyError:
            logging.error("Launch configuration '{}' for ASG '{}' was not found!".format(
                group.launch_config_name, group.name
            ))
            raise

    # Construct a fake security group for the prod-tools-goagent-sg security group in the edx-tools account.
    # This group will be used to grant the go-agents ingress into the ASG's VPCs.
    go_agent_security_group = boto.ec2.securitygroup.SecurityGroup(
        name=go_agent_security_group_name,
        owner_id=go_agent_security_group_owner,
        id=go_agent_security_group
    )

    # For each launch config, check for the security group. Can support multiple security groups
    # but the edX DevOps convention is to use a single security group.
    for group in asgs:
        launch_config = launch_configs[group.launch_config_name]
        if len(launch_config.security_groups) > 1:
            err_msg = "Launch config '{}' for ASG '{}' has more than one security group!: {}".format(
                launch_config.name, group.name, launch_config.security_groups
            )
            logging.warning(err_msg)
            continue
        sg_name = launch_config.security_groups[0]
        try:
            # Find the security group.
            sec_group = security_groups[sg_name]
        except KeyError:
            logging.error("Security group '{}' for ASG '{}' was not found!.".format(sg_name, group.name))
        logging.info('BEFORE: Rules for security group {}:'.format(sec_group.name))
        logging.info(sec_group.rules)
        try:
            # Add the ingress rule to the security group.
            yes_no = raw_input("Apply the change to this security group? [Yes]")
            if yes_no in ("", "y", "Y", "yes"):
                sec_group.authorize(
                    ip_protocol='tcp',
                    from_port=22,
                    to_port=22,
                    src_group=go_agent_security_group,
                    dry_run=dry_run
                )
        except boto.exception.EC2ResponseError as exc:
            if exc.status == 412:
                # If the dry_run flag is set, then each rule addition will raise this exception.
                # Log it and carry on.
                logging.info('Dry run is True but rule addition would have succeeded for security group {}.'.format(
                    sg_name
                ))
            elif exc.code == "InvalidPermission.Duplicate":
                logging.info("Rule already exists for {}.".format(sg_name))
            else:
                raise
        logging.info('AFTER: Rules for security group {}:'.format(sg_name))
        logging.info(sec_group.rules)
Example 9: IcsAS
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import get_all_launch_configurations [as alias]
class IcsAS(object):
    """
    ICS Library for AutoScale
    """
    def __init__(self, region, **kwargs):
        self.conn = AutoScaleConnection(region=get_region(region), **kwargs)

    def to_list(self, input):
        """
        Validate input, if not list, but string, make it as a list
        """
        if input is None:
            return input
        elif isinstance(input, list):
            return input
        elif isinstance(input, basestring):
            return [input]
        else:
            raise IcsASException("Need the type '%s' but '%s' found"
                                 % ('list', type(input)))

    def get_group_name_from_instance(self, instance_id):
        """
        Get the ASG name from the specific instance id
        :type instance_id: string
        :param instance_id: EC2 instance id startwith 'i-xxxxxxx'
        :rtype: string
        :return: name of the ASG, this instance belongs to
        """
        instances = self.conn.get_all_autoscaling_instances(
            instance_ids=self.to_list(instance_id))
        if instances:
            return instances[0].group_name
        else:
            return None

    def get_instances_from_group_name(self, name):
        """
        Get the instance from the specific ASG name
        :type name: string
        :param name: the specific ASG name
        :rtype: list
        :return: a list contains all the instances
        """
        instances = []
        for group in self.conn.get_all_groups(names=self.to_list(name)):
            instances.extend(group.instances)
        return instances

    def get_group_from_name(self, name):
        """
        Get the ASG from its name
        :type name: string
        :param name: the ASG name
        :rtype: list
        :return: a list represents the specific ASG(s)
        """
        return self.conn.get_all_groups(names=self.to_list(name))

    def get_launch_config_from_name(self, name):
        """
        Get the Launch Configuration from its name
        :type name: string
        :param name: the Launch Configuration name
        :rtype: list
        :return: a list represents the specific Launch Configuration(s)
        """
        return self.conn.get_all_launch_configurations(
            names=self.to_list(name))

    def create_launch_config(self, launch_config):
        """
        Create the Launch Configuration
        :type launch_config: class
        :param launch_config: boto launch_config object
        :rtype: string
        :return: AWS request Id
        """
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_config_from_name(self, name):
        """
        Delete the Launch Configuration from its name
        :type name: string
        :param name: the name of launch configuration
        :rtype: string
# ... (rest of the code omitted) ...
Example 10: RegionInfo
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import get_all_launch_configurations [as alias]
region_name = 'ap-southeast-1'
region_ec2_endpoint = 'ec2.ap-southeast-1.amazonaws.com'
region_autoscale_endpoint = 'autoscaling.ap-southeast-1.amazonaws.com'

# Engine Scaling conf names to clean
scaling_confs = ['scaling_conf_name_1', 'scaling_conf_name_2']

# Connect EC2
aws_region = RegionInfo(name=region_name, endpoint=region_ec2_endpoint)
conn = EC2Connection(aws_access_key_id, aws_secret_access_key, region=aws_region)

# Connect autoscaling service
aws_region_as = RegionInfo(name=region_name, endpoint=region_autoscale_endpoint)
conn_as = AutoScaleConnection(aws_access_key_id, aws_secret_access_key, region=aws_region_as)

lcs = conn_as.get_all_launch_configurations(names=scaling_confs)
for lc in lcs:
    try:
        img = conn.get_image(lc.image_id)
        snaps = conn.get_all_snapshots(filters={"description": "*" + img.id + "*"})
        img.deregister(delete_snapshot=False)
        for snap in snaps:
            snap.delete()
        print "scaling configuration image and these related " + str(snaps) + " snapshots removed"
    except:
        print "ami not found " + lc.image_id
        pass
    conn_as.delete_launch_configuration(lc.name)
    print "\ndeleted scaling configuration " + str(lc.name)
Example 11: Cloud
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import get_all_launch_configurations [as alias]
class Cloud(object):
    def __init__(self, cloud_config):
        self.config = cloud_config
        self.all_instances = []
        self.failed_launch = False
        self.failed_count = 0
        self.failed_last_valid_count = 0
        self._conn = None
        self._as_conn = None
        self._lc = None
        self._asg = None
        self._last_asg_launch_attempt = None
        self.maxed = False
        self._last_launch_attempt = datetime.datetime.utcnow()
        self._initialize()

    def _create_connection(self):
        LOG.debug("Creating connection for %s" % self.config.name)
        self._conn = boto.connect_ec2(self.config.access_id,
                                      self.config.secret_key,
                                      validate_certs=False)
        self._conn.host = self.config.cloud_uri
        self._conn.port = self.config.cloud_port

    def _create_autoscale_connection(self):
        LOG.debug("Creating autoscale connection for %s" % self.config.name)
        region = RegionInfo(name=self.config.cloud_type,
                            endpoint=self.config.as_uri)
        self._as_conn = AutoScaleConnection(
            aws_access_key_id=self.config.access_id,
            aws_secret_access_key=self.config.secret_key,
            is_secure=True,
            port=self.config.as_port,
            region=region,
            validate_certs=False)

    def _create_or_set_launch_configuration(self):
        name = self.config.lc_name
        if not self._lc:
            LOG.debug("Attempting to load launch configuration: %s" % (name))
            lc = self._as_conn.get_all_launch_configurations(names=[name])
            if len(lc) == 1:
                LOG.debug("Launch configuration %s found." % (name))
                self._lc = lc[0]
        if not self._lc:
            #TODO(pdmars): key and security groups are hardcoded for now, gross
            if self.config.user_data_file is not None:
                user_data_file = self.config.user_data_file
                with open(user_data_file) as f:
                    user_data = f.read()
            else:
                user_data = None
            LOG.debug("Creating launch configuration %s" % name)
            LOG.debug("\tname: %s" % name)
            LOG.debug("\timage_id: %s" % self.config.image_id)
            LOG.debug("\tinstance_type: %s" % self.config.instance_type)
            LOG.debug("\tuser_data: %s" % user_data)
            self._lc = LaunchConfiguration(
                name=name,
                image_id=self.config.image_id,
                key_name="phantomkey",
                security_groups=['default'],
                instance_type=self.config.instance_type,
                user_data=user_data)
            self._as_conn.create_launch_configuration(self._lc)

    def _create_or_set_autoscale_group(self):
        name = self.config.asg_name
        if not self._asg:
            LOG.debug("Attempting to load autoscale group: %s" % name)
            asg = self._as_conn.get_all_groups(names=[name])
            LOG.debug("Autoscale group: %s" % asg)
            if len(asg) == 1:
                LOG.debug("Autoscale group %s found." % name)
                self._asg = asg[0]
        if not self._asg:
            # TODO(pdmars): more hard coded grossness, for now
            try:
                cloud_guess = self.config.lc_name.split("@")[1].strip()
            except Exception as e:
                LOG.warn("Unable to guess cloud for auto scale tags")
                LOG.warn("Setting cloud to hotel")
                cloud_guess = "hotel"
            policy_name_key = "PHANTOM_DEFINITION"
            policy_name = "error_overflow_n_preserving"
            ordered_clouds_key = "clouds"
            n_preserve_key = "minimum_vms"
            ordered_clouds = cloud_guess + ":-1"
            n_preserve = 0
            policy_tag = Tag(connection=self._as_conn, key=policy_name_key,
                             value=policy_name, resource_id=name)
            clouds_tag = Tag(connection=self._as_conn, key=ordered_clouds_key,
                             value=ordered_clouds, resource_id=name)
            npreserve_tag = Tag(connection=self._as_conn, key=n_preserve_key,
                                value=n_preserve, resource_id=name)
            tags = [policy_tag, clouds_tag, npreserve_tag]
            zones = [self.config.az]
            LOG.debug("Creating autoscale group %s" % name)
            LOG.debug("\tname: %s" % name)
            LOG.debug("\tavailability_zones: %s" % zones)
# ... (rest of the code omitted) ...