This article collects typical usage examples of the Python method boto.ec2.autoscale.AutoScaleConnection.create_launch_configuration. If you are wondering what AutoScaleConnection.create_launch_configuration does and how to use it, the curated code examples below may help. You can also read more about the containing class, boto.ec2.autoscale.AutoScaleConnection.
The sections below present 15 code examples of AutoScaleConnection.create_launch_configuration, ordered by popularity by default.
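Before the project-specific examples, here is a minimal sketch (not taken from any of the projects below) of the call pattern they all share: open an autoscale connection, build a LaunchConfiguration, and pass it to create_launch_configuration. The region, AMI ID, key pair and security group are placeholder assumptions; substitute your own values.

import boto.ec2.autoscale
from boto.ec2.autoscale import LaunchConfiguration

# Hypothetical values -- replace the region, AMI, key pair and security group with your own.
conn = boto.ec2.autoscale.connect_to_region('us-east-1')
lc = LaunchConfiguration(name='example-launch-config',
                         image_id='ami-00000000',
                         key_name='example-key',
                         security_groups=['default'],
                         instance_type='t1.micro')
conn.create_launch_configuration(lc)
# A launch configuration is deleted by name once it is no longer referenced.
conn.delete_launch_configuration('example-launch-config')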
Example 1: test_ebs_optimized_regression
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
def test_ebs_optimized_regression(self):
    c = AutoScaleConnection()
    time_string = '%d' % int(time.time())
    lc_name = 'lc-%s' % time_string
    lc = LaunchConfiguration(
        name=lc_name,
        image_id='ami-2272864b',
        instance_type='t1.micro',
        ebs_optimized=True
    )
    # This failed due to the difference between native Python ``True/False``
    # & the expected string variants.
    c.create_launch_configuration(lc)
    self.addCleanup(c.delete_launch_configuration, lc_name)
Example 2: create_autoscaling_group
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
def create_autoscaling_group():
    global img
    conn = AutoScaleConnection(os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'])
    autoscale = boto.ec2.autoscale.connect_to_region('us-east-1')
    print conn.get_all_groups()
    timestamp = time.time()
    value = datetime.datetime.fromtimestamp(timestamp)
    humanreadabledate = value.strftime('%Y-%m-%d_%H.%M.%S')
    config_name = 'live_launch_config' + humanreadabledate
    init_script = "#!/bin/sh /home/ec2-user/sds/deployment_scripts/initialize_server.py"
    lc = LaunchConfiguration(name=config_name, image_id=img,
                             key_name='SDSEastKey',
                             security_groups=['sg-a7afb1c2'],
                             user_data=init_script)
    conn.create_launch_configuration(lc)
    ag = AutoScalingGroup(group_name=config_name, load_balancers=['SDSLiveLoadBalancer'],
                          availability_zones=['us-east-1a'], launch_config=lc,
                          min_size=2, max_size=2, connection=conn)
    conn.create_auto_scaling_group(ag)
Example 3: launch_auto_scaling
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
def launch_auto_scaling(stage='development'):
    config = get_provider_dict()
    from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, LaunchConfiguration, Trigger
    conn = AutoScaleConnection(fabric.api.env.conf['AWS_ACCESS_KEY_ID'],
                               fabric.api.env.conf['AWS_SECRET_ACCESS_KEY'],
                               host='%s.autoscaling.amazonaws.com' % config['location'][:-1])
    for name, values in config.get(stage, {}).get('autoscale', {}).items():
        if any(group.name == name for group in conn.get_all_groups()):
            fabric.api.warn(fabric.colors.orange('Autoscale group %s already exists' % name))
            continue
        lc = LaunchConfiguration(name='%s-launch-config' % name,
                                 image_id=values['image'], key_name=config['key'])
        conn.create_launch_configuration(lc)
        ag = AutoScalingGroup(group_name=name, load_balancers=values.get('load-balancers'),
                              availability_zones=[config['location']], launch_config=lc,
                              min_size=values['min-size'], max_size=values['max-size'])
        conn.create_auto_scaling_group(ag)
        if 'min-cpu' in values and 'max-cpu' in values:
            tr = Trigger(name='%s-trigger' % name, autoscale_group=ag,
                         measure_name='CPUUtilization', statistic='Average', unit='Percent',
                         dimensions=[('AutoScalingGroupName', ag.name)],
                         period=60, lower_threshold=values['min-cpu'],
                         lower_breach_scale_increment='-1',
                         upper_threshold=values['max-cpu'],
                         upper_breach_scale_increment='2', breach_duration=60)
            conn.create_trigger(tr)
Example 4: BotoScaleInterface
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        self.access_id = access_id
        self.secret_key = secret_key
        self.token = token
        self.set_endpoint(clc_host)

    def set_endpoint(self, endpoint):
        #boto.set_stream_logger('scale')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
        port = 8773
        if endpoint[len(endpoint)-13:] == 'amazonaws.com':
            endpoint = endpoint.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = RegionInfo(endpoint=endpoint)
            port = 443
        self.conn = AutoScaleConnection(self.access_id, self.secret_key, region=reg,
                                        port=port, path=path,
                                        is_secure=True, security_token=self.token, debug=0)
        self.conn.APIVersion = '2011-01-01'
        if not(endpoint[len(endpoint)-13:] == 'amazonaws.com'):
            self.conn.auth_region_name = 'Eucalyptus'
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)
        f.close()

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # notice, honor_cooldown not supported.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj

    # policy related
    def delete_policy(self, policy_name, autoscale_group=None):
        return self.conn.delete_policy(policy_name, autoscale_group)

    def get_all_policies(self, as_group=None, policy_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_policies(as_group, policy_names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Policies.json")
        return obj

    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
        return self.conn.execute_policy(policy_name, as_group, honor_cooldown)

    def create_scaling_policy(self, scaling_policy):
        return self.conn.create_scaling_policy(scaling_policy)
#......... remainder of the code omitted .........
Example 5: __init__
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
#......... some code omitted .........
        for instance in reservation.instances:
            if "Name" in instance.tags and instance.tags["Name"] == name and instance.state == "running":
                data_centers_intance_ids.append(instance.id)
        if data_centers_intance_ids:
            self.remove_instances(data_centers_intance_ids)

    def create_elb(self, name, zone, project_tag_value, security_group_id, instance_ids=None):
        lbs = [l for l in self.elb_conn.get_all_load_balancers() if l.name == name]
        lb = lbs[0] if lbs else None
        if not lb:
            hc = HealthCheck(
                timeout=50, interval=60, healthy_threshold=2, unhealthy_threshold=8, target="HTTP:80/heartbeat"
            )
            ports = [(80, 80, "http")]
            zones = [zone]
            lb = self.elb_conn.create_load_balancer(name, zones, ports)
            self.elb_conn.apply_security_groups_to_lb(name, [security_group_id])
            lb.configure_health_check(hc)
            if instance_ids:
                lb.register_instances(instance_ids)
            params = {
                "LoadBalancerNames.member.1": lb.name,
                "Tags.member.1.Key": "15619project",
                "Tags.member.1.Value": project_tag_value,
            }
            lb.connection.get_status("AddTags", params, verb="POST")
        return lb

    def remove_elb(self, name):
        self.elb_conn.delete_load_balancer(name)

    def create_launch_configuration(self, name, image, key_name, security_groups, instance_type):
        lcs = [l for l in self.auto_scale_conn.get_all_launch_configurations() if l.name == name]
        lc = lcs[0] if lcs else None
        if not lc:
            lc = LaunchConfiguration(
                name=name,
                image_id=image,
                key_name=key_name,
                security_groups=[security_groups],
                instance_type=instance_type,
            )
            self.auto_scale_conn.create_launch_configuration(lc)
        return lc

    def remove_launch_configuration(self, name):
        self.auto_scale_conn.delete_launch_configuration(name)

    def create_autoscaling_group(self, name, lb_name, zone, tags, instance_ids=None):
        lc = self.create_launch_configuration()
        as_groups = [a for a in self.auto_scale_conn.get_all_groups() if a.name == name]
        as_group = as_groups[0] if as_groups else None
        if not as_group:
            as_group = AutoScalingGroup(
                group_name=name,
                load_balancers=[lb_name],
                availability_zones=[zone],
                launch_config=lc,
                min_size=4,
                max_size=4,
                health_check_type="ELB",
                health_check_period=120,
                connection=self.auto_scale_conn,
                default_cooldown=self.default_cooldown,
Example 6: BotoScaleInterface
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/AutoScaling'
        port = 8773
        if clc_host[len(clc_host)-13:] == 'amazonaws.com':
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = None
            port = 443
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        self.conn = AutoScaleConnection(access_id, secret_key, region=reg,
                                        port=port, path=path,
                                        is_secure=True, security_token=token, debug=0)
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)
        f.close()

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # notice, honor_cooldown not supported.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names, max_records, next_token):
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj
Example 7: create_autoscaling
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
def create_autoscaling(ami_id, sns_arn):
    """
    Creates the autoscaling group for proxy instances
    Inspired by boto autoscaling tutorial.
    """
    con = AutoScaleConnection(aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                              aws_access_key_id=AWS_ACCESS_KEY,
                              region=RegionInfo(name=REGION,
                                                endpoint='autoscaling.%s.amazonaws.com' % REGION))
    print "Creating autoscaling configuration.."
    config = LaunchConfiguration(name=AUTOSCALING_GROUP_NAME,
                                 image_id=ami_id,
                                 key_name=KEY_NAME,
                                 security_groups=[EC2_SECURITY_GROUP_NAME],
                                 instance_type=INSTANCE_TYPE)
    con.create_launch_configuration(config)
    print "Create autoscaling group..."
    ag = AutoScalingGroup(name=AUTOSCALING_GROUP_NAME,
                          launch_config=config,
                          availability_zones=["{0}a".format(REGION)],
                          load_balancers=[ELB_NAME],
                          min_size=AUTOSCALING_MIN_INSTANCES,
                          max_size=AUTOSCALING_MAX_INSTANCES,
                          group_name=AUTOSCALING_GROUP_NAME)
    con.create_auto_scaling_group(ag)
    # fetch the autoscale group after it is created (unused but may be necessary)
    _ = con.get_all_groups(names=[AUTOSCALING_GROUP_NAME])[0]
    # Create tag name for autoscaling-created machines
    as_tag = Tag(key='Name', value=AUTOSCALING_GROUP_NAME,
                 propagate_at_launch=True, resource_id=AUTOSCALING_GROUP_NAME)
    con.create_or_update_tags([as_tag])
    print "Creating autoscaling policy..."
    scaleup_policy = ScalingPolicy(name='scale_up',
                                   adjustment_type='ChangeInCapacity',
                                   as_name=AUTOSCALING_GROUP_NAME,
                                   scaling_adjustment=1,
                                   cooldown=AUTOSCALING_COOLDOWN_PERIOD)
    scaledown_policy = ScalingPolicy(name='scale_down',
                                     adjustment_type='ChangeInCapacity',
                                     as_name=AUTOSCALING_GROUP_NAME,
                                     scaling_adjustment=-1,
                                     cooldown=AUTOSCALING_COOLDOWN_PERIOD)
    con.create_scaling_policy(scaleup_policy)
    con.create_scaling_policy(scaledown_policy)
    # Get freshened policy objects
    scaleup_policy = con.get_all_policies(as_group=AUTOSCALING_GROUP_NAME, policy_names=['scale_up'])[0]
    scaledown_policy = con.get_all_policies(as_group=AUTOSCALING_GROUP_NAME, policy_names=['scale_down'])[0]
    print "Creating cloudwatch alarms"
    cloudwatch_con = CloudWatchConnection(aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
                                          aws_access_key_id=AWS_ACCESS_KEY,
                                          region=RegionInfo(name=REGION,
                                                            endpoint='monitoring.%s.amazonaws.com' % REGION))
    alarm_dimensions = {"AutoScalingGroupName": AUTOSCALING_GROUP_NAME}
    scaleup_alarm = MetricAlarm(name='scale_up_on_cpu',
                                namespace='AWS/EC2',
                                metric='CPUUtilization',
                                statistic='Average',
                                comparison='>',
                                threshold=AUTOSCALING_CPU_MAX_THRESHOLD,
                                period='60',
                                evaluation_periods=1,
                                alarm_actions=[scaleup_policy.policy_arn, sns_arn],
                                dimensions=alarm_dimensions)
    # Don't send SNS on scaledown policy
    scaledown_alarm = MetricAlarm(name='scale_down_on_cpu',
                                  namespace='AWS/EC2',
                                  metric='CPUUtilization',
                                  statistic='Average',
                                  comparison='<',
                                  threshold=AUTOSCALING_CPU_MIN_THRESHOLD,
                                  period='60',
                                  evaluation_periods=1,
                                  alarm_actions=[scaledown_policy.policy_arn],
                                  dimensions=alarm_dimensions)
    cloudwatch_con.create_alarm(scaleup_alarm)
    cloudwatch_con.create_alarm(scaledown_alarm)
Example 8: test_basic
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
def test_basic(self):
    # NB: as it says on the tin these are really basic tests that only
    # (lightly) exercise read-only behaviour - and that's only if you
    # have any autoscale groups to introspect. It's useful, however, to
    # catch simple errors
    print('--- running %s tests ---' % self.__class__.__name__)
    c = AutoScaleConnection()
    self.assertTrue(repr(c).startswith('AutoScaleConnection'))
    groups = c.get_all_groups()
    for group in groups:
        self.assertIsInstance(group, AutoScalingGroup)
        # get activities
        activities = group.get_activities()
        for activity in activities:
            self.assertIsInstance(activity, Activity)
    # get launch configs
    configs = c.get_all_launch_configurations()
    for config in configs:
        self.assertIsInstance(config, LaunchConfiguration)
    # get policies
    policies = c.get_all_policies()
    for policy in policies:
        self.assertIsInstance(policy, ScalingPolicy)
    # get scheduled actions
    actions = c.get_all_scheduled_actions()
    for action in actions:
        self.assertIsInstance(action, ScheduledUpdateGroupAction)
    # get instances
    instances = c.get_all_autoscaling_instances()
    for instance in instances:
        self.assertIsInstance(instance, Instance)
    # get all scaling process types
    ptypes = c.get_all_scaling_process_types()
    for ptype in ptypes:
        self.assertTrue(ptype, ProcessType)
    # get adjustment types
    adjustments = c.get_all_adjustment_types()
    for adjustment in adjustments:
        self.assertIsInstance(adjustment, AdjustmentType)
    # get metrics collection types
    types = c.get_all_metric_collection_types()
    self.assertIsInstance(types, MetricCollectionTypes)
    # create the simplest possible AutoScale group
    # first create the launch configuration
    time_string = '%d' % int(time.time())
    lc_name = 'lc-%s' % time_string
    lc = LaunchConfiguration(name=lc_name, image_id='ami-2272864b',
                             instance_type='t1.micro')
    c.create_launch_configuration(lc)
    found = False
    lcs = c.get_all_launch_configurations()
    for lc in lcs:
        if lc.name == lc_name:
            found = True
            break
    assert found
    # now create autoscaling group
    group_name = 'group-%s' % time_string
    group = AutoScalingGroup(name=group_name, launch_config=lc,
                             availability_zones=['us-east-1a'],
                             min_size=1, max_size=1)
    c.create_auto_scaling_group(group)
    found = False
    groups = c.get_all_groups()
    for group in groups:
        if group.name == group_name:
            found = True
            break
    assert found
    # now create a tag
    tag = Tag(key='foo', value='bar', resource_id=group_name,
              propagate_at_launch=True)
    c.create_or_update_tags([tag])
    found = False
    tags = c.get_all_tags()
    for tag in tags:
        if tag.resource_id == group_name and tag.key == 'foo':
            found = True
            break
    assert found
    c.delete_tags([tag])
    # shutdown instances and wait for them to disappear
#......... remainder of the code omitted .........
Example 9: AutoScaleConnection
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
params = {"LoadBalancerNames.member.1": lb.name,
          "Tags.member.1.Key": 'Project',
          "Tags.member.1.Value": '2.2'}
s = lb.connection.get_status('AddTags', params, verb='POST')
print 'Load Balancer DNS: ' + lb_dns

# -------------------------Create Auto Scaling Group----------------------------
con_as = AutoScaleConnection()
lc = LaunchConfiguration(name='Project2.2_Lauch_Config',
                         image_id=DC_IMAGE,
                         key_name=KEY_NAME,
                         security_groups=SECURITY_GROUP2,
                         instance_type=DC_INSTANCE_TYPE,
                         instance_monitoring=DETAIL_MON)
con_as.create_launch_configuration(lc)
asg = AutoScalingGroup(name='Project2.2_AutoSacling_Group',
                       load_balancers=[elb['name']],
                       availability_zones=ZONE,
                       health_check_period='120',
                       health_check_type='ELB',
                       launch_config=lc,
                       min_size=1,
                       max_size=5,
                       tags=[Tag(key='Project', value='2.2', propagate_at_launch=True,
                                 resource_id='Project2.2_AutoSacling_Group')])
con_as.create_auto_scaling_group(asg)

# -------------------------Create Scaling Policies------------------------------
scaleOut = ScalingPolicy(name='ScaleOut',
                         adjustment_type='ChangeInCapacity',
Example 10: main
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
#......... some code omitted .........
        instance.update()
        while instance.ip_address is None:
            logging.info("Not ready. Retrying in 10 seconds...")
            time.sleep(10)
            instance.update()
        while True:
            result = subprocess.call(["ssh", "-o",
                "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no",
                "[email protected]{}".format(instance.ip_address), "uname -r"])
            if result != 0:
                logging.info("Not ready for SSH. Retrying in 10 seconds...")
                time.sleep(10)
            else:
                break
        logging.info("Instance has started; running setup script.")
        logging.info("(IP address is {})".format(instance.ip_address))
        subprocess.check_call(["ssh", "-o", "UserKnownHostsFile=/dev/null",
            "-o", "StrictHostKeyChecking=no",
            "[email protected]{}".format(instance.ip_address),
            "sudo stop lsda; sleep 20; sudo rm worker.sh;"
            "wget https://raw.github.com/fatlotus/lsda-infrastructure/"
            "master/servers/worker.sh; sudo bash worker.sh"])
        if options.inspect:
            logging.info("Connect to [email protected]{} to inspect the image."
                         .format(instance.ip_address))
            logging.info("When you're done, press CTRL-C.")
            try:
                while True:
                    time.sleep(3600)
            except KeyboardInterrupt:
                pass
        logging.info("Creating AMI from existing image.")
        new_image = instance.create_image(
            name=('Latest-{:%Y-%m-%d--%H-%M-%S}'.
                  format(datetime.datetime.now())),
            description="(automatically generated)"
        )
        time.sleep(10)
        image_object = conn_ec2.get_image(new_image)
        while image_object.state == "pending":
            logging.info("State is still pending. Retrying in 10 seconds.")
            time.sleep(10)
            image_object.update()
    finally:
        logging.warn("Stopping all nodes.")
        for node in reservation.instances:
            node.terminate()
    logging.info("Creating new LaunchConfiguration.")
    mapping = BlockDeviceMapping()
    mapping["/dev/sdb"] = BlockDeviceType(ephemeral_name="ephemeral0")
    mapping["/dev/sdc"] = BlockDeviceType(ephemeral_name="ephemeral1")
    new_launch_config = LaunchConfiguration(
        conn_ec2_as,
        name=('Latest-{:%Y-%m-%d--%H-%M-%S}'.
              format(datetime.datetime.now())),
        image_id=new_image,
        security_groups=['sg-f9a08492'],
        instance_type='c3.large',
        block_device_mappings=[mapping],
        instance_profile_name=("arn:aws:iam::470084502640:instance-profile"
                               "/dal-access"),
        spot_price=0.02,
    )
    conn_ec2_as.create_launch_configuration(new_launch_config)
    logging.info("Setting launch configuration in existing ASG.")
    group.launch_config_name = new_launch_config.name
    group.update()
    logging.info("Cleaning up old launch configurations.")
    for config in conn_ec2_as.get_all_launch_configurations():
        if config.image_id != new_launch_config.image_id:
            conn_ec2_as.delete_launch_configuration(config.name)
    logging.info("Cleaning up old images.")
    for image in conn_ec2.get_all_images(filters={"name": ["LatestImage"]}):
        if image.id != new_image:
            conn_ec2.deregister_image(image.id, True)
    logging.info("Rollout complete. New image is {}.".format(new_image))
    if not options.no_restart:
        logging.info("Triggering reload of all nodes in ASG.")
        for instance in group.instances:
            for reservation in conn_ec2.get_all_instances(instance.instance_id):
                reservation.stop_all()
Example 11: IcsAS
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
class IcsAS(object):
    """
    ICS Library for AutoScale
    """

    def __init__(self, region, **kwargs):
        self.conn = AutoScaleConnection(region=get_region(region), **kwargs)

    def to_list(self, input):
        """
        Validate input, if not list, but string, make it as a list
        """
        if input is None:
            return input
        elif isinstance(input, list):
            return input
        elif isinstance(input, basestring):
            return [input]
        else:
            raise IcsASException("Need the type '%s' but '%s' found"
                                 % ('list', type(input)))

    def get_group_name_from_instance(self, instance_id):
        """
        Get the ASG name from the specific instance id
        :type instance_id: string
        :param instance_id: EC2 instance id startwith 'i-xxxxxxx'
        :rtype: string
        :return: name of the ASG, this instance belongs to
        """
        instances = self.conn.get_all_autoscaling_instances(
            instance_ids=self.to_list(instance_id))
        if instances:
            return instances[0].group_name
        else:
            return None

    def get_instances_from_group_name(self, name):
        """
        Get the instance from the specific ASG name
        :type name: string
        :param name: the specific ASG name
        :rtype: list
        :return: a list contains all the instances
        """
        instances = []
        for group in self.conn.get_all_groups(names=self.to_list(name)):
            instances.extend(group.instances)
        return instances

    def get_group_from_name(self, name):
        """
        Get the ASG from its name
        :type name: string
        :param name: the ASG name
        :rtype: list
        :return: a list represents the specific ASG(s)
        """
        return self.conn.get_all_groups(names=self.to_list(name))

    def get_launch_config_from_name(self, name):
        """
        Get the Launch Configuration from its name
        :type name: string
        :param name: the Launch Configuration name
        :rtype: list
        :return: a list represents the specific Launch Configuration(s)
        """
        return self.conn.get_all_launch_configurations(
            names=self.to_list(name))

    def create_launch_config(self, launch_config):
        """
        Create the Launch Configuration
        :type launch_config: class
        :param launch_config: boto launch_config object
        :rtype: string
        :return: AWS request Id
        """
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_config_from_name(self, name):
        """
        Delete the Launch Configuration from its name
        :type name: string
        :param name: the name of launch configuration
        :rtype: string
#......... remainder of the code omitted .........
Example 12: __init__
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
class aws:
    def __init__(self, PREFIX='tfound-', ENV='dev', AMI='', TYPE='', SIZE='',
                 DOMAIN='tfound', SSHKEY='myprivatekey', AWSKEY='', AWSSECRET='',
                 AVAIL_ZONES=["us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"]):
        '''
        Shows examples
        Create load balancer group 'tfound-dev-web-lb' for web servers, in dev group for tfound:
            python control-lb-and-groups.py --createlb --env dev --aws SC --type web
        Add an instance to the load balancer group:
            python control-lb-and-groups.py --addtolb=true --env dev --aws SC --type web --instance=i-999999
        Create launch config using ami ami-fa6b8393 (default), medium sized instance, and Autoscale Group 'tfound-dev-web-group' with a min of 2 instances, max 5, with health check on port 80:
            python control-lb-and-groups.py --createlc --ami ami-fa6b8393 --size c1.medium --env dev --aws SC --type web --createag --min 2 --max 5
        Triggers/Health checks are hard coded to spawn new instances when total cpu reaches 60 percent or health check fails.
        '''
        self.PREFIX = PREFIX + DOMAIN + '-' + ENV + '-' + TYPE
        self.ENV = ENV
        self.AMI = AMI
        self.TYPE = TYPE
        self.DOMAIN = DOMAIN
        self.SIZE = SIZE
        self.MIN = MIN
        self.MAX = MAX
        self.SSHKEY = SSHKEY
        self.AWSKEY = AWSKEY
        self.AWSSECRET = AWSSECRET
        self.AVAIL_ZONES = AVAIL_ZONES
        self.LBNAME = self.PREFIX + '-lb'
        self.AGNAME = self.PREFIX + '-group'
        self.TRNAME = self.PREFIX + '-trigger'
        self.LCNAME = self.PREFIX + '-launch_config'
        self.asconn = AutoScaleConnection(self.AWSKEY, self.AWSSECRET)
        self.elbconn = ELBConnection(aws_access_key_id=AWSKEY, aws_secret_access_key=AWSSECRET)
        self.lc = self._buildLaunchConfig()
        self.ag = self._buildAutoscaleGroup()

    def _buildLaunchConfig(self):
        return LaunchConfiguration(name=self.LCNAME,
                                   image_id=self.AMI,
                                   key_name=self.SSHKEY,
                                   security_groups=[self.ENV + '.' + self.TYPE],
                                   user_data='LAUNCHTAGS="' + self.ENV + ' '
                                             + self.TYPE + ' ' + self.DOMAIN + '";',
                                   instance_type=self.SIZE)

    def _buildAutoscaleGroup(self):
        return AutoScalingGroup(group_name=self.AGNAME,
                                load_balancers=[self.LBNAME],
                                availability_zones=self.AVAIL_ZONES,
                                launch_config=self.lc,
                                min_size=self.MIN,
                                max_size=self.MAX)

    def getGroups(self):
        '''get existing lb groups'''
        # conn = AutoScaleConnection(AWSKEY, AWSSECRET)
        #conn = AutoScaleConnection()
        return self.asconn.get_all_groups()

    def getActivities(self, AUTOSCALE_GROUP=None):
        return self.asconn.get_all_activities(AUTOSCALE_GROUP)

    def createLaunchConfig(self):
        '''create Launch Configuration to define initial startup params
        '''
        #conn = AutoScaleConnection(AWSKEY, AWSSECRET)
        #lc = self.buildLaunchConfig()
        return self.asconn.create_launch_configuration(self.lc)

    def createAutoscaleGroup(self):
        '''We now have created a launch configuration called tfound...launch-config.
        We are now ready to associate it with our new autoscale group.
        returns autoscale object
        '''
        #conn = AutoScaleConnection(AWSKEY, AWSSECRET)
        #lc = self.buildLaunchConfig()
        return self.asconn.create_auto_scaling_group(self.ag)
        #conn.get_all_activities(ag)

    def createTrigger(self, AUTOSCALE_GROUP=None):
        '''
        you create a trigger on a group, pass in a group object
        this creates a trigger that scales up to MAX instances if average cpu utilitzation goes over 60,
        scales down to MIN instances if under 40 avg cpu
        '''
        #conn = AutoScaleConnection(AWSKEY, AWSSECRET)
        tr = Trigger(name=self.TRNAME,
                     autoscale_group=AUTOSCALE_GROUP,
                     measure_name='CPUUtilization', statistic='Average',
                     unit='Percent',
                     dimensions=[('AutoScalingGroupName', AUTOSCALE_GROUP.name),
                                 ('Namespace', 'AWS/EC2')],
                     period=120, lower_threshold=10,
                     lower_breach_scale_increment='-1',
                     upper_threshold=30,
                     upper_breach_scale_increment='1',
                     breach_duration=360)
        return self.asconn.create_trigger(tr)
#......... remainder of the code omitted .........
Example 13: LaunchConfiguration
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
response = client.add_tags(
    LoadBalancerNames=[
        'Project22hongf',
    ],
    Tags=[
        {
            'Key': 'Project',
            'Value': '2.2'
        },
    ]
)
# Create Launch Configuration
print 'Creating LC'
lc = LaunchConfiguration(name='Project22', image_id=DC_IMAGE, key_name=KEY_NAME,
                         instance_type=DC_TYPE, instance_monitoring=True,
                         security_groups=['All_Traffic'])
asg_conn.create_launch_configuration(lc)
print 'LC created'
# Create Auto Scaling Group
print 'Creating ASG'
asg = AutoScalingGroup(group_name='Project22group', load_balancers=['Project22hongf'],
                       health_check_type='ELB', health_check_period='119',
                       desired_capacity=5, availability_zones=['us-east-1c'],
                       launch_config=lc, min_size=5, max_size=5,
                       tags=[boto.ec2.autoscale.tag.Tag(key='Project', value='2.2',
                                                        resource_id='Project22group',
                                                        propagate_at_launch=True)])
asg_conn.create_auto_scaling_group(asg)
print 'ASG created'
# Create Scaling Policy
print 'Creating Scaling Policy'
scale_out_policy = ScalingPolicy(name='scale_out', adjustment_type='ChangeInCapacity',
                                 as_name='Project22group', scaling_adjustment=1, cooldown=60)
asg_conn.create_scaling_policy(scale_out_policy)
scale_in_policy = ScalingPolicy(name='scale_in', adjustment_type='ChangeInCapacity',
                                as_name='Project22group', scaling_adjustment=-1, cooldown=60)
Example 14: Cloud
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
class Cloud(object):
    def __init__(self, cloud_config):
        self.config = cloud_config
        self.all_instances = []
        self.failed_launch = False
        self.failed_count = 0
        self.failed_last_valid_count = 0
        self._conn = None
        self._as_conn = None
        self._lc = None
        self._asg = None
        self._last_asg_launch_attempt = None
        self.maxed = False
        self._last_launch_attempt = datetime.datetime.utcnow()
        self._initialize()

    def _create_connection(self):
        LOG.debug("Creating connection for %s" % self.config.name)
        self._conn = boto.connect_ec2(self.config.access_id,
                                      self.config.secret_key,
                                      validate_certs=False)
        self._conn.host = self.config.cloud_uri
        self._conn.port = self.config.cloud_port

    def _create_autoscale_connection(self):
        LOG.debug("Creating autoscale connection for %s" % self.config.name)
        region = RegionInfo(name=self.config.cloud_type,
                            endpoint=self.config.as_uri)
        self._as_conn = AutoScaleConnection(
            aws_access_key_id=self.config.access_id,
            aws_secret_access_key=self.config.secret_key,
            is_secure=True,
            port=self.config.as_port,
            region=region,
            validate_certs=False)

    def _create_or_set_launch_configuration(self):
        name = self.config.lc_name
        if not self._lc:
            LOG.debug("Attempting to load launch configuration: %s" % (name))
            lc = self._as_conn.get_all_launch_configurations(names=[name])
            if len(lc) == 1:
                LOG.debug("Launch configuration %s found." % (name))
                self._lc = lc[0]
        if not self._lc:
            #TODO(pdmars): key and security groups are hardcoded for now, gross
            if self.config.user_data_file is not None:
                user_data_file = self.config.user_data_file
                with open(user_data_file) as f:
                    user_data = f.read()
            else:
                user_data = None
            LOG.debug("Creating launch configuration %s" % name)
            LOG.debug("\tname: %s" % name)
            LOG.debug("\timage_id: %s" % self.config.image_id)
            LOG.debug("\tinstance_type: %s" % self.config.instance_type)
            LOG.debug("\tuser_data: %s" % user_data)
            self._lc = LaunchConfiguration(
                name=name,
                image_id=self.config.image_id,
                key_name="phantomkey",
                security_groups=['default'],
                instance_type=self.config.instance_type,
                user_data=user_data)
            self._as_conn.create_launch_configuration(self._lc)

    def _create_or_set_autoscale_group(self):
        name = self.config.asg_name
        if not self._asg:
            LOG.debug("Attempting to load autoscale group: %s" % name)
            asg = self._as_conn.get_all_groups(names=[name])
            LOG.debug("Autoscale group: %s" % asg)
            if len(asg) == 1:
                LOG.debug("Autoscale group %s found." % name)
                self._asg = asg[0]
        if not self._asg:
            # TODO(pdmars): more hard coded grossness, for now
            try:
                cloud_guess = self.config.lc_name.split("@")[1].strip()
            except Exception as e:
                LOG.warn("Unable to guess cloud for auto scale tags")
                LOG.warn("Setting cloud to hotel")
                cloud_guess = "hotel"
            policy_name_key = "PHANTOM_DEFINITION"
            policy_name = "error_overflow_n_preserving"
            ordered_clouds_key = "clouds"
            n_preserve_key = "minimum_vms"
            ordered_clouds = cloud_guess + ":-1"
            n_preserve = 0
            policy_tag = Tag(connection=self._as_conn, key=policy_name_key,
                             value=policy_name, resource_id=name)
            clouds_tag = Tag(connection=self._as_conn, key=ordered_clouds_key,
                             value=ordered_clouds, resource_id=name)
            npreserve_tag = Tag(connection=self._as_conn, key=n_preserve_key,
                                value=n_preserve, resource_id=name)
            tags = [policy_tag, clouds_tag, npreserve_tag]
            zones = [self.config.az]
            LOG.debug("Creating autoscale group %s" % name)
            LOG.debug("\tname: %s" % name)
            LOG.debug("\tavailability_zones: %s" % zones)
#......... remainder of the code omitted .........
Example 15: __init__
# Required module import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import create_launch_configuration [as alias]
class AutoScale:

    def __init__(self, args):
        """
        Initializing basic variables needed for auto scaling
        """
        self.configs = ConfigParser.RawConfigParser()
        self.args = args
        self.test_props = {}
        self.props = {}
        self.ec2_connection = EC2Connection(self.args.access_key, self.args.secret_key)
        self.autoscale_connection = AutoScaleConnection(self.args.access_key, self.args.secret_key)
        self.elb_connection = ELBConnection(self.args.access_key, self.args.secret_key)
        self.cw_connection = CloudWatchConnection(self.args.access_key, self.args.secret_key)
        self.firstInstance = None
        self.launchConfiguration = None
        self.healthCheck = None

    def loadConfigs(self):
        """
        FIX ME: Currently doesnt do anything
        This method will load the configurations from boto config file if present else will
        accept parameters passed by user.
        """
        if os.path.isfile("/etc/boto.cfg"):
            self.configs.read("/etc/boto.cfg")
            conf = self.configs.sections()
            self.populateConfigs(conf)
        if os.path.isfile("~/.boto"):
            self.configs.read("~/.boto")
            conf = self.configs.sections()
            self.populateConfigs(conf)
        print ">>> Loaded configs"

    def populateConfigs(self, sections):
        for section in sections:
            self.boto_props[section] = self.configs.items(section)
            for item in self.boto_props[section]:
                key, value = item
                if not self.props.has_key(key):
                    self.props[key] = value

    def createLaunchConfiguration(self, lc_name, ami_id, key_name):
        """
        Creates launch configuration for the auto scaling cluster
        """
        self.launchConfiguration = LaunchConfiguration(name=lc_name,
                                                       image_id=ami_id,
                                                       key_name=key_name)
        self.autoscale_connection.create_launch_configuration(self.launchConfiguration)
        print ">>> Created launch configuration: " + lc_name

    def createAutoScaleGroup(self, asg_name):
        """
        Create a Auto scaling group for the auto scaling cluster
        """
        autoScalingGroup = AutoScalingGroup(group_name=asg_name,
                                            load_balancers=[self.args.lb_name],
                                            launch_config=self.launchConfiguration,
                                            min_size=self.args.min_size,
                                            max_size=self.args.max_size,
                                            availability_zones=['us-east-1a'])
        self.autoscale_connection.create_auto_scaling_group(autoScalingGroup)
        print ">>> Created auto scaling group: " + asg_name

    def createTrigger(self, trigger_name, measure, asg_name):
        """
        Trigger to spawn new instances as per specific metrics
        """
        alarm_actions = []
        dimensions = {"AutoScalingGroupName": asg_name}
        policies = self.autoscale_connection.get_all_policies(as_group=self.args.asg_name,
                                                              policy_names=[self.args.asp_name])
        for policy in policies:
            alarm_actions.append(policy.policy_arn)
        alarm = MetricAlarm(name=trigger_name,
                            namespace="AWS/EC2",
                            metric=measure,
                            statistic="Average",
                            comparison=">=",
                            threshold=50,
                            period=60,
                            unit="Percent",
                            evaluation_periods=2,
                            alarm_actions=alarm_actions,
                            dimensions=dimensions)
        self.cw_connection.create_alarm(alarm)
        print ">>> Created trigger: " + self.args.trigger

    def createAutoScalePolicy(self, asp_name):
        """
        Creates a Auto scaling policy to Add/Remove a instance from auto scaling cluster
        """
        self.autoScalingUpPolicy = ScalingPolicy(name=asp_name + '-up',
                                                 adjustment_type="ChangeInCapacity",
                                                 as_name=self.args.asg_name,
                                                 scaling_adjustment=1,
                                                 cooldown=180)
        self.autoScalingDownPolicy = ScalingPolicy(name=asp_name + '-down',
#......... remainder of the code omitted .........