This article collects typical usage examples of the Python class boto.ec2.autoscale.AutoScaleConnection. If you are unsure what AutoScaleConnection does or how to use it, the curated class code examples below may help.
The following presents 15 code examples of the AutoScaleConnection class, sorted by popularity by default.
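Before the individual examples, here is a minimal sketch of the pattern most of them share: opening a connection and listing the existing Auto Scaling groups. The region name and credentials below are placeholders, not values taken from any of the examples.

import boto.ec2.autoscale

# Placeholder region and credentials -- replace with your own.
conn = boto.ec2.autoscale.connect_to_region(
    'us-east-1',
    aws_access_key_id='YOUR_ACCESS_KEY',
    aws_secret_access_key='YOUR_SECRET_KEY')

# Print the name and desired capacity of every Auto Scaling group.
for group in conn.get_all_groups():
    print group.name, group.desired_capacity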
Example 1: test_ebs_optimized_regression
def test_ebs_optimized_regression(self):
c = AutoScaleConnection()
time_string = '%d' % int(time.time())
lc_name = 'lc-%s' % time_string
lc = LaunchConfiguration(
name=lc_name,
image_id='ami-2272864b',
instance_type='t1.micro',
ebs_optimized=True
)
# This failed due to the difference between native Python ``True/False``
# & the expected string variants.
c.create_launch_configuration(lc)
self.addCleanup(c.delete_launch_configuration, lc_name)
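A possible follow-up check, not part of the original test, would read the launch configuration back to confirm that ebs_optimized survives the round trip; the calls below are standard boto, but the assertion itself is only an illustrative assumption, continuing inside the same test method.

    # Hypothetical continuation of the test: fetch the launch configuration
    # back and confirm the ebs_optimized flag round-trips as a boolean.
    fetched = c.get_all_launch_configurations(names=[lc_name])[0]
    self.assertTrue(fetched.ebs_optimized)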
Example 2: _remove_from_worker_pool
def _remove_from_worker_pool():
"""
    Ensures that this instance is shut down and unregistered from the worker
pool.
"""
# Retrieve the current state of the pool.
pool = AutoScaleConnection().get_all_groups(["LSDA Worker Pool"])[0]
if pool.desired_capacity <= pool.min_size:
return
# Reduce the pool size and shut ourself down.
pool.desired_capacity -= 1
pool.update()
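Lowering desired_capacity leaves it to the group to choose which instance to terminate. An alternative, sketched here as an assumption rather than taken from the original daemon, is to terminate this specific instance and decrement the capacity in one call:

import urllib
from boto.ec2.autoscale import AutoScaleConnection

# Hypothetical alternative: look up our own instance id from the EC2 metadata
# service, then terminate it while decrementing the group's desired capacity.
instance_id = urllib.urlopen(
    "http://169.254.169.254/latest/meta-data/instance-id").read()
AutoScaleConnection().terminate_instance(instance_id, decrement_capacity=True)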
Example 3: __init__
def __init__(self, PREFIX='tfound-', ENV='dev', AMI='', TYPE='', SIZE='', MIN=2, MAX=5,
             DOMAIN='tfound', SSHKEY='myprivatekey', AWSKEY='', AWSSECRET='',
             AVAIL_ZONES=["us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d"]):
'''
Shows examples
Create load balancer group 'tfound-dev-web-lb' for web servers, in dev group for tfound:
python control-lb-and-groups.py --createlb --env dev --aws SC --type web
Add an instance to the load balancer group:
python control-lb-and-groups.py --addtolb=true --env dev --aws SC --type web --instance=i-999999
Create launch config using ami ami-fa6b8393 (default), medium sized instance, and Autoscale Group 'tfound-dev-web-group' with a min of 2 instances, max 5, with health check on port 80:
python control-lb-and-groups.py --createlc --ami ami-fa6b8393 --size c1.medium --env dev --aws SC --type web --createag --min 2 --max 5
    Triggers/health checks are hard-coded to spawn new instances when total CPU reaches 60 percent or a health check fails.
'''
self.PREFIX=PREFIX+DOMAIN+'-'+ENV+'-'+TYPE
self.ENV=ENV
self.AMI=AMI
self.TYPE=TYPE
self.DOMAIN=DOMAIN
self.SIZE=SIZE
self.MIN=MIN
self.MAX=MAX
self.SSHKEY=SSHKEY
self.AWSKEY=AWSKEY
self.AWSSECRET=AWSSECRET
self.AVAIL_ZONES=AVAIL_ZONES
self.LBNAME=self.PREFIX+'-lb'
self.AGNAME=self.PREFIX+'-group'
self.TRNAME=self.PREFIX+'-trigger'
self.LCNAME=self.PREFIX+'-launch_config'
self.asconn=AutoScaleConnection(self.AWSKEY, self.AWSSECRET)
self.elbconn = ELBConnection(aws_access_key_id=AWSKEY,aws_secret_access_key=AWSSECRET)
self.lc = self._buildLaunchConfig()
self.ag = self._buildAutoscaleGroup()
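The docstring says triggers are hard-coded at 60 percent CPU, but the _buildLaunchConfig and _buildAutoscaleGroup helpers are not shown here. The following is only a sketch of what such a threshold might look like with a scaling policy plus a CloudWatch alarm; the policy and alarm names are made up, and the code assumes it continues inside __init__.

    from boto.ec2.autoscale import ScalingPolicy
    from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm

    # Hypothetical scale-up policy for the group configured above.
    policy = ScalingPolicy(name=self.PREFIX + '-scale-up',
                           as_name=self.AGNAME,
                           adjustment_type='ChangeInCapacity',
                           scaling_adjustment=1,
                           cooldown=300)
    self.asconn.create_scaling_policy(policy)
    policy = self.asconn.get_all_policies(
        as_group=self.AGNAME, policy_names=[self.PREFIX + '-scale-up'])[0]

    # Alarm that fires when average CPU across the group reaches 60 percent.
    cwconn = CloudWatchConnection(self.AWSKEY, self.AWSSECRET)
    alarm = MetricAlarm(name=self.PREFIX + '-cpu-high',
                        namespace='AWS/EC2',
                        metric='CPUUtilization',
                        statistic='Average',
                        comparison='>=',
                        threshold=60,
                        period=60,
                        evaluation_periods=2,
                        alarm_actions=[policy.policy_arn],
                        dimensions={'AutoScalingGroupName': self.AGNAME})
    cwconn.create_alarm(alarm)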
Example 4: main
def main():
parser = optparse.OptionParser()
parser.add_option( "-c", "--config", dest="config_file", help="AutoScale config INI", metavar="FILE" )
(options, args) = parser.parse_args()
logging.info( "Using config file [%s]" % options.config_file )
config = parse_config( options.config_file )
aws_access = config.get("AWS", 'access')
aws_secret = config.get("AWS", 'secret')
logging.debug( "Connecting to AWS with access [%s] and secret [%s]" % ( aws_access, aws_secret ) )
aws_connection = AutoScaleConnection( aws_access, aws_secret )
print "AutoScalingGroups:"
print aws_connection.get_all_groups().__dict__
Example 5: autoscale_group_hosts
def autoscale_group_hosts(group_name):
import boto.ec2
from boto.ec2.autoscale import AutoScaleConnection
ec2 = boto.connect_ec2()
conn = AutoScaleConnection()
groups = conn.get_all_groups(names=[])
groups = [ group for group in groups if group.name.startswith(group_name) ]
    instance_ids = []
    for group in groups:
        print group.name
        instance_ids.extend([i.instance_id for i in group.instances])
    # Fetch the instance details once, after collecting ids from every matching
    # group, so earlier instances are not fetched (and returned) repeatedly.
    instances = ec2.get_only_instances(instance_ids)
return [i.private_ip_address for i in instances], instances[0].id, instances[0].tags.get("aws:autoscaling:groupName")
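A purely illustrative call showing how the three return values line up; the group-name prefix here is made up:

# Hypothetical usage; 'myapp-prod-web' is a made-up group-name prefix.
ips, first_instance_id, group_name = autoscale_group_hosts('myapp-prod-web')
print ips, first_instance_id, group_name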
Example 6: connect
def connect(self, groupname):
self.ec2 = boto.connect_ec2()
self.cw = CloudWatchConnection()
self.autoscale = AutoScaleConnection()
self.group = self.autoscale.get_all_groups(names=[groupname])[0]
self.instances = len(self.group.instances)
self.desired = self.group.desired_capacity
self.name = groupname
Example 7: _is_up_to_date
def _is_up_to_date():
"""
Returns True if this instance is up to date.
"""
# Retrieve instance information.
conn = AutoScaleConnection()
pool = conn.get_all_groups(["LSDA Worker Pool"])[0]
config = conn.get_all_launch_configurations(
names=[pool.launch_config_name])[0]
    # Retrieve the AMI configured for the pool and the AMI of this instance.
config_ami = config.image_id
my_ami = urllib.urlopen("http://169.254.169.254/latest/"
"meta-data/ami-id").read()
return config_ami == my_ami
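Examples 2 and 7 read like two halves of the same worker daemon. A plausible way to combine them, with only the two helpers themselves taken from the examples above, is:

# Sketch: recycle this worker if its AMI no longer matches the launch config.
if not _is_up_to_date():
    _remove_from_worker_pool()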
Example 8: main
def main():
"""
Main entry point for the automated scaling daemon.
"""
# Configure logging.
logging.basicConfig(
format = "%(asctime)-15s %(levelname)5s %(message)s",
level = logging.INFO
)
# Read configuration.
options = yaml.load(open("config.yaml"))
# Connect to the RabbitMQ cluster.
params = pika.ConnectionParameters(host=options["amqp"])
conn = pika.BlockingConnection(params)
channel = conn.channel()
while True:
# Ensure that we have things stuck in the queue for the given amount
# of time.
for i in xrange(DELAY / 5):
queue_length = get_queue_length(channel, "stable")
logging.info("Queue length: {}".format(queue_length))
if queue_length == 0:
break
time.sleep(5)
else:
# Scale up!
group = AutoScaleConnection().get_all_groups(["LSDA Worker Pool"])[0]
group.desired_capacity = min(
group.desired_capacity + 2, group.max_size)
group.update()
logging.info(
"Triggering increase to {}".format(group.desired_capacity))
time.sleep(300)
# Wait until next polling event.
time.sleep(30)
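get_queue_length is called but never defined in this example. With pika, a passive queue_declare is one plausible implementation; the helper below is an assumption, not part of the original daemon.

def get_queue_length(channel, queue_name):
    """Hypothetical helper: return the number of ready messages in a queue."""
    # passive=True only inspects the queue and raises if it does not exist.
    frame = channel.queue_declare(queue=queue_name, passive=True)
    return frame.method.message_count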
Example 9: __init__
def __init__(self, clc_host, access_id, secret_key, token):
#boto.set_stream_logger('foo')
path='/services/AutoScaling'
port=8773
if clc_host[len(clc_host)-13:] == 'amazonaws.com':
clc_host = clc_host.replace('ec2', 'autoscaling', 1)
path = '/'
reg = None
port=443
reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
if boto.__version__ < '2.6':
self.conn = AutoScaleConnection(access_id, secret_key, region=reg,
port=port, path=path,
is_secure=True, security_token=token, debug=0)
else:
self.conn = AutoScaleConnection(access_id, secret_key, region=reg,
port=port, path=path, validate_certs=False,
is_secure=True, security_token=token, debug=0)
self.conn.http_connection_kwargs['timeout'] = 30
Example 10: _create_autoscale_connection
def _create_autoscale_connection(self):
LOG.debug("Creating autoscale connection for %s" % self.config.name)
region = RegionInfo(name=self.config.cloud_type,
endpoint=self.config.as_uri)
self._as_conn = AutoScaleConnection(
aws_access_key_id=self.config.access_id,
aws_secret_access_key=self.config.secret_key,
is_secure=True,
port=self.config.as_port,
region=region,
validate_certs=False)
Example 11: main
def main():
parser = optparse.OptionParser()
parser.add_option("-c", "--config", dest="config_file", help="AutoScale config INI", metavar="FILE")
(options, args) = parser.parse_args()
logging.info("Using config file [%s]" % options.config_file)
config = parse_config(options.config_file)
aws_access = config.get("AWS", "access")
aws_secret = config.get("AWS", "secret")
logging.debug("Connecting to AWS with access [%s] and secret [%s]" % (aws_access, aws_secret))
aws_connection = AutoScaleConnection(aws_access, aws_secret)
lc = boto.ec2.autoscale.launchconfig.LaunchConfiguration(
name=config.get("LaunchConfig", "name"),
image_id=config.get("LaunchConfig", "image"),
key_name=config.get("LaunchConfig", "key"),
user_data=config.get("LaunchConfig", "user_data"),
security_groups=config.get("LaunchConfig", "security_groups"),
instance_type=config.get("LaunchConfig", "instance_type"),
)
logging.info("LC CONFIG = %s" % lc.__dict__)
asg = boto.ec2.autoscale.group.AutoScalingGroup(
group_name=config.get("AutoScaleGroup", "group_name"),
availability_zones=config.get("AutoScaleGroup", "zones"),
min_size=config.get("AutoScaleGroup", "min_instances"),
max_size=config.get("AutoScaleGroup", "max_instances"),
launch_config=lc,
)
print "ASG dict: %s" % asg.__dict__
asg.connection = aws_connection
params = {"AutoScalingGroupName": asg.name}
asg = aws_connection.get_object("DescribeAutoScalingGroups", params, boto.ec2.autoscale.group.AutoScalingGroup)
print asg
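Note that this example only builds and then describes the group; nothing is actually registered with AWS. If the intent were to create the launch configuration and group, the missing step would presumably look like the calls Example 13 makes, shown here as an assumption:

# Hypothetical creation step, mirroring the calls used in Example 13.
aws_connection.create_launch_configuration(lc)
aws_connection.create_auto_scaling_group(asg)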
Example 12: __init__
def __init__(self, args):
"""
Initializing basic variables needed for auto scaling
"""
self.configs = ConfigParser.RawConfigParser()
self.args = args
self.test_props = {}
self.props = {}
self.ec2_connection = EC2Connection(self.args.access_key, self.args.secret_key)
self.autoscale_connection = AutoScaleConnection(self.args.access_key, self.args.secret_key)
self.elb_connection = ELBConnection(self.args.access_key, self.args.secret_key)
self.cw_connection = CloudWatchConnection(self.args.access_key, self.args.secret_key)
self.firstInstance = None
self.launchConfiguration = None
self.healthCheck = None
Example 13: launch_auto_scaling
def launch_auto_scaling(stage = 'development'):
config = get_provider_dict()
from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup, LaunchConfiguration, Trigger
conn = AutoScaleConnection(fabric.api.env.conf['AWS_ACCESS_KEY_ID'], fabric.api.env.conf['AWS_SECRET_ACCESS_KEY'], host='%s.autoscaling.amazonaws.com' % config['location'][:-1])
    for name, values in config.get(stage, {}).get('autoscale', {}).items():
if any(group.name == name for group in conn.get_all_groups()):
            fabric.api.warn(fabric.colors.yellow('Autoscale group %s already exists' % name))
continue
lc = LaunchConfiguration(name = '%s-launch-config' % name, image_id = values['image'], key_name = config['key'])
conn.create_launch_configuration(lc)
ag = AutoScalingGroup(group_name = name, load_balancers = values.get('load-balancers'), availability_zones = [config['location']], launch_config = lc, min_size = values['min-size'], max_size = values['max-size'])
conn.create_auto_scaling_group(ag)
if 'min-cpu' in values and 'max-cpu' in values:
tr = Trigger(name = '%s-trigger' % name, autoscale_group = ag, measure_name = 'CPUUtilization', statistic = 'Average', unit = 'Percent', dimensions = [('AutoScalingGroupName', ag.name)],
period = 60, lower_threshold = values['min-cpu'], lower_breach_scale_increment = '-1', upper_threshold = values['max-cpu'], upper_breach_scale_increment = '2', breach_duration = 60)
conn.create_trigger(tr)
Example 14: delete_autoscaling
def delete_autoscaling():
con = AutoScaleConnection(aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
aws_access_key_id=AWS_ACCESS_KEY,
region=RegionInfo(name=REGION,
endpoint='autoscaling.%s.amazonaws.com' % REGION))
print "Deleting autoscaling group.."
group = con.get_all_groups(names=[AUTOSCALING_GROUP_NAME])[0]
print "shutting down instances"
group.shutdown_instances()
time.sleep(LONG_SLEEP_PERIOD)
print "Deleting autoscaling group itself"
con.delete_auto_scaling_group(AUTOSCALING_GROUP_NAME, force_delete=True)
print "Deleting launch configuration"
con.delete_launch_configuration(AUTOSCALING_GROUP_NAME)
con.close()
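The fixed LONG_SLEEP_PERIOD may not be long enough for every instance to terminate. A polling loop, sketched here as an alternative and not part of the original function, would wait until the group reports no instances before the delete calls:

# Hypothetical alternative to the fixed sleep: poll until the group is empty.
while con.get_all_groups(names=[AUTOSCALING_GROUP_NAME])[0].instances:
    print "waiting for instances to terminate..."
    time.sleep(30)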
Example 15: __init__
def __init__(self, aws, wait_time_secs, app_name=None,):
"""
Creates the EbsHelper
"""
self.aws = aws
self.ebs = connect_to_region(aws.region, aws_access_key_id=aws.access_key,
aws_secret_access_key=aws.secret_key,
security_token=aws.security_token)
self.autoscale = AutoScaleConnection(aws_access_key_id=aws.access_key,
aws_secret_access_key=aws.secret_key,
security_token=aws.security_token)
self.s3 = S3Connection(
aws_access_key_id=aws.access_key,
aws_secret_access_key=aws.secret_key,
security_token=aws.security_token,
host=(lambda r: 's3.amazonaws.com' if r == 'us-east-1' else 's3-' + r + '.amazonaws.com')(aws.region))
self.app_name = app_name
self.wait_time_secs = wait_time_secs