This article collects typical usage examples of the Python method boto.ec2.autoscale.AutoScaleConnection.terminate_instance. If you are unsure what AutoScaleConnection.terminate_instance does or how to use it, the curated code examples below may help. You can also read further about its containing class, boto.ec2.autoscale.AutoScaleConnection.
The four code examples of AutoScaleConnection.terminate_instance shown below are sorted by popularity by default. You can vote for the examples you like or find useful; your feedback helps surface better Python code examples.
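Before the examples, here is a minimal, hedged sketch of calling the method directly against AWS with boto 2.x. The region name and instance ID are placeholders, and credentials are assumed to come from the environment:

# Minimal sketch (boto 2.x): terminate an instance through its Auto Scaling group.
# The region, instance ID, and credential setup are placeholders/assumptions.
import boto.ec2.autoscale

conn = boto.ec2.autoscale.connect_to_region('us-east-1')
# decrement_capacity=True also shrinks the group's desired capacity by one,
# so Auto Scaling will not launch a replacement instance.
activity = conn.terminate_instance('i-12345678', decrement_capacity=True)
print(activity)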
Example 1: BotoScaleInterface
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import terminate_instance [as alias]
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        self.access_id = access_id
        self.secret_key = secret_key
        self.token = token
        self.set_endpoint(clc_host)

    def set_endpoint(self, endpoint):
        #boto.set_stream_logger('scale')
        path = '/services/AutoScaling'
        reg = RegionInfo(name='eucalyptus', endpoint=endpoint)
        port = 8773
        if endpoint[len(endpoint)-13:] == 'amazonaws.com':
            endpoint = endpoint.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = RegionInfo(endpoint=endpoint)
            port = 443
        self.conn = AutoScaleConnection(self.access_id, self.secret_key, region=reg,
                                        port=port, path=path,
                                        is_secure=True, security_token=self.token, debug=0)
        self.conn.APIVersion = '2011-01-01'
        if not(endpoint[len(endpoint)-13:] == 'amazonaws.com'):
            self.conn.auth_region_name = 'Eucalyptus'
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)
        f.close()

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # notice, honor_cooldown not supported.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj

    # policy related
    def delete_policy(self, policy_name, autoscale_group=None):
        return self.conn.delete_policy(policy_name, autoscale_group)

    def get_all_policies(self, as_group=None, policy_names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_policies(as_group, policy_names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Policies.json")
        return obj

    def execute_policy(self, policy_name, as_group=None, honor_cooldown=None):
        return self.conn.execute_policy(policy_name, as_group, honor_cooldown)

    def create_scaling_policy(self, scaling_policy):
        return self.conn.create_scaling_policy(scaling_policy)
#......... the rest of this example is omitted .........
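A side note on the set_desired_capacity wrapper above: as its inline comment says, going through group.set_capacity drops the honor_cooldown flag, while boto's connection-level call accepts it. A rough sketch of the direct call (the group name and capacity value are placeholders):

# Sketch: setting desired capacity at the connection level, where honor_cooldown is available.
# 'my-asg' and the capacity value are placeholders.
import boto.ec2.autoscale

conn = boto.ec2.autoscale.connect_to_region('us-east-1')
conn.set_desired_capacity('my-asg', 4, honor_cooldown=True)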
Example 2: IcsAS
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import terminate_instance [as alias]
#......... the earlier part of this example is omitted .........
        group.update()
        if self.get_launch_config_from_name(new_lc_name):
            group = self.get_group_from_name(name)[0]
            if group.launch_config_name == new_lc_name:
                return self.delete_launch_config_from_name(old_lc_name)
            else:
                raise IcsASException("failed to update " +
                                     "launch config for ASG '%s'"
                                     % name)
        else:
            raise IcsASException("no such new launch config '%s'"
                                 % new_lc_name)

    def suspend_scaling_group(self, name, scaling_processes=None):
        """
        Suspends Auto Scaling processes for an Auto Scaling group.

        :type name: string
        :param name: the ASG name

        :type scaling_processes: string or list
        :param scaling_processes: scaling process names

        * Launch
        * Terminate
        * HealthCheck
        * ReplaceUnhealthy
        * AZRebalance
        * AlarmNotification
        * ScheduledActions
        * AddToLoadBalancer
        """
        if not isinstance(name, basestring):
            return None
        group = self.get_group_from_name(self.to_list(name))[0]
        return group.suspend_processes(self.to_list(scaling_processes))

    def resume_scaling_group(self, name, scaling_processes=None):
        """
        Resumes Auto Scaling processes for an Auto Scaling group.

        :type name: string
        :param name: the ASG name

        :type scaling_processes: string or list
        :param scaling_processes: scaling process names

        * Launch
        * Terminate
        * HealthCheck
        * ReplaceUnhealthy
        * AZRebalance
        * AlarmNotification
        * ScheduledActions
        * AddToLoadBalancer
        """
        if not isinstance(name, basestring):
            return None
        group = self.get_group_from_name(self.to_list(name))[0]
        return group.resume_processes(self.to_list(scaling_processes))

    def terminate_group_instance(self, instance_id, decrement_capacity=True):
        """
        Terminates the specified instance. The desired group size can
        also be adjusted, if desired.

        :type instance_id: str
        :param instance_id: The ID of the instance to be terminated.

        :type decrement_capacity: bool
        :param decrement_capacity: Whether to decrement the size of the
            autoscaling group or not.
        """
        return self.conn.terminate_instance(
            instance_id=instance_id,
            decrement_capacity=decrement_capacity)

    def update_instance_health(self, instance_id, health_status,
                               grace_period=False):
        """
        Explicitly set the health status of an instance.

        :type instance_id: str
        :param instance_id: The identifier of the EC2 instance

        :type health_status: str
        :param health_status: The health status of the instance.

        * Healthy: the instance is healthy and should remain in service.
        * Unhealthy: the instance is unhealthy. \
          Auto Scaling should terminate and replace it.

        :type grace_period: bool
        :param grace_period: If True, this call should respect
            the grace period associated with the group.
        """
        self.conn.set_instance_health(instance_id, health_status,
                                      should_respect_grace_period=grace_period)
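The suspend_scaling_group and resume_scaling_group helpers above wrap boto's group-level methods; without the IcsAS layer, the equivalent calls look roughly like this (the group name and process list are placeholders):

# Sketch: suspending and resuming scaling processes with boto 2.x directly.
# The group name and process names are placeholders.
import boto.ec2.autoscale

conn = boto.ec2.autoscale.connect_to_region('us-east-1')
group = conn.get_all_groups(names=['my-asg'])[0]
group.suspend_processes(['AZRebalance', 'AlarmNotification'])  # pause only these processes
# ... perform maintenance work ...
group.resume_processes()  # passing nothing resumes all suspended processes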
Example 3: BotoScaleInterface
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import terminate_instance [as alias]
class BotoScaleInterface(ScaleInterface):
    conn = None
    saveclcdata = False

    def __init__(self, clc_host, access_id, secret_key, token):
        #boto.set_stream_logger('foo')
        path = '/services/AutoScaling'
        port = 8773
        if clc_host[len(clc_host)-13:] == 'amazonaws.com':
            clc_host = clc_host.replace('ec2', 'autoscaling', 1)
            path = '/'
            reg = None
            port = 443
        reg = RegionInfo(name='eucalyptus', endpoint=clc_host)
        self.conn = AutoScaleConnection(access_id, secret_key, region=reg,
                                        port=port, path=path,
                                        is_secure=True, security_token=token, debug=0)
        self.conn.https_validate_certificates = False
        self.conn.http_connection_kwargs['timeout'] = 30

    def __save_json__(self, obj, name):
        f = open(name, 'w')
        json.dump(obj, f, cls=BotoJsonScaleEncoder, indent=2)
        f.close()

    ##
    # autoscaling methods
    ##
    def create_auto_scaling_group(self, as_group):
        return self.conn.create_auto_scaling_group(as_group)

    def delete_auto_scaling_group(self, name, force_delete=False):
        return self.conn.delete_auto_scaling_group(name, force_delete)

    def get_all_groups(self, names=None, max_records=None, next_token=None):
        obj = self.conn.get_all_groups(names, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Groups.json")
        return obj

    def get_all_autoscaling_instances(self, instance_ids=None, max_records=None, next_token=None):
        obj = self.conn.get_all_autoscaling_instances(instance_ids, max_records, next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_Instances.json")
        return obj

    def set_desired_capacity(self, group_name, desired_capacity, honor_cooldown=False):
        group = self.conn.get_all_groups([group_name])[0]
        # notice, honor_cooldown not supported.
        return group.set_capacity(desired_capacity)

    def set_instance_health(self, instance_id, health_status, should_respect_grace_period=True):
        return self.conn.set_instance_health(instance_id, health_status,
                                             should_respect_grace_period)

    def terminate_instance(self, instance_id, decrement_capacity=True):
        return self.conn.terminate_instance(instance_id, decrement_capacity)

    def update_autoscaling_group(self, as_group):
        as_group.connection = self.conn
        return as_group.update()

    def create_launch_configuration(self, launch_config):
        return self.conn.create_launch_configuration(launch_config)

    def delete_launch_configuration(self, launch_config_name):
        return self.conn.delete_launch_configuration(launch_config_name)

    def get_all_launch_configurations(self, config_names, max_records, next_token):
        obj = self.conn.get_all_launch_configurations(names=config_names, max_records=max_records,
                                                      next_token=next_token)
        if self.saveclcdata:
            self.__save_json__(obj, "mockdata/AS_LaunchConfigs.json")
        return obj
Example 4: Cloud
# Required import: from boto.ec2.autoscale import AutoScaleConnection [as alias]
# Or: from boto.ec2.autoscale.AutoScaleConnection import terminate_instance [as alias]
#......... the earlier part of this example is omitted .........
            self.maxed = True
        else:
            self.maxed = False

    def _refresh_asg(self):
        LOG.debug("%s: refreshing autoscale group" % self.config.name)
        asg_name = self.config.asg_name
        asgs = self._as_conn.get_all_groups(names=[asg_name])
        if len(asgs) == 1:
            self._asg = asgs[0]
            LOG.debug("\trefreshed autoscale group: %s" % asg_name)
        else:
            LOG.warn("\tunable to refresh autoscale group: %s" % asg_name)

    def refresh(self, cluster):
        self._refresh_asg()
        self._refresh_instances()

    def get_total_num_valid_cores(self):
        LOG.debug("%s: getting number of valid cores" % self.config.name)
        num_valid_instances = len(self.get_valid_instances())
        total_valid_cores = num_valid_instances * self.config.instance_cores
        num_desired_instances = self._asg.desired_capacity
        num_desired_cores = num_desired_instances * self.config.instance_cores
        if num_desired_cores != total_valid_cores:
            LOG.debug("\tmismatching core counts")
            LOG.debug("\tnum_desired_cores: %d" % (num_desired_cores))
            LOG.debug("\ttotal_valid_cores: %d" % (total_valid_cores))
        return total_valid_cores

    def get_instance_by_id(self, id):
        LOG.debug("Searching for instance %s" % id)
        for instance in self.all_instances:
            if instance.id == id:
                LOG.debug("Found instance %s" % id)
                return instance
        return None

    def get_instance_ids_for_public_dns_names(self, public_dns_names):
        instance_ids = []
        for instance in self.all_instances:
            if instance.public_dns_name in public_dns_names:
                instance_ids.append(instance.id)
        return instance_ids

    def get_public_dns_names_close_to_charge(self):
        instances_close_to_charge = []
        sleep_secs = self.config.get_loop_sleep_secs()
        cur_utc_time = datetime.datetime.utcnow()
        valid_instances = self.get_valid_instances()
        time_fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
        for instance in valid_instances:
            launch_time = datetime.datetime.strptime(instance.launch_time,
                                                     time_fmt)
            time_diff = cur_utc_time - launch_time
            # Ignores microseconds
            time_diff_secs = time_diff.seconds + time_diff.days * 24 * 3600
            cur_charge_secs = time_diff_secs % self.config.charge_time_secs
            secs_to_charge = self.config.charge_time_secs - cur_charge_secs
            LOG.debug("%s:%s: charge: %d; current: %d; to charge: %d" % (
                instance.id, instance.public_dns_name,
                self.config.charge_time_secs,
                cur_charge_secs, secs_to_charge))
            if secs_to_charge < (3 * sleep_secs):
                instances_close_to_charge.append(instance.public_dns_name)
        return instances_close_to_charge

    def delete_instances(self, instance_ids=[]):
        if not instance_ids:
            return
        LOG.debug("Deleting instances: %s" % instance_ids)
        # TODO(pdmars): this has the potential to kill instances running jobs
        # maybe I should err on the side of having extra instances if the
        # capacity is higher than the cloud can currently support
        num_instances = len(self.all_instances)
        if ((self._asg.desired_capacity > num_instances) and
                (num_instances > 0)):
            LOG.warn("Desired capacity is greater than num_instances running")
            LOG.warn("Adjusting desired capacity to match")
            self.set_capacity(num_instances)
        for instance_id in instance_ids:
            self._as_conn.terminate_instance(instance_id)
            # TODO(pdmars): due to a bug in phantom, maybe this will help
            # 2013/04/05: this might not be relevant anymore
            time.sleep(.1)

    def launch_autoscale_instances(self, num_instances=1):
        new_capacity = self._asg.desired_capacity + int(num_instances)
        if new_capacity > self.config.max_instances:
            new_capacity = self.config.max_instances
            LOG.warn("%s can launch %s total instances" % (self.config.name,
                                                           new_capacity))
        self._last_launch_attempt = datetime.datetime.utcnow()
        LOG.debug("Setting cloud capacity for %s to %s" % (self.config.name,
                                                           new_capacity))
        self.set_capacity(new_capacity)

    def set_capacity(self, new_capacity):
        self._asg.set_capacity(new_capacity)
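The billing-window arithmetic in get_public_dns_names_close_to_charge is worth spelling out: with an hourly charge period, an instance that has been up for 5000 seconds is 1400 seconds into its current hour, so 2200 seconds remain until the next charge. A standalone sketch with illustrative numbers:

# Standalone sketch of the charge-window math used above (illustrative values only).
charge_time_secs = 3600                                # billing period: one hour
time_diff_secs = 5000                                  # total uptime in seconds
cur_charge_secs = time_diff_secs % charge_time_secs    # 1400 s into the current period
secs_to_charge = charge_time_secs - cur_charge_secs    # 2200 s until the next charge
sleep_secs = 300                                       # main loop sleep interval
# An instance is "close to charge" when fewer than three loop iterations remain.
print(secs_to_charge < (3 * sleep_secs))               # False for these numbers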