本文整理汇总了Python中trove.instance.models.Instance类的典型用法代码示例。如果您正苦于以下问题:Python Instance类的具体用法?Python Instance怎么用?Python Instance使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Instance类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _add_shard_cluster
def _add_shard_cluster():
    """Finalize a newly added shard.

    Waits for all shard members to become ready, registers the shard
    through one of the cluster's query routers, then notifies each
    member guest that clustering is complete.

    Relies on closure variables from the enclosing task method:
    ``context``, ``cluster_id``, ``shard_id``.
    """
    db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                       shard_id=shard_id).all()
    instance_ids = [db_instance.id for db_instance in db_instances]
    # Lazy %-args: the message is only formatted when DEBUG is enabled.
    LOG.debug("instances in shard %s: %s", shard_id, instance_ids)
    if not self._all_instances_ready(instance_ids, cluster_id,
                                     shard_id):
        return
    members = [Instance.load(context, instance_id)
               for instance_id in instance_ids]
    db_query_routers = DBInstance.find_all(cluster_id=cluster_id,
                                           type='query_router',
                                           deleted=False).all()
    query_routers = [Instance.load(context, db_query_router.id)
                     for db_query_router in db_query_routers]
    # Any running query router can register the shard; use the first.
    if not self._create_shard(query_routers[0], members):
        return
    for member in members:
        self.get_guest(member).cluster_complete()
示例2: _shrink_cluster
def _shrink_cluster():
    """Remove instances from a Vertica cluster.

    Refuses to remove the master, re-marks k-safety for the reduced
    cluster size, asks the master's guest to shrink the cluster, and
    finally deletes the removed instances.

    Relies on closure variables from the enclosing task method:
    ``context``, ``cluster_id``, ``instance_ids`` (ids slated for
    removal).
    """
    db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                       deleted=False).all()
    all_instance_ids = [db_instance.id for db_instance in db_instances]
    remove_instances = [Instance.load(context, instance_id)
                        for instance_id in instance_ids]
    left_instances = [Instance.load(context, instance_id)
                      for instance_id
                      in all_instance_ids
                      if instance_id not in instance_ids]
    remove_member_ips = [self.get_ip(instance)
                         for instance in remove_instances]
    # k-safety is a function of the surviving cluster size.
    k = VerticaCluster.k_safety(len(left_instances))
    for db_instance in db_instances:
        if db_instance['type'] == 'master':
            master_instance = Instance.load(context,
                                            db_instance.id)
            if self.get_ip(master_instance) in remove_member_ips:
                raise RuntimeError(_("Cannot remove master instance!"))
            # Debug messages are not translated and use lazy %-args
            # instead of eager string interpolation.
            LOG.debug("Marking cluster k-safety: %s", k)
            self.get_guest(master_instance).mark_design_ksafe(k)
            self.get_guest(master_instance).shrink_cluster(
                remove_member_ips)
            break
    for r in remove_instances:
        Instance.delete(r)
示例3: test_create_sg_ha
def test_create_sg_ha(self):
    """create() returns None for both SG and HA instance types when
    the heavy _create_instance path is stubbed out."""
    name = "NAME"
    flavor_id = "flavor_id"
    image_id = "image_id"
    databases = "databases"
    users = "users"
    service_type = "mysql"
    volume_size = "10"
    backup_id = "backup_id"
    master_id = "master_id"
    extend = ""
    # Stub the internal creation path; only the public contract of
    # create() is under test here.
    when(KSC_Instance)._create_instance(any(), any(),
        any(), any(), any(), any(),
        any(), any(), any(), any(), any(), any()).thenReturn(None)
    instance_type = InstanceType.SG
    # assertIsNone gives a clearer failure message than
    # assertEqual(None, ...).
    self.assertIsNone(KSC_Instance.create(self.context, name, flavor_id,
                      image_id, databases, users, service_type,
                      volume_size, backup_id, instance_type, extend,
                      master_id))
    instance_type = InstanceType.HA
    self.assertIsNone(KSC_Instance.create(self.context, name, flavor_id,
                      image_id, databases, users, service_type,
                      volume_size, backup_id, instance_type, extend,
                      master_id))
示例4: _grow_cluster
def _grow_cluster():
    """Join newly created instances to an existing Redis cluster.

    Finds a pre-existing cluster member to act as the meet target,
    waits for the new members to become ready, introduces each new
    guest to the head node, then marks them cluster-complete.

    Relies on closure variables from the enclosing task method:
    ``context``, ``cluster_id``, ``new_instance_ids``.
    """
    db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
    # Supply a default of None: a bare next() raises StopIteration
    # when no pre-existing member is found, which made the
    # "if not cluster_head" check below unreachable.
    cluster_head = next((Instance.load(context, db_inst.id)
                         for db_inst in db_instances
                         if db_inst.id not in new_instance_ids), None)
    if not cluster_head:
        raise TroveError("Unable to determine existing Redis cluster "
                         "member")
    (cluster_head_ip, cluster_head_port) = (
        self.get_guest(cluster_head).get_node_ip())
    # Wait for cluster members to get to cluster-ready status.
    if not self._all_instances_ready(new_instance_ids, cluster_id):
        return
    LOG.debug("All members ready, proceeding for cluster setup.")
    new_insts = [Instance.load(context, instance_id)
                 for instance_id in new_instance_ids]
    # Materialize the guests: a lazy map() is exhausted by the first
    # loop under Python 3, so cluster_complete() would never run.
    new_guests = [self.get_guest(inst) for inst in new_insts]
    # Connect nodes to the cluster head
    for guest in new_guests:
        guest.cluster_meet(cluster_head_ip, cluster_head_port)
    for guest in new_guests:
        guest.cluster_complete()
示例5: _shrink_cluster
def _shrink_cluster():
    """Decommission and delete the nodes in ``removal_ids`` from a
    Cassandra cluster.

    Order matters: removed nodes are decommissioned (Cassandra streams
    their data to survivors) and detached first; the seed list on the
    remaining nodes is recomputed only if a seed node was removed;
    instances are deleted only after all removed nodes reach SHUTDOWN.

    Relies on closure variables from the enclosing task method:
    ``context``, ``cluster_id``, ``removal_ids``.  Any failure marks
    the cluster statuses as failed rather than raising.
    """
    cluster_node_ids = self.find_cluster_node_ids(cluster_id)
    cluster_nodes = self.load_cluster_nodes(context, cluster_node_ids)
    removed_nodes = CassandraClusterTasks.load_cluster_nodes(context, removal_ids)
    LOG.debug("All nodes ready, proceeding with cluster setup.")
    # Update the list of seeds on remaining nodes if necessary.
    # Once all nodes are configured, decommission the removed nodes.
    # Cassandra will stream data from decommissioned nodes to the
    # remaining ones.
    try:
        # All nodes should have the same seeds.
        # We retrieve current seeds from the first node.
        test_node = self.load_cluster_nodes(context, cluster_node_ids[:1])[0]
        current_seeds = test_node["guest"].get_seeds()
        # The seeds will have to be updated on all remaining instances
        # if any of the seed nodes is going to be removed.
        update_seeds = any(node["ip"] in current_seeds for node in removed_nodes)
        LOG.debug("Decommissioning removed nodes.")
        for node in removed_nodes:
            node["guest"].node_decommission()
            # Detach from the cluster so the instance becomes standalone.
            node["instance"].update_db(cluster_id=None)
        # Recompute the seed nodes based on the updated cluster
        # geometry if any of the existing seed nodes was removed.
        if update_seeds:
            LOG.debug("Updating seeds on the remaining nodes.")
            cluster_nodes = self.load_cluster_nodes(context, cluster_node_ids)
            remaining_nodes = [node for node in cluster_nodes if node not in removed_nodes]
            seeds = self.choose_seed_nodes(remaining_nodes)
            LOG.debug("Selected seed nodes: %s" % seeds)
            for node in remaining_nodes:
                LOG.debug("Configuring node: %s." % node["id"])
                node["guest"].set_seeds(seeds)
        # Wait for the removed nodes to go SHUTDOWN.
        LOG.debug("Waiting for all decommissioned nodes to shutdown.")
        if not self._all_instances_shutdown(removal_ids, cluster_id):
            # Now detached, failed nodes will stay available
            # in the list of standalone instances.
            return
        # Delete decommissioned instances only when the cluster is in a
        # consistent state.
        LOG.debug("Deleting decommissioned instances.")
        for node in removed_nodes:
            Instance.delete(node["instance"])
        LOG.debug("Cluster configuration finished successfully.")
    except Exception:
        LOG.exception(_("Error shrinking cluster."))
        self.update_statuses_on_failure(cluster_id)
示例6: _grow_cluster
def _grow_cluster():
    """Grow a MongoDB cluster with new members and/or query routers.

    Two phases: (1) for each shard touched by new 'member' instances,
    wait for readiness and register the shard through a running query
    router; (2) wait for new 'query_router' instances and attach them
    to the existing config servers.  Every successfully handled
    instance is finally told that clustering is complete.

    Relies on closure variables from the enclosing task method:
    ``context``, ``cluster_id``, ``instance_ids`` (ids being added).
    Returns early (leaving statuses untouched) if any readiness or
    setup step fails.
    """
    new_instances = [db_instance for db_instance in self.db_instances
                     if db_instance.id in instance_ids]
    new_members = [db_instance for db_instance in new_instances
                   if db_instance.type == 'member']
    new_query_routers = [db_instance for db_instance in new_instances
                         if db_instance.type == 'query_router']
    instances = []
    if new_members:
        # New members may span several shards; handle each shard as a unit.
        shard_ids = set([db_instance.shard_id for db_instance
                         in new_members])
        query_router_id = self._get_running_query_router_id()
        if not query_router_id:
            return
        for shard_id in shard_ids:
            LOG.debug('growing cluster by adding shard %s on query '
                      'router %s' % (shard_id, query_router_id))
            member_ids = [db_instance.id for db_instance in new_members
                          if db_instance.shard_id == shard_id]
            if not self._all_instances_ready(
                member_ids, cluster_id, shard_id
            ):
                return
            members = [Instance.load(context, member_id)
                       for member_id in member_ids]
            query_router = Instance.load(context, query_router_id)
            if not self._create_shard(query_router, members):
                return
            instances.extend(members)
    if new_query_routers:
        query_router_ids = [db_instance.id for db_instance
                            in new_query_routers]
        # Query routers need the addresses of ALL config servers,
        # not just the new ones.
        config_servers_ids = [db_instance.id for db_instance
                              in self.db_instances
                              if db_instance.type == 'config_server']
        LOG.debug('growing cluster by adding query routers %s, '
                  'with config servers %s'
                  % (query_router_ids, config_servers_ids))
        if not self._all_instances_ready(
            query_router_ids, cluster_id
        ):
            return
        query_routers = [Instance.load(context, instance_id)
                         for instance_id in query_router_ids]
        config_servers_ips = [
            self.get_ip(Instance.load(context, config_server_id))
            for config_server_id in config_servers_ids
        ]
        if not self._add_query_routers(
            query_routers, config_servers_ips,
            admin_password=self.get_cluster_admin_password(context)
        ):
            return
        instances.extend(query_routers)
    # Only instances that made it through their setup phase are marked
    # cluster-complete.
    for instance in instances:
        self.get_guest(instance).cluster_complete()
示例7: _shrink_cluster
def _shrink_cluster():
    """Shrink a Galera cluster by deleting the given instances and
    re-rendering the cluster configuration on the survivors.

    Deletion is asynchronous: after issuing the deletes we poll until
    every removed id disappears from the non-deleted set (bounded by
    CONF.cluster_delete_time_out), then push an updated cluster
    address list to each leftover instance.

    Relies on closure variables from the enclosing task method:
    ``context``, ``cluster_id``, ``removal_instance_ids``.
    """
    removal_instances = [Instance.load(context, instance_id)
                         for instance_id in removal_instance_ids]
    for instance in removal_instances:
        Instance.delete(instance)
    # wait for instances to be deleted
    def all_instances_marked_deleted():
        # True once no removed id remains among non-deleted instances.
        non_deleted_instances = DBInstance.find_all(
            cluster_id=cluster_id, deleted=False).all()
        non_deleted_ids = [db_instance.id for db_instance
                           in non_deleted_instances]
        return not bool(
            set(removal_instance_ids).intersection(
                set(non_deleted_ids))
        )
    try:
        LOG.info(_("Deleting instances (%s)") % removal_instance_ids)
        utils.poll_until(all_instances_marked_deleted,
                         sleep_time=2,
                         time_out=CONF.cluster_delete_time_out)
    except PollTimeOut:
        # Give up on reconfiguration if the deletes never complete.
        LOG.error(_("timeout for instances to be marked as deleted."))
        return
    db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
    leftover_instances = [Instance.load(context, db_inst.id)
                          for db_inst in db_instances
                          if db_inst.id not in removal_instance_ids]
    leftover_cluster_ips = [self.get_ip(instance) for instance in
                            leftover_instances]
    # Get config changes for left over instances
    rnd_cluster_guest = self.get_guest(leftover_instances[0])
    cluster_context = rnd_cluster_guest.get_cluster_context()
    # apply the new config to all leftover instances
    for instance in leftover_instances:
        guest = self.get_guest(instance)
        # render the conf.d/cluster.cnf configuration
        cluster_configuration = self._render_cluster_config(
            context,
            instance,
            ",".join(leftover_cluster_ips),
            cluster_context['cluster_name'],
            cluster_context['replication_user'])
        guest.write_cluster_configuration_overrides(
            cluster_configuration)
示例8: _create_cluster
def _create_cluster():
    """Bootstrap a Vertica cluster from its member instances.

    Waits for every member to become ready, exchanges SSH public keys
    between all members for the password-less users, then lets the
    'master' instance install the cluster before marking every guest
    cluster-complete.  Any failure marks the cluster statuses as
    failed rather than raising.

    Relies on closure variables from the enclosing task method:
    ``context``, ``cluster_id``.
    """
    # Fetch instances by cluster_id against instances table.
    db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                       deleted=False).all()
    instance_ids = [db_instance.id for db_instance in db_instances]
    # Wait for cluster members to get to cluster-ready status.
    if not self._all_instances_ready(instance_ids, cluster_id):
        return
    LOG.debug("All members ready, proceeding for cluster setup.")
    instances = [Instance.load(context, instance_id) for instance_id
                 in instance_ids]
    member_ips = [self.get_ip(instance) for instance in instances]
    guests = [self.get_guest(instance) for instance in instances]
    # Users to be configured for password-less SSH.
    authorized_users_without_password = ['root', 'dbadmin']
    # Configuring password-less SSH for cluster members.
    # Strategy for setting up SSH:
    # get public keys for user from member-instances in cluster,
    # combine them, finally push it back to all instances,
    # and member instances add them to authorized keys.
    LOG.debug("Configuring password-less SSH on cluster members.")
    try:
        for user in authorized_users_without_password:
            # Collect one public key per guest, then push the full
            # list back to every guest.
            pub_key = [guest.get_public_keys(user) for guest in guests]
            for guest in guests:
                guest.authorize_public_keys(user, pub_key)
        LOG.debug("Installing cluster with members: %s." % member_ips)
        for db_instance in db_instances:
            if db_instance['type'] == 'master':
                master_instance = Instance.load(context,
                                                db_instance.id)
                self.get_guest(master_instance).install_cluster(
                    member_ips)
                break
        LOG.debug("Finalizing cluster configuration.")
        for guest in guests:
            guest.cluster_complete()
    except Exception:
        LOG.exception(_("Error creating cluster."))
        self.update_statuses_on_failure(cluster_id)
示例9: _create_instances
def _create_instances(context, db_info, datastore, datastore_version,
                      instances, extended_properties, locality):
    """Create one member instance per requested definition.

    Instances missing a "name" are named "<cluster>-member-<n>" with a
    counter that advances only for auto-named entries.  Returns the
    list of created Instance models.
    """
    member_config = {"id": db_info.id,
                     "instance_type": "member"}
    name_index = 1
    for instance in instances:
        if not instance.get("name"):
            instance['name'] = "%s-member-%s" % (db_info.name,
                                                 str(name_index))
            name_index += 1
    # Build the list eagerly: returning a lazy map() would defer (or
    # skip entirely, if the caller never iterates) the side-effecting
    # Instance.create calls under Python 3.
    return [Instance.create(context,
                            instance['name'],
                            instance['flavor_id'],
                            datastore_version.image_id,
                            [], [],
                            datastore, datastore_version,
                            instance.get('volume_size', None),
                            None,
                            availability_zone=instance.get(
                                'availability_zone', None),
                            nics=instance.get('nics', None),
                            configuration_id=None,
                            cluster_config=member_config,
                            locality=locality)
            for instance in instances]
示例10: test_create_sg_ha
def test_create_sg_ha(self):
    """create() returns None for an SG instance with extended options
    and no backup, when flavor checking is stubbed out."""
    name = "NAME"
    flavor_id = "flavor_id"
    image_id = "image_id"
    databases = "databases"
    users = "users"
    service_type = "mysql"
    volume_size = "10"
    backup_id = None  # creating from scratch, not from a backup
    master_id = "master_id"
    extend = {"autobackup_at": 2300,
              "duration": 1440,
              "expire_after": 7,
              "admin_user": "master",
              "admin_password": "kingsoft",
              "port": "3306"}
    # No configuration template is applied in this scenario.
    template_config_id = None
    # Stub flavor validation so create() can proceed without a real
    # flavor lookup.
    when(KSC_Instance)._check_flavor(any(), any()).thenReturn(
        {'image_id': "123"})
    instance_type = InstanceType.SG
    # assertIsNone gives a clearer failure message than
    # assertEqual(None, ...).
    self.assertIsNone(KSC_Instance.create(self.context, name, flavor_id,
                      image_id, databases, users, service_type,
                      volume_size, backup_id, instance_type,
                      template_config_id, extend, master_id))
示例11: _create_instances
def _create_instances(context, db_info, datastore, datastore_version, instances):
    """Create one member instance per requested definition.

    Instances missing a "name" are named "<cluster>-member-<n>" with a
    counter that advances only for auto-named entries.  Returns the
    list of created Instance models.
    """
    member_config = {"id": db_info.id, "instance_type": "member"}
    name_index = 1
    for instance in instances:
        if not instance.get("name"):
            instance["name"] = "%s-member-%s" % (db_info.name, str(name_index))
            name_index += 1
    # Build the list eagerly: returning a lazy map() would defer (or
    # skip entirely, if the caller never iterates) the side-effecting
    # Instance.create calls under Python 3.
    return [
        Instance.create(
            context,
            instance["name"],
            instance["flavor_id"],
            datastore_version.image_id,
            [],
            [],
            datastore,
            datastore_version,
            instance.get("volume_size", None),
            None,
            availability_zone=instance.get("availability_zone", None),
            nics=instance.get("nics", None),
            configuration_id=None,
            cluster_config=member_config,
        )
        for instance in instances
    ]
示例12: _create_resources
def _create_resources():
    """Create the backup DB record and kick off the backup task.

    Order is the contract: resolve and validate the target instance,
    verify the Swift auth token, persist the NEW backup record, then
    notify the task manager.  Returns the created DBBackup record.

    Relies on closure variables from the enclosing method:
    ``context``, ``instance`` (href/ref), ``name``, ``description``,
    ``cls``.  Raises BackupCreationError if the record cannot be
    persisted.
    """
    # parse the ID from the Ref
    instance_id = utils.get_id_from_href(instance)
    # verify that the instance exists and can perform actions
    from trove.instance.models import Instance
    instance_model = Instance.load(context, instance_id)
    instance_model.validate_can_perform_action()
    # Fail fast if the object store cannot be reached with this token.
    cls.verify_swift_auth_token(context)
    try:
        db_info = DBBackup.create(name=name,
                                  description=description,
                                  tenant_id=context.tenant,
                                  state=BackupState.NEW,
                                  instance_id=instance_id,
                                  deleted=False)
    except exception.InvalidModelError as ex:
        LOG.exception("Unable to create Backup record:")
        raise exception.BackupCreationError(str(ex))
    backup_info = {'id': db_info.id,
                   'name': name,
                   'description': description,
                   'instance_id': instance_id,
                   'backup_type': db_info.backup_type,
                   'checksum': db_info.checksum,
                   }
    # Hand the actual backup work off to the task manager.
    api.API(context).create_backup(backup_info, instance_id)
    return db_info
示例13: _grow_cluster
def _grow_cluster():
    """Add freshly provisioned nodes to an existing Couchbase cluster.

    Waits for the new instances to become ready, then rebalances them
    in through one of the current cluster nodes.  Failures mark the
    cluster statuses as failed instead of propagating.

    Uses closure variables from the enclosing task method:
    ``context``, ``cluster_id``, ``new_instance_ids``.
    """
    # New nodes must reach cluster-ready before touching the topology.
    LOG.debug("Waiting for new nodes to become ready.")
    if not self._all_instances_ready(new_instance_ids, cluster_id):
        return
    added_nodes = []
    for new_id in new_instance_ids:
        inst = Instance.load(context, new_id)
        added_nodes.append(self.build_node_info(inst))
    LOG.debug("All nodes ready, proceeding with cluster setup.")
    node_ids = self.find_cluster_node_ids(cluster_id)
    nodes = self.load_cluster_nodes(context, node_ids)
    # Rebalance the cluster via one of the existing nodes.
    # Clients can continue to store and retrieve information and
    # do not need to be aware that a rebalance operation is taking
    # place.
    # The new nodes are marked active only if the rebalancing
    # completes.
    try:
        self._add_nodes(nodes[0], added_nodes)
        LOG.debug("Cluster configuration finished successfully.")
    except Exception:
        LOG.exception(_("Error growing cluster."))
        self.update_statuses_on_failure(cluster_id)
示例14: test_upgrade_ha
def test_upgrade_ha(self):
    """upgrade_ha() returns None when every collaborator is stubbed."""
    instance = fake()
    instance.id = 1
    instance.context = self.context
    instance.name = 'name'
    instance.flavor_id = 'flavor_id'
    instance.service_type = 'service_type'
    instance.volume_size = 'volume_size'
    instance.group_id = 'group_id'
    # Stub out instance creation and every lookup upgrade_ha performs.
    when(KSC_Instance)._create_instance(any(), any(),
        any(), any(), any(), any(),
        any(), any(), any(), any(), any(), any()).thenReturn(None)
    group_item = fake()
    group_item.id = 1
    group_item.group_id = 1
    group_item.type = DBInstanceType.SINGLE
    when(InstanceGroupItem).get_by_instance_id(any(), any()).thenReturn(group_item)
    service = {'image_id': ''}
    when(ServiceImage).find_by(service_name=any()).thenReturn(service)
    when(Backup).get_latest_backup(any(), group_id=any()).thenReturn(None)
    when(KSC_Instance).is_service_active(any(), instance_id=any()).thenReturn(None)
    when(KSC_Instance).is_lastbackup_ready(any(), instance_id=any()).thenReturn(None)
    # assertIsNone gives a clearer failure message than
    # assertEqual(None, ...); the unused instance_type local was dropped.
    self.assertIsNone(KSC_Instance.upgrade_ha(instance))
示例15: _grow_cluster
def _grow_cluster():
    """Grow the cluster by rebalancing new nodes in via an old node.

    Waits for the new instances to become ready, partitions the
    cluster's nodes into old and new, and asks an old node to
    rebalance the new ones in.

    Uses closure variables from the enclosing task method:
    ``context``, ``cluster_id``, ``new_instance_ids``.
    """
    # Wait for new nodes to get to cluster-ready status.
    LOG.debug("Waiting for new nodes to become ready.")
    if not self._all_instances_ready(new_instance_ids, cluster_id):
        return
    loaded = [Instance.load(context, inst_id)
              for inst_id in new_instance_ids]
    add_node_info = [self.build_node_info(inst) for inst in loaded]
    LOG.debug("All nodes ready, proceeding with cluster setup.")
    all_node_ids = self.find_cluster_node_ids(cluster_id)
    all_nodes = self.load_cluster_nodes(context, all_node_ids)
    old_node_info = []
    for node in all_nodes:
        if node['id'] not in new_instance_ids:
            old_node_info.append(node)
    # Rebalance the cluster via one of the existing nodes.
    # Clients can continue to store and retrieve information and
    # do not need to be aware that a rebalance operation is taking
    # place.
    coordinator = old_node_info[0]
    self._add_nodes(coordinator, add_node_info)
    LOG.debug("Cluster grow finished successfully.")