This post collects typical usage examples of the Plenary class from the Python module aquilon.worker.templates.base. If you are wondering what the Plenary class does, how to use it, or where to find working examples, the curated class examples below should help.
The 15 Plenary code examples shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
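Before the individual examples, note the pattern that nearly all of the command snippets below share: look up the affected database objects, collect the corresponding templates with Plenary.get_plenary() in a PlenaryCollection, flush the session, and then write the templates out. The following is a minimal sketch of that pattern, not code taken from Aquilon itself; dbhost and dbinstances stand in for whatever objects a command touches, and only the Plenary/PlenaryCollection calls mirror the examples:

    # Minimal sketch of the shared pattern; dbhost/dbinstances are placeholders.
    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbhost))        # one plenary per affected object
    for dbinstance in dbinstances:
        plenaries.append(Plenary.get_plenary(dbinstance))
    session.flush()                                      # persist database changes first
    plenaries.write()                                    # then regenerate the templates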
Example 1: __init__
def __init__(self, dbmetacluster, logger=LOGGER):
    Plenary.__init__(self, dbmetacluster, logger=logger)
    self.name = dbmetacluster.name
    self.loadpath = self.dbobj.personality.archetype.name
    self.plenary_core = "clusters"
    self.plenary_template = dbmetacluster.name
Example 2: render
def render(self, session, logger, hostname, service, instance, **arguments):
    dbhost = hostname_to_host(session, hostname)
    dbservice = Service.get_unique(session, service, compel=True)
    msg = "Service %s" % service
    if instance:
        dbinstances = [get_service_instance(session, dbservice, instance)]
        msg = "Service %s, instance %s" % (service, instance)
    else:
        q = session.query(ServiceInstance)
        q = q.filter_by(service=dbservice)
        q = q.join('servers')
        q = q.filter_by(host=dbhost)
        dbinstances = q.all()

    for dbinstance in dbinstances:
        if dbhost in dbinstance.server_hosts:
            if (dbinstance.client_count > 0 and
                    len(dbinstance.server_hosts) <= 1):
                logger.warning("WARNING: Server %s, is the last server "
                               "bound to %s which still has clients" %
                               (hostname, msg))
            dbinstance.server_hosts.remove(dbhost)
            session.expire(dbhost, ['_services_provided'])

    session.flush()

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbhost))
    for dbinstance in dbinstances:
        plenaries.append(Plenary.get_plenary(dbinstance))
    plenaries.write()

    return
Example 3: render
def render(self, session, logger, service, instance, position, hostname,
           cluster, ip, resourcegroup, service_address, alias, **arguments):
    dbservice = Service.get_unique(session, service, compel=True)

    if instance:
        dbsi = ServiceInstance.get_unique(session, service=dbservice,
                                          name=instance, compel=True)
        dbinstances = [dbsi]
    else:
        # --position for multiple service instances sounds dangerous, so
        # disallow it until a real usecase emerges
        if position:
            raise ArgumentError("The --position option can only be "
                                "specified for one service instance.")
        q = session.query(ServiceInstance)
        q = q.filter_by(service=dbservice)
        dbinstances = q.all()

    plenaries = PlenaryCollection(logger=logger)

    if position is not None:
        params = None
    else:
        params = lookup_target(session, plenaries, hostname, ip, cluster,
                               resourcegroup, service_address, alias)

    for dbinstance in dbinstances:
        if position is not None:
            if position < 0 or position >= len(dbinstance.servers):
                raise ArgumentError("Invalid server position.")
            dbsrv = dbinstance.servers[position]
            if dbsrv.host:
                plenaries.append(Plenary.get_plenary(dbsrv.host))
            if dbsrv.cluster:
                plenaries.append(Plenary.get_plenary(dbsrv.cluster))
        else:
            dbsrv = find_server(dbinstance, params)
            if not dbsrv:
                if instance:
                    raise NotFoundException("No such server binding.")
                continue

        plenaries.append(Plenary.get_plenary(dbinstance))

        if dbsrv.host:
            session.expire(dbsrv.host, ['services_provided'])
        if dbsrv.cluster:
            session.expire(dbsrv.cluster, ['services_provided'])

        dbinstance.servers.remove(dbsrv)

        if dbinstance.client_count > 0 and not dbinstance.servers:
            logger.warning("Warning: {0} was left without servers, "
                           "but it still has clients.".format(dbinstance))

    session.flush()

    plenaries.write()

    return
Example 4: render
def render(self, session, logger, city, timezone, campus,
           default_dns_domain, comments, **arguments):
    dbcity = get_location(session, city=city)

    # Updating machine templates is expensive, so only do that if needed
    update_machines = False

    if timezone is not None:
        dbcity.timezone = timezone
    if comments is not None:
        dbcity.comments = comments
    if default_dns_domain is not None:
        if default_dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, default_dns_domain,
                                                compel=True)
            dbcity.default_dns_domain = dbdns_domain
        else:
            dbcity.default_dns_domain = None

    prev_campus = None
    dsdb_runner = None
    dsdb_runner = DSDBRunner(logger=logger)
    if campus is not None:
        dbcampus = get_location(session, campus=campus)
        # This one would change the template's locations hence forbidden
        if dbcampus.hub != dbcity.hub:
            # Doing this both to reduce user error and to limit
            # testing required.
            raise ArgumentError("Cannot change campus. {0} is in {1:l}, "
                                "while {2:l} is in {3:l}.".format(
                                    dbcampus, dbcampus.hub,
                                    dbcity, dbcity.hub))

        if dbcity.campus:
            prev_campus = dbcity.campus
        dbcity.update_parent(parent=dbcampus)
        update_machines = True

    session.flush()

    if campus is not None:
        if prev_campus:
            prev_name = prev_campus.name
        else:
            prev_name = None
        dsdb_runner.update_city(city, dbcampus.name, prev_name)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbcity))

    if update_machines:
        q = session.query(Machine)
        q = q.filter(Machine.location_id.in_(dbcity.offspring_ids()))
        logger.client_info("Updating %d machines..." % q.count())
        for dbmachine in q:
            plenaries.append(Plenary.get_plenary(dbmachine))

    count = plenaries.write()
    dsdb_runner.commit_or_rollback()
    logger.client_info("Flushed %d templates." % count)
Example 5: render
def render(self, session, logger, service, instance, comments,
           **arguments):
    dbservice = session.query(Service).filter_by(name=service).first()
    if dbservice and instance is None:
        raise ArgumentError("Service %s already exists." % dbservice.name)

    if not dbservice:
        # "add_service --service foo --comments blah" should add the
        # comments to Service,
        # "add_service --service foo --instance bar --comments blah" should
        # add the comments to ServiceInstance
        if instance:
            srvcomments = None
        else:
            srvcomments = comments
        dbservice = Service(name=service, comments=srvcomments)
        session.add(dbservice)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbservice))

    if instance:
        ServiceInstance.get_unique(session, service=dbservice,
                                   name=instance, preclude=True)
        dbsi = ServiceInstance(service=dbservice, name=instance,
                               comments=comments)
        session.add(dbsi)
        plenaries.append(Plenary.get_plenary(dbsi))

    session.flush()

    plenaries.write()

    return
Example 6: render
def render(self, session, logger, cluster, buildstatus, **arguments):
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                           compel=True)

    if not dbcluster.status.transition(dbcluster, dbstatus):
        return

    if not dbcluster.personality.archetype.is_compileable:
        return

    session.flush()

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbcluster))
    for dbhost in dbcluster.hosts:
        plenaries.append(Plenary.get_plenary(dbhost))

    # Force a host lock as pan might overwrite the profile...
    key = CompileKey(domain=dbcluster.branch.name, logger=logger)
    try:
        lock_queue.acquire(key)
        plenaries.write(locked=True)
        td = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                            logger=logger)
        td.compile(session, plenaries.object_templates, locked=True)
    except:
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return
Example 7: __init__
def __init__(self, dbhost, logger=LOGGER):
    Plenary.__init__(self, dbhost, logger=logger)
    # Store the branch separately so get_key() works even after the dbhost
    # object has been deleted
    self.branch = dbhost.branch
    self.name = dbhost.fqdn
    self.plenary_core = "hostdata"
    self.plenary_template = self.name
Example 8: render
def render(self, session, logger, switch, **arguments):
    dbswitch = Switch.get_unique(session, switch, compel=True)

    # Check and complain if the switch has any other addresses than its
    # primary address
    addrs = []
    for addr in dbswitch.all_addresses():
        if addr.ip == dbswitch.primary_ip:
            continue
        addrs.append(str(addr.ip))
    if addrs:
        raise ArgumentError("{0} still provides the following addresses, "
                            "delete them first: {1}.".format(
                                dbswitch, ", ".join(addrs)))

    dbdns_rec = dbswitch.primary_name
    ip = dbswitch.primary_ip
    old_fqdn = str(dbswitch.primary_name.fqdn)
    old_comments = dbswitch.comments
    session.delete(dbswitch)
    if dbdns_rec:
        delete_dns_record(dbdns_rec)

    session.flush()

    # Any switch ports hanging off this switch should be deleted with
    # the cascade delete of the switch.
    switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

    # clusters connected to this switch
    plenaries = PlenaryCollection(logger=logger)
    for dbcluster in dbswitch.esx_clusters:
        plenaries.append(Plenary.get_plenary(dbcluster))

    key = CompileKey.merge([switch_plenary.get_remove_key(),
                            plenaries.get_write_key()])

    try:
        lock_queue.acquire(key)
        switch_plenary.stash()
        plenaries.write(locked=True)
        switch_plenary.remove(locked=True)

        if ip:
            dsdb_runner = DSDBRunner(logger=logger)
            # FIXME: restore interface name/MAC on rollback
            dsdb_runner.delete_host_details(old_fqdn, ip,
                                            comments=old_comments)
            dsdb_runner.commit_or_rollback("Could not remove switch "
                                           "from DSDB")
        return
    except:
        plenaries.restore_stash()
        switch_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)
Example 9: render
def render(self, session, logger, machine, disk, controller, size, all,
           dbuser, **arguments):
    # Handle deprecated arguments
    if arguments.get("type", None):
        self.deprecated_option("type", "Please use --controller instead.",
                               logger=logger, **arguments)
        controller = arguments["type"]
    if arguments.get("capacity", None):
        self.deprecated_option("capacity", "Please use --size instead.",
                               logger=logger, **arguments)
        size = arguments["capacity"]

    dbmachine = Machine.get_unique(session, machine, compel=True)
    q = session.query(Disk).filter_by(machine=dbmachine)
    if disk:
        q = q.filter_by(device_name=disk)
    if controller:
        if controller not in controller_types:
            raise ArgumentError("%s is not a valid controller type, use "
                                "one of: %s." %
                                (controller, ", ".join(controller_types)))
        q = q.filter_by(controller_type=controller)
    if size is not None:
        q = q.filter_by(capacity=size)
    results = q.all()

    if len(results) == 0:
        raise NotFoundException("No disks found.")
    elif len(results) > 1 and not all:
        raise ArgumentError("More than one matching disks found. "
                            "Use --all to delete them all.")
    for result in results:
        session.delete(result)

    session.flush()
    session.expire(dbmachine, ['disks'])

    plenary_machine = Plenary.get_plenary(dbmachine, logger=logger)
    key = plenary_machine.get_write_key()
    dbcontainer = dbmachine.vm_container
    if dbcontainer:
        plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
        key = CompileKey.merge([key, plenary_container.get_write_key()])
    try:
        lock_queue.acquire(key)
        if dbcontainer:
            plenary_container.write(locked=True)
        plenary_machine.write(locked=True)
    except:
        plenary_machine.restore_stash()
        if dbcontainer:
            plenary_container.restore_stash()
        raise
    finally:
        lock_queue.release(key)
Example 10: __init__
def __init__(self, dbcluster, logger=LOGGER):
    Plenary.__init__(self, dbcluster, logger=logger)
    self.name = dbcluster.name
    if dbcluster.metacluster:
        self.metacluster = dbcluster.metacluster.name
    else:
        self.metacluster = None
    self.plenary_core = "clusterdata"
    self.plenary_template = dbcluster.name
Example 11: render
def render(self, session, logger, network_device, **arguments):
    dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)

    # Check and complain if the network device has any other addresses
    # than its primary address
    addrs = []
    for addr in dbnetdev.all_addresses():
        if addr.ip == dbnetdev.primary_ip:
            continue
        addrs.append(str(addr.ip))
    if addrs:
        raise ArgumentError("{0} still provides the following addresses, "
                            "delete them first: {1}.".format(
                                dbnetdev, ", ".join(addrs)))

    dbdns_rec = dbnetdev.primary_name
    ip = dbnetdev.primary_ip
    old_fqdn = str(dbnetdev.primary_name.fqdn)
    old_comments = dbnetdev.comments
    session.delete(dbnetdev)
    if dbdns_rec:
        delete_dns_record(dbdns_rec)

    session.flush()

    # Any network device ports hanging off this network device should be
    # deleted with the cascade delete of the network device.
    netdev_plenary = Plenary.get_plenary(dbnetdev, logger=logger)

    # clusters connected to this network device
    plenaries = PlenaryCollection(logger=logger)
    for dbcluster in dbnetdev.esx_clusters:
        plenaries.append(Plenary.get_plenary(dbcluster))

    with CompileKey.merge([netdev_plenary.get_key(), plenaries.get_key()]):
        netdev_plenary.stash()
        try:
            plenaries.write(locked=True)
            netdev_plenary.remove(locked=True)

            if ip:
                dsdb_runner = DSDBRunner(logger=logger)
                # FIXME: restore interface name/MAC on rollback
                dsdb_runner.delete_host_details(old_fqdn, ip,
                                                comments=old_comments)
                dsdb_runner.commit_or_rollback("Could not remove network "
                                               "device from DSDB")
        except:
            plenaries.restore_stash()
            netdev_plenary.restore_stash()
            raise

    return
Example 12: lookup_target
def lookup_target(session, plenaries, hostname, ip, cluster, resourcegroup,
                  service_address, alias):
    """
    Check the parameters of the server providing a given service.

    Look for potential conflicts, and return a dict that is suitable to be
    passed to either the constructor of ServiceInstanceServer, or to the
    find_server() function.
    """

    params = {}

    if cluster and hostname:
        raise ArgumentError("Only one of --cluster and --hostname may be "
                            "specified.")

    if alias:
        dbdns_env = DnsEnvironment.get_unique_or_default(session)
        dbdns_rec = Alias.get_unique(session, fqdn=alias,
                                     dns_environment=dbdns_env, compel=True)
        params["alias"] = dbdns_rec

    if hostname:
        params["host"] = hostname_to_host(session, hostname)
        plenaries.append(Plenary.get_plenary(params["host"]))
    if cluster:
        params["cluster"] = Cluster.get_unique(session, cluster, compel=True)
        plenaries.append(Plenary.get_plenary(params["cluster"]))

    if service_address:
        # TODO: calling get_resource_holder() means doing redundant DB lookups
        # TODO: it would be nice to also accept an FQDN for the service
        # address, to be consistent with the usage of the --service_address
        # option in add_service_address/del_service_address
        holder = get_resource_holder(session, hostname=hostname,
                                     cluster=cluster,
                                     resgroup=resourcegroup, compel=True)
        dbsrv_addr = ServiceAddress.get_unique(session,
                                               name=service_address,
                                               holder=holder, compel=True)
        params["service_address"] = dbsrv_addr
    elif ip:
        for addr in params["host"].hardware_entity.all_addresses():
            if ip != addr.ip:
                continue
            if addr.service_address:
                params["service_address"] = addr.service_address
            else:
                params["address_assignment"] = addr
            break

    return params
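As a usage note, the dict returned by lookup_target() is what Example 3 above passes to find_server() to locate the matching binding on each service instance. The following is a condensed sketch of that flow using the same names as Example 3; it abbreviates that example rather than adding new Aquilon code:

    plenaries = PlenaryCollection(logger=logger)
    params = lookup_target(session, plenaries, hostname, ip, cluster,
                           resourcegroup, service_address, alias)
    for dbinstance in dbinstances:
        dbsrv = find_server(dbinstance, params)   # None if nothing matches
        if not dbsrv:
            continue
        plenaries.append(Plenary.get_plenary(dbinstance))
        dbinstance.servers.remove(dbsrv)          # drop the server binding
    session.flush()
    plenaries.write()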
Example 13: render
def render(self, session, logger, hostname, cluster,
           personality, **arguments):
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbhost = hostname_to_host(session, hostname)
    if not dbhost.cluster:
        raise ArgumentError("{0} is not bound to a cluster.".format(dbhost))
    if dbhost.cluster != dbcluster:
        raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
            dbhost, dbhost.cluster, dbcluster))

    if personality:
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbhost.archetype,
                                               compel=True)
        if dbpersonality.cluster_required:
            raise ArgumentError("Cannot switch host to personality %s "
                                "because that personality requires a "
                                "cluster" % personality)
        dbhost.personality = dbpersonality
    elif dbhost.personality.cluster_required:
        raise ArgumentError("Host personality %s requires a cluster, "
                            "use --personality to change personality "
                            "when leaving the cluster." %
                            dbhost.personality.name)

    dbcluster.hosts.remove(dbhost)
    remove_service_addresses(dbcluster, dbhost)
    dbcluster.validate()

    session.flush()
    session.expire(dbhost, ['_cluster'])

    # Will need to write a cluster plenary and either write or
    # remove a host plenary.  Grab the domain key since the two
    # must be in the same domain.
    host_plenary = Plenary.get_plenary(dbhost, logger=logger)
    cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
    key = CompileKey(domain=dbcluster.branch.name, logger=logger)
    try:
        lock_queue.acquire(key)
        cluster_plenary.write(locked=True)
        try:
            host_plenary.write(locked=True)
        except IncompleteError:
            host_plenary.cleanup(domain=dbhost.branch.name, locked=True)
    except:
        cluster_plenary.restore_stash()
        host_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)
Example 14: update_cluster_location
def update_cluster_location(session, logger, dbcluster,
                            fix_location, plenaries, remove_plenaries,
                            **arguments):
    location_updated = False
    dblocation = get_location(session, **arguments)
    if fix_location:
        dblocation = dbcluster.minimum_location
        if not dblocation:
            raise ArgumentError("Cannot infer the cluster location from "
                                "the host locations.")
    if dblocation:
        errors = []
        if not dblocation.campus:
            errors.append("{0} is not within a campus.".format(dblocation))

        if dbcluster.cluster_type != 'meta':
            for host in dbcluster.hosts:
                if host.machine.location != dblocation and \
                        dblocation not in host.machine.location.parents:
                    errors.append("{0} has location {1}.".format(
                        host, host.machine.location))
        else:
            for cluster in dbcluster.members:
                if cluster.location_constraint != dblocation and \
                        dblocation not in cluster.location_constraint.parents:
                    errors.append("{0} has location {1}.".format(
                        cluster, cluster.location_constraint))

        if errors:
            raise ArgumentError("Cannot set {0} location constraint to "
                                "{1}:\n{2}".format(dbcluster, dblocation,
                                                   "\n".join(errors)))

        if dbcluster.location_constraint != dblocation:
            if machine_plenary_will_move(old=dbcluster.location_constraint,
                                         new=dblocation):
                for dbmachine in dbcluster.machines:
                    # This plenary will have a path to the old location.
                    plenary = Plenary.get_plenary(dbmachine, logger=logger)
                    remove_plenaries.append(plenary)
                    dbmachine.location = dblocation
                    session.add(dbmachine)
                    # This plenary will have a path to the new location.
                    plenaries.append(Plenary.get_plenary(dbmachine))
                    # Update the path to the machine plenary in the
                    # container resource
                    plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
            dbcluster.location_constraint = dblocation
            location_updated = True

    return location_updated
Example 15: __init__
def __init__(self, dbmachine, logger=LOGGER):
    Plenary.__init__(self, dbmachine, logger=logger)

    self.machine = dbmachine.label

    loc = dbmachine.location
    self.hub = loc.hub.fullname.lower()
    self.building = loc.building.name
    self.city = loc.city.name
    self.continent = loc.continent.name
    if loc.rack:
        self.rack = loc.rack.name
        self.rackrow = loc.rack.rack_row
        self.rackcol = loc.rack.rack_column
    else:
        self.rack = None
    if loc.room:
        self.room = loc.room.name
    else:
        self.room = None
    if loc.bunker:
        self.bunker = loc.bunker.name
    else:
        self.bunker = None
    if loc.campus:
        self.campus = loc.campus.name
    else:
        self.campus = None

    self.dns_search_domains = []
    parents = loc.parents[:]
    parents.append(loc)
    parents.reverse()
    for parent in parents:
        # Filter out duplicates
        extra_domains = [map.dns_domain.name
                         for map in parent.dns_maps
                         if map.dns_domain.name not in self.dns_search_domains]
        self.dns_search_domains.extend(extra_domains)

    self.sysloc = loc.sysloc()

    # If this changes need to update machine_plenary_will_move() to match.
    self.plenary_core = "machine/%(hub)s/%(building)s/%(rack)s" % self.__dict__
    self.plenary_template = self.machine
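The plenary_core path in Example 15 depends only on the hub, building and rack of the machine's location, which is why Example 14 only regenerates machine plenaries when machine_plenary_will_move() reports that the template path changes. A hedged sketch of what such a check could look like, derived purely from the path format above (the real helper lives elsewhere in the Aquilon source and may differ):

    def machine_plenary_will_move(old, new):
        # Illustrative sketch only: the plenary path is
        # machine/<hub>/<building>/<rack>, so the template file moves whenever
        # one of those components differs between the old and new location.
        return (old.hub != new.hub or
                old.building != new.building or
                old.rack != new.rack)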