本文整理汇总了Python中juju.state.service.ServiceStateManager类的典型用法代码示例。如果您正苦于以下问题:Python ServiceStateManager类的具体用法?Python ServiceStateManager怎么用?Python ServiceStateManager使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ServiceStateManager类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: config_set
def config_set(environment, service_name, service_options):
    """Set service settings.

    :param environment: environment whose machine provider supplies the
        state client connection.
    :param service_name: name of the service whose config is updated.
    :param service_options: either a dict of option values, or a sequence
        of "key=value" strings to be parsed.
    """
    provider = environment.get_machine_provider()
    client = yield provider.connect()
    try:
        # Get the service and the charm
        service_manager = ServiceStateManager(client)
        service = yield service_manager.get_service_state(service_name)
        charm = yield service.get_charm_state()

        # Use the charm's ConfigOptions instance to validate the
        # arguments to config_set. Invalid options passed to this method
        # will throw an exception.
        if isinstance(service_options, dict):
            options = service_options
        else:
            options = parse_keyvalue_pairs(service_options)

        config = yield charm.get_config()
        # Ignore the output of validate; we run it only for the exception
        # it raises on invalid options.
        config.validate(options)

        # Apply the change
        state = yield service.get_config()
        state.update(options)
        yield state.write()
    finally:
        # Always release the state connection (previously leaked),
        # mirroring add_unit / constraints_set.
        yield client.close()
示例2: validate_hooks
def validate_hooks(client, unit_state, hook_names):
    """Check that every name in `hook_names` is a hook the unit's charm
    can run; raise InvalidCharmHook for the first name that is not."""
    # Assemble the valid hooks: the fixed lifecycle hooks plus one hook
    # per relation-lifecycle event for every endpoint of the service.
    valid_hooks = ["start", "stop", "install", "config-changed"]
    service_manager = ServiceStateManager(client)
    endpoints = yield service_manager.get_relation_endpoints(
        unit_state.service_name)
    relation_suffixes = ("-relation-joined", "-relation-changed",
                         "-relation-departed", "-relation-broken")
    for endpoint in endpoints:
        for suffix in relation_suffixes:
            valid_hooks.append(endpoint.relation_name + suffix)

    # Scan the requested debug names; fall through to success only when
    # none of them is invalid.
    for hook_name in hook_names:
        if hook_name not in valid_hooks:
            break
    else:
        returnValue(True)

    # We dereference to the charm to give a fully qualified error
    # message. I wish this was a little easier to dereference, the
    # service_manager.get_relation_endpoints effectively does this
    # already.
    service_manager = ServiceStateManager(client)
    service_state = yield service_manager.get_service_state(
        unit_state.service_name)
    charm_id = yield service_state.get_charm_id()
    charm_manager = CharmStateManager(client)
    charm = yield charm_manager.get_charm_state(charm_id)
    raise InvalidCharmHook(charm.id, hook_name)
示例3: test_deploy_legacy_keys_in_fresh_env
def test_deploy_legacy_keys_in_fresh_env(self):
    """Deploying with deprecated environments.yaml keys against a fresh
    (non-legacy) environment warns on stderr, and the service is not
    created."""
    yield self.push_default_config()
    # Local config containing a key the current schema has deprecated.
    local_config = {
        "environments": {"firstenv": {
            "type": "dummy",
            "some-legacy-key": "blah",
            "default-series": "series"}}}
    self.write_config(yaml.dump(local_config))
    self.config.load()
    finished = self.setup_cli_reactor()
    self.setup_exit(0)
    self.mocker.replay()
    stderr = self.capture_stream("stderr")
    main(["deploy", "--repository", self.unbundled_repo_path,
          "local:sample", "beekeeper"])
    yield finished
    # The CLI must warn about the deprecated keys...
    self.assertIn(
        "Your environments.yaml contains deprecated keys",
        stderr.getvalue())
    # ...and the service must not exist in state afterwards.
    service_manager = ServiceStateManager(self.client)
    yield self.assertFailure(
        service_manager.get_service_state("beekeeper"),
        ServiceStateNotFound)
示例4: test_deploy
def test_deploy(self):
    """Create service, and service unit on machine from charm"""
    environment = self.config.get("firstenv")
    yield deploy.deploy(
        self.config, environment, self.unbundled_repo_path, "local:sample",
        "myblog", logging.getLogger("deploy"), ["cpu=123"])

    # The service appears in the topology under the first generated id.
    topology = yield self.get_topology()
    service_id = topology.find_service_with_name("myblog")
    self.assertEqual(service_id, "service-%010d" % 0)
    exists = yield self.client.exists("/services/%s" % service_id)
    self.assertTrue(exists)

    # Service state records the resolved charm id and the merged
    # constraints, with cpu=123 overriding the default.
    service_state_manager = ServiceStateManager(self.client)
    service_state = yield service_state_manager.get_service_state("myblog")
    charm_id = yield service_state.get_charm_id()
    self.assertEquals(charm_id, "local:series/sample-2")
    constraints = yield service_state.get_constraints()
    expect_constraints = {
        "arch": "amd64", "cpu": 123, "mem": 512,
        "provider-type": "dummy", "ubuntu-series": "series"}
    self.assertEquals(constraints, expect_constraints)

    # Two machines present -- NOTE(review): machine 0 presumably exists
    # from test setup and machine 1 was allocated for the unit; confirm
    # against the fixture.
    machine_ids = topology.get_machines()
    self.assertEqual(
        machine_ids,
        ["machine-%010d" % 0, "machine-%010d" % 1])
    exists = yield self.client.exists("/machines/%s" % machine_ids[0])
    self.assertTrue(exists)

    # Exactly one unit was created for the service.
    unit_ids = topology.get_service_units(service_id)
    self.assertEqual(unit_ids, ["unit-%010d" % 0])
    exists = yield self.client.exists("/units/%s" % unit_ids[0])
    self.assertTrue(exists)
示例5: destroy_service
def destroy_service(config, environment, verbose, log, service_name):
    """Destroy the named service and remove its state.

    :param environment: environment whose machine provider supplies the
        state client connection.
    :param service_name: name of the service to destroy.
    """
    provider = environment.get_machine_provider()
    client = yield provider.connect()
    try:
        service_manager = ServiceStateManager(client)
        service_state = yield service_manager.get_service_state(service_name)
        yield service_manager.remove_service_state(service_state)
        log.info("Service %r destroyed.", service_state.service_name)
    finally:
        # Release the state connection even when lookup/removal fails
        # (previously leaked), mirroring add_unit / remove_unit.
        yield client.close()
示例6: add_unit
def add_unit(config, environment, verbose, log, service_name, num_units):
    """Add a unit of a service to the environment."""
    provider = environment.get_machine_provider()
    placement_policy = provider.get_placement_policy()
    client = yield provider.connect()
    try:
        yield legacy.check_environment(
            client, provider.get_legacy_config_keys())
        yield sync_environment_state(client, config, environment.name)

        manager = ServiceStateManager(client)
        service_state = yield manager.get_service_state(service_name)

        # Subordinate units are created alongside their principal's
        # units, never added directly.
        is_subordinate = yield service_state.is_subordinate()
        if is_subordinate:
            raise JujuError("Subordinate services acquire units from "
                            "their principal service.")

        for _ in range(num_units):
            unit_state = yield service_state.add_unit_state()
            yield place_unit(client, placement_policy, unit_state)
            log.info("Unit %r added to service %r",
                     unit_state.unit_name, service_state.service_name)
    finally:
        yield client.close()
示例7: destroy_service
def destroy_service(config, environment, verbose, log, service_name):
    """Destroy the named service, refusing to remove a subordinate that
    is still attached to a principal via a container-scoped relation.

    :raises UnsupportedSubordinateServiceRemoval: when the service is a
        subordinate with a live container relation to a principal.
    """
    provider = environment.get_machine_provider()
    client = yield provider.connect()
    try:
        service_manager = ServiceStateManager(client)
        service_state = yield service_manager.get_service_state(service_name)
        if (yield service_state.is_subordinate()):
            # We can destroy the service if it does not have relations.
            # That implies that principals have already been torn
            # down (or were never added).
            relation_manager = RelationStateManager(client)
            relations = yield relation_manager.get_relations_for_service(
                service_state)
            if relations:
                principal_service = None
                # If we have a container we can destroy the subordinate
                # (revisit in the future).
                for relation in relations:
                    # Only container-scoped relations bind a subordinate
                    # to a principal.
                    if relation.relation_scope != "container":
                        continue
                    services = yield relation.get_service_states()
                    # The other end of the relation is the candidate
                    # principal.
                    remote_service = [s for s in services if s.service_name !=
                                      service_state.service_name][0]
                    if not (yield remote_service.is_subordinate()):
                        principal_service = remote_service
                        break
                if principal_service:
                    raise UnsupportedSubordinateServiceRemoval(
                        service_state.service_name,
                        principal_service.service_name)
        yield service_manager.remove_service_state(service_state)
        log.info("Service %r destroyed.", service_state.service_name)
    finally:
        # Release the state connection on every path, including the
        # UnsupportedSubordinateServiceRemoval raise (previously leaked).
        yield client.close()
示例8: add_relation
def add_relation(env_config, environment, verbose, log, *descriptors):
    """Add relation between relation endpoints described by `descriptors`.

    :raises NoMatchingEndpoints: when the descriptors match no endpoint pair.
    :raises AmbiguousRelation: when they match more than one pair.
    """
    provider = environment.get_machine_provider()
    client = yield provider.connect()
    try:
        relation_state_manager = RelationStateManager(client)
        service_state_manager = ServiceStateManager(client)
        endpoint_pairs = yield service_state_manager.join_descriptors(
            *descriptors)

        if verbose:
            log.info("Endpoint pairs: %s", endpoint_pairs)

        if len(endpoint_pairs) == 0:
            raise NoMatchingEndpoints()
        elif len(endpoint_pairs) > 1:
            raise AmbiguousRelation(descriptors, endpoint_pairs)

        # At this point we just have one endpoint pair. We need to pick
        # just one of the endpoints if it's a peer endpoint, since that's
        # our current API - join descriptors takes two descriptors, but
        # add_relation_state takes one or two endpoints. TODO consider
        # refactoring.
        endpoints = endpoint_pairs[0]
        if endpoints[0] == endpoints[1]:
            endpoints = endpoints[0:1]
        yield relation_state_manager.add_relation_state(*endpoints)
    finally:
        # Close the state connection on all paths; it was previously
        # leaked when NoMatchingEndpoints/AmbiguousRelation was raised.
        yield client.close()
    log.info("Added %s relation to all service units.",
             endpoints[0].relation_type)
示例9: start
def start(self):
    """Start the unit agent process.

    Resolves the unit's service and unit state, sets up working
    directories and the hook CLI socket, records addresses, registers
    watches, and synchronizes the persisted workflow state.
    """
    service_state_manager = ServiceStateManager(self.client)

    # Retrieve our unit and configure working directories.
    service_name = self.unit_name.split("/")[0]
    self.service_state = yield service_state_manager.get_service_state(
        service_name)
    self.unit_state = yield self.service_state.get_unit_state(
        self.unit_name)
    # Unit names contain "/" which is not filesystem friendly.
    self.unit_directory = os.path.join(
        self.config["juju_directory"], "units",
        self.unit_state.unit_name.replace("/", "-"))
    self.state_directory = os.path.join(
        self.config["juju_directory"], "state")

    # Setup the server portion of the cli api exposed to hooks.
    socket_path = os.path.join(self.unit_directory, HOOK_SOCKET_FILE)
    # Remove a stale socket left over from a previous run.
    if os.path.exists(socket_path):
        os.unlink(socket_path)
    from twisted.internet import reactor
    self.api_socket = reactor.listenUNIX(socket_path, self.api_factory)

    # Setup the unit state's address
    address = yield get_unit_address(self.client)
    yield self.unit_state.set_public_address(
        (yield address.get_public_address()))
    yield self.unit_state.set_private_address(
        (yield address.get_private_address()))

    if self.get_watch_enabled():
        yield self.unit_state.watch_hook_debug(self.cb_watch_hook_debug)

    # Inform the system, we're alive.
    yield self.unit_state.connect_agent()

    # Start paying attention to the debug-log setting
    # NOTE(review): watch_hook_debug is registered a second time here,
    # which looks redundant with the registration above -- confirm intent.
    if self.get_watch_enabled():
        yield self.unit_state.watch_hook_debug(self.cb_watch_hook_debug)

    self.lifecycle = UnitLifecycle(
        self.client, self.unit_state, self.service_state,
        self.unit_directory, self.state_directory, self.executor)

    self.workflow = UnitWorkflowState(
        self.client, self.unit_state, self.lifecycle, self.state_directory)

    # Set up correct lifecycle and executor state given the persistent
    # unit workflow state, and fire any starting transitions if necessary.
    with (yield self.workflow.lock()):
        yield self.workflow.synchronize(self.executor)

    if self.get_watch_enabled():
        yield self.unit_state.watch_resolved(self.cb_watch_resolved)
        yield self.service_state.watch_config_state(
            self.cb_watch_config_changed)
        yield self.unit_state.watch_upgrade_flag(
            self.cb_watch_upgrade_flag)
示例10: get_local_service
def get_local_service(self):
    """Return ServiceState for the local service."""
    if self._service is None:
        # Resolve lazily on first use and memoize on the instance.
        manager = ServiceStateManager(self._client)
        self._service = yield manager.get_service_state(
            parse_service_name(self._unit_name))
    returnValue(self._service)
示例11: constraints_set
def constraints_set(env_config, environment, service_name, constraint_strs):
    """
    Machine constraints allow you to pick the hardware to which your services
    will be deployed. Examples:

    $ juju set-constraints --service-name mysql mem=8G cpu=4

    $ juju set-constraints instance-type=t1.micro

    Available constraints vary by provider type, and will be ignored if not
    understood by the current environment's provider. The current set of
    available constraints across all providers is:

    On Amazon EC2:

    * arch (CPU architecture: i386/amd64/arm; amd64 by default)
    * cpu (processing power in Amazon ECU; 1 by default)
    * mem (memory in [MGT]iB; 512M by default)
    * instance-type (unset by default)
    * ec2-zone (unset by default)

    On Orchestra:

    * orchestra-classes (unset by default)

    On MAAS:

    * maas-name (unset by default)

    Service settings, if specified, will override environment settings, which
    will in turn override the juju defaults of mem=512M, cpu=1, arch=amd64.

    New constraints set on an entity will completely replace that entity's
    pre-existing constraints.

    To override an environment constraint with the juju default when setting
    service constraints, just specify "name=" (rather than just not specifying
    the constraint at all, which will cause it to inherit the environment's
    value).

    To entirely unset a constraint, specify "name=any".
    """
    provider = environment.get_machine_provider()
    constraint_set = yield provider.get_constraint_set()
    # Parse the requested strings with the provider's constraint set;
    # this happens before connecting to state.
    constraints = constraint_set.parse(constraint_strs)
    client = yield provider.connect()
    try:
        yield legacy.check_constraints(client, constraint_strs)
        yield sync_environment_state(client, env_config, environment.name)
        if service_name is None:
            # No service given: the constraints apply environment-wide.
            esm = EnvironmentStateManager(client)
            yield esm.set_constraints(constraints)
        else:
            # Otherwise set them on the named service only.
            ssm = ServiceStateManager(client)
            service = yield ssm.get_service_state(service_name)
            yield service.set_constraints(constraints)
    finally:
        yield client.close()
示例12: remove_relation
def remove_relation(env_config, environment, verbose, log, *descriptors):
    """Remove relation between relation endpoints described by `descriptors`.

    :raises NoMatchingEndpoints: when the descriptors match no endpoint pair.
    :raises AmbiguousRelation: when they match more than one pair.
    :raises UnsupportedSubordinateServiceRemoval: for a container relation
        between a subordinate and its principal.
    """
    provider = environment.get_machine_provider()
    client = yield provider.connect()
    try:
        relation_state_manager = RelationStateManager(client)
        service_state_manager = ServiceStateManager(client)
        endpoint_pairs = yield service_state_manager.join_descriptors(
            *descriptors)

        if verbose:
            log.info("Endpoint pairs: %s", endpoint_pairs)

        if len(endpoint_pairs) == 0:
            raise NoMatchingEndpoints()
        elif len(endpoint_pairs) > 1:
            raise AmbiguousRelation(descriptors, endpoint_pairs)

        # At this point we just have one endpoint pair. We need to pick
        # just one of the endpoints if it's a peer endpoint, since that's
        # our current API - join descriptors takes two descriptors, but
        # add_relation_state takes one or two endpoints. TODO consider
        # refactoring.
        endpoints = endpoint_pairs[0]
        if endpoints[0] == endpoints[1]:
            endpoints = endpoints[0:1]
        relation_state = yield relation_state_manager.get_relation_state(
            *endpoints)

        # Look at both endpoints, if we are dealing with a container relation
        # decide if one end is a principal.
        service_pair = []  # ordered such that sub, principal
        is_container = False
        # NOTE(review): has_principal starts True and is only ever set
        # back to True, so it never gates the check below; it also looks
        # like insert(0, ...) puts the principal first, contradicting the
        # "sub, principal" ordering comment -- confirm intent before
        # changing either.
        has_principal = True
        for ep in endpoints:
            if ep.relation_scope == "container":
                is_container = True
            service = yield service_state_manager.get_service_state(
                ep.service_name)
            if (yield service.is_subordinate()):
                service_pair.append(service)
                has_principal = True
            else:
                service_pair.insert(0, service)

        if is_container and len(service_pair) == 2 and has_principal:
            sub, principal = service_pair
            raise UnsupportedSubordinateServiceRemoval(
                sub.service_name, principal.service_name)

        yield relation_state_manager.remove_relation_state(relation_state)
    finally:
        # Close the state connection on all paths; it was previously
        # leaked on every error path above.
        yield client.close()
    log.info("Removed %s relation from all service units.",
             endpoints[0].relation_type)
示例13: test_deploy_legacy_keys_in_legacy_env
def test_deploy_legacy_keys_in_legacy_env(self):
    """Deprecated environments.yaml keys are tolerated when the state
    itself is already legacy, and the deploy succeeds."""
    # NOTE(review): deleting /constraints appears to put the environment
    # state into legacy mode -- confirm against the state setup.
    yield self.client.delete("/constraints")
    finished = self.setup_cli_reactor()
    self.setup_exit(0)
    self.mocker.replay()
    main(["deploy", "--repository", self.unbundled_repo_path,
          "local:sample", "beekeeper"])
    yield finished
    # The service must exist; get_service_state would fail the deferred
    # (and thus the test) if it did not.
    service_manager = ServiceStateManager(self.client)
    yield service_manager.get_service_state("beekeeper")
示例14: test_deploy_adds_peer_relations
def test_deploy_adds_peer_relations(self):
    """Deploy automatically adds peer relations."""
    environment = self.config.get("firstenv")
    # Deploy riak without an explicit service name (None).
    yield deploy.deploy(
        self.config, environment, self.unbundled_repo_path, "local:riak",
        None, logging.getLogger("deploy"))
    service_manager = ServiceStateManager(self.client)
    service_state = yield service_manager.get_service_state("riak")
    # Exactly one relation should exist: the charm's "ring" peer
    # relation, created without any add-relation call.
    relation_manager = RelationStateManager(self.client)
    relations = yield relation_manager.get_relations_for_service(
        service_state)
    self.assertEqual(len(relations), 1)
    self.assertEqual(relations[0].relation_name, "ring")
示例15: remove_unit
def remove_unit(config, environment, verbose, log, unit_names):
    """Remove each named unit from its service."""
    provider = environment.get_machine_provider()
    client = yield provider.connect()
    try:
        manager = ServiceStateManager(client)
        for unit_name in unit_names:
            # A unit name embeds its service name ("service/0").
            service_state = yield manager.get_service_state(
                parse_service_name(unit_name))
            unit_state = yield service_state.get_unit_state(unit_name)
            yield service_state.remove_unit_state(unit_state)
            log.info("Unit %r removed from service %r",
                     unit_state.unit_name, service_state.service_name)
    finally:
        yield client.close()