This page collects typical usage examples of the Python method charms.kubernetes.flagmanager.FlagManager.add. If you are wondering what FlagManager.add does, how to call it, or where to find working examples of it, the hand-picked code samples below should help. You can also browse further usage examples of the containing class, charms.kubernetes.flagmanager.FlagManager.
The following 15 code examples of FlagManager.add are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
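Before the individual examples, here is a minimal sketch of the pattern they all share. It is assembled only from calls that appear in the samples below (FlagManager(name), add(), get(), destroy(), to_s()); the wrapper function name is hypothetical, and the exact string produced by to_s() may differ between versions of the flagmanager layer.
# A minimal, illustrative sketch of the FlagManager.add pattern.
# Only calls that appear in the examples below are used here.
from charms.kubernetes.flagmanager import FlagManager

def sketch_kubelet_dns_flags(cluster_dns_ip, cluster_domain):
    # One FlagManager per daemon or registry name, e.g. 'kubelet'.
    opts = FlagManager('kubelet')

    # add() stores a flag and its value in unit data; strict=True
    # (used in Examples 1 and 3) replaces any existing value outright.
    opts.add('--cluster-dns', cluster_dns_ip, strict=True)
    opts.add('--cluster-domain', cluster_domain, strict=True)

    # get() reads a stored value back; destroy() removes a flag so it
    # can fall back to a default (see Examples 3 and 5).
    if opts.get('--cluster-domain') != cluster_domain:
        opts.destroy('--cluster-domain')

    # to_s() renders the accumulated flags as a single space-separated
    # string, which the examples feed to templates or `snap set`.
    return opts.to_s()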
Example 1: set_privileged
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def set_privileged(privileged, render_config=True):
    """Update the KUBE_ALLOW_PRIV flag for kubelet and re-render config files.

    If the flag already matches the requested value, this is a no-op.

    :param str privileged: "true" or "false"
    :param bool render_config: whether to render new config files
    :return: True if the flag was changed, else False
    """
    if privileged == "true":
        set_state('kubernetes-worker.privileged')
    else:
        remove_state('kubernetes-worker.privileged')

    flag = '--allow-privileged'
    kube_allow_priv_opts = FlagManager('KUBE_ALLOW_PRIV')
    if kube_allow_priv_opts.get(flag) == privileged:
        # Flag isn't changing, nothing to do
        return False

    hookenv.log('Setting {}={}'.format(flag, privileged))

    # Update --allow-privileged flag value
    kube_allow_priv_opts.add(flag, privileged, strict=True)

    # re-render config with new options
    if render_config:
        render_init_scripts()

    # signal that we need a kubelet restart
    set_state('kubernetes-worker.kubelet.restart')

    return True
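For context, Example 9 further down shows how this helper is invoked from the worker charm's start path. A call in the same module scope (hookenv and set_privileged as defined above; purely illustrative) looks like this:
# Hypothetical caller, mirroring the invocation in Example 9 below.
config = hookenv.config()
set_privileged(
    "true" if config['allow-privileged'] == "true" else "false",
    render_config=False)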
Example 2: setup_authentication
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def setup_authentication():
    '''Setup basic authentication and token access for the cluster.'''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    api_opts.add('--basic-auth-file', '/srv/kubernetes/basic_auth.csv')
    api_opts.add('--token-auth-file', '/srv/kubernetes/known_tokens.csv')
    api_opts.add('--service-cluster-ip-range', service_cidr())

    hookenv.status_set('maintenance', 'Rendering authentication templates.')
    htaccess = '/srv/kubernetes/basic_auth.csv'
    if not os.path.isfile(htaccess):
        setup_basic_auth('admin', 'admin', 'admin')
    known_tokens = '/srv/kubernetes/known_tokens.csv'
    if not os.path.isfile(known_tokens):
        setup_tokens(None, 'admin', 'admin')
        setup_tokens(None, 'kubelet', 'kubelet')
        setup_tokens(None, 'kube_proxy', 'kube_proxy')
    # Generate the default service account token key
    os.makedirs('/etc/kubernetes', exist_ok=True)
    cmd = ['openssl', 'genrsa', '-out', '/etc/kubernetes/serviceaccount.key',
           '2048']
    check_call(cmd)
    api_opts.add('--service-account-key-file',
                 '/etc/kubernetes/serviceaccount.key')
    controller_opts.add('--service-account-private-key-file',
                        '/etc/kubernetes/serviceaccount.key')

    set_state('authentication.setup')
Example 3: configure_kubernetes_service
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def configure_kubernetes_service(service, base_args, extra_args_key):
    # Handle api-extra-args config option
    to_add, to_remove = get_config_args(extra_args_key)

    flag_manager = FlagManager(service)

    # Remove arguments that are no longer provided as config option
    # this allows them to be reverted to charm defaults
    for arg in to_remove:
        hookenv.log('Removing option: {}'.format(arg))
        flag_manager.destroy(arg)
        # We need to "unset" options by setting their value to "null" string
        cmd = ['snap', 'set', service, '{}=null'.format(arg)]
        check_call(cmd)

    # Add base arguments
    for k, v in base_args.items():
        flag_manager.add(k, v, strict=True)

    # Add operator-provided arguments, this allows operators
    # to override defaults
    for arg in to_add:
        hookenv.log('Adding option: {} {}'.format(arg[0], arg[1]))
        # Make sure old value is gone
        flag_manager.destroy(arg[0])
        flag_manager.add(arg[0], arg[1], strict=True)

    cmd = ['snap', 'set', service] + flag_manager.to_s().split(' ')
    check_call(cmd)
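Example 3 depends on a get_config_args helper that is not shown on this page. The sketch below is an assumption for illustration only: it guesses that the extra-args config option holds a space-separated list of key=value pairs and uses charmhelpers' hookenv.config() with its previous() lookup to work out which keys were dropped; the real charm's helper may parse and diff the config differently.
# Hypothetical sketch of the get_config_args helper used in Example 3.
# Assumption: the config option holds space-separated key=value pairs.
from charmhelpers.core import hookenv

def get_config_args(extra_args_key):
    config = hookenv.config()

    def parse(raw):
        # 'v=3 audit-log-maxage=30' -> [('v', '3'), ('audit-log-maxage', '30')]
        pairs = []
        for token in (raw or '').split():
            key, _, value = token.partition('=')
            pairs.append((key, value))
        return pairs

    current = parse(config.get(extra_args_key))
    previous = parse(config.previous(extra_args_key) or '')

    # Keys that were set before but no longer appear should be removed so
    # they revert to charm defaults (see the destroy() calls in Example 3).
    current_keys = {key for key, _ in current}
    to_remove = [key for key, _ in previous if key not in current_keys]
    return current, to_remove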
Example 4: setup_non_leader_authentication
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def setup_non_leader_authentication():
    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    keys = [service_key, basic_auth, known_tokens]
    # The source of truth for non-leaders is the leader.
    # Therefore we overwrite_local with whatever the leader has.
    if not get_keys_from_leader(keys, overwrite_local=True):
        # the keys were not retrieved. Non-leaders have to retry.
        return

    if not any_file_changed(keys) and is_state('authentication.setup'):
        # No change detected and we have already setup the authentication
        return

    hookenv.status_set('maintenance', 'Rendering authentication templates.')
    api_opts = FlagManager('kube-apiserver')
    api_opts.add('basic-auth-file', basic_auth)
    api_opts.add('token-auth-file', known_tokens)
    api_opts.add('service-account-key-file', service_key)

    controller_opts = FlagManager('kube-controller-manager')
    controller_opts.add('service-account-private-key-file', service_key)

    remove_state('kubernetes-master.components.started')
    set_state('authentication.setup')
Example 5: handle_etcd_relation
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def handle_etcd_relation(reldata):
    ''' Save the client credentials and set appropriate daemon flags when
    etcd declares itself as available'''
    connection_string = reldata.get_connection_string()

    # Define where the etcd tls files will be kept.
    etcd_dir = '/root/cdk/etcd'

    # Create paths to the etcd client ca, key, and cert file locations.
    ca = os.path.join(etcd_dir, 'client-ca.pem')
    key = os.path.join(etcd_dir, 'client-key.pem')
    cert = os.path.join(etcd_dir, 'client-cert.pem')

    # Save the client credentials (in relation data) to the paths provided.
    reldata.save_client_credentials(key, cert, ca)

    api_opts = FlagManager('kube-apiserver')

    # Never use stale data, always prefer what's coming in during context
    # building. If it's stale, it's because what's in unitdata is stale.
    data = api_opts.data
    if data.get('etcd-servers-strict') or data.get('etcd-servers'):
        api_opts.destroy('etcd-cafile')
        api_opts.destroy('etcd-keyfile')
        api_opts.destroy('etcd-certfile')
        api_opts.destroy('etcd-servers', strict=True)
        api_opts.destroy('etcd-servers')

    # Set the apiserver flags in the options manager
    api_opts.add('etcd-cafile', ca)
    api_opts.add('etcd-keyfile', key)
    api_opts.add('etcd-certfile', cert)
    api_opts.add('etcd-servers', connection_string, strict=True)
Example 6: set_privileged
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def set_privileged():
    """Update the allow-privileged flag for kubelet.
    """
    privileged = hookenv.config('allow-privileged')
    if privileged == 'auto':
        gpu_enabled = is_state('kubernetes-worker.gpu.enabled')
        privileged = 'true' if gpu_enabled else 'false'

    flag = 'allow-privileged'
    hookenv.log('Setting {}={}'.format(flag, privileged))

    kubelet_opts = FlagManager('kubelet')
    kubelet_opts.add(flag, privileged)

    if privileged == 'true':
        set_state('kubernetes-worker.privileged')
    else:
        remove_state('kubernetes-worker.privileged')
Example 7: configure_controller_manager
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def configure_controller_manager():
    controller_opts = FlagManager('kube-controller-manager')

    # Get the tls paths from the layer data.
    layer_options = layer.options('tls-client')
    ca_cert_path = layer_options.get('ca_certificate_path')

    # Default to 3 minute resync. TODO: Make this configurable?
    controller_opts.add('min-resync-period', '3m')
    controller_opts.add('v', '2')
    controller_opts.add('root-ca-file', ca_cert_path)
    controller_opts.add('logtostderr', 'true')
    controller_opts.add('master', 'http://127.0.0.1:8080')

    cmd = (
        ['snap', 'set', 'kube-controller-manager'] +
        controller_opts.to_s().split(' ')
    )
    check_call(cmd)
    set_state('kube-controller-manager.do-restart')
Example 8: setup_leader_authentication
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def setup_leader_authentication():
    '''Setup basic authentication and token access for the cluster.'''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    service_key = '/etc/kubernetes/serviceaccount.key'
    basic_auth = '/srv/kubernetes/basic_auth.csv'
    known_tokens = '/srv/kubernetes/known_tokens.csv'

    api_opts.add('--basic-auth-file', basic_auth)
    api_opts.add('--token-auth-file', known_tokens)
    api_opts.add('--service-cluster-ip-range', service_cidr())
    hookenv.status_set('maintenance', 'Rendering authentication templates.')
    if not os.path.isfile(basic_auth):
        setup_basic_auth('admin', 'admin', 'admin')
    if not os.path.isfile(known_tokens):
        setup_tokens(None, 'admin', 'admin')
        setup_tokens(None, 'kubelet', 'kubelet')
        setup_tokens(None, 'kube_proxy', 'kube_proxy')
    # Generate the default service account token key
    os.makedirs('/etc/kubernetes', exist_ok=True)

    cmd = ['openssl', 'genrsa', '-out', service_key,
           '2048']
    check_call(cmd)
    api_opts.add('--service-account-key-file', service_key)
    controller_opts.add('--service-account-private-key-file', service_key)

    # read service account key for syndication
    leader_data = {}
    for f in [known_tokens, basic_auth, service_key]:
        with open(f, 'r') as fp:
            leader_data[f] = fp.read()

    # This is slightly opaque, but we are sending each file's contents
    # under its file path as the key.
    # eg:
    # {'/etc/kubernetes/serviceaccount.key': 'RSA:2471731...'}
    charms.leadership.leader_set(leader_data)

    set_state('authentication.setup')
Example 9: start_worker
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def start_worker(kube_api, kube_control, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    config = hookenv.config()
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.
    dns = kube_control.get_dns()

    if (data_changed('kube-api-servers', servers) or
            data_changed('kube-dns', dns)):

        # Create FlagManager for kubelet and add dns flags
        opts = FlagManager('kubelet')
        opts.add('--cluster-dns', dns['sdn-ip'])  # FIXME sdn-ip needs a rename
        opts.add('--cluster-domain', dns['domain'])

        # Create FlagManager for KUBE_MASTER and add api server addresses
        kube_master_opts = FlagManager('KUBE_MASTER')
        kube_master_opts.add('--master', ','.join(servers))

        # set --allow-privileged flag for kubelet
        set_privileged(
            "true" if config['allow-privileged'] == "true" else "false",
            render_config=False)

        create_config(servers[0])
        render_init_scripts()
        set_state('kubernetes-worker.config.created')
        restart_unit_services()
        update_kubelet_status()
Example 10: setup_non_leader_authentication
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def setup_non_leader_authentication():
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    hookenv.status_set('maintenance', 'Rendering authentication templates.')

    keys = [service_key, basic_auth, known_tokens]
    if not get_keys_from_leader(keys):
        # the keys were not retrieved. Non-leaders have to retry.
        return

    api_opts.add('--basic-auth-file', basic_auth)
    api_opts.add('--token-auth-file', known_tokens)
    api_opts.add('--service-cluster-ip-range', service_cidr())
    api_opts.add('--service-account-key-file', service_key)
    controller_opts.add('--service-account-private-key-file', service_key)

    set_state('authentication.setup')
Example 11: setup_non_leader_authentication
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def setup_non_leader_authentication():
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    service_key = '/etc/kubernetes/serviceaccount.key'
    basic_auth = '/srv/kubernetes/basic_auth.csv'
    known_tokens = '/srv/kubernetes/known_tokens.csv'

    # This races with other codepaths, and seems to require being created first
    # This block may be extracted later, but for now seems to work as intended
    os.makedirs('/etc/kubernetes', exist_ok=True)
    os.makedirs('/srv/kubernetes', exist_ok=True)

    hookenv.status_set('maintenance', 'Rendering authentication templates.')

    # Set an array for looping logic
    keys = [service_key, basic_auth, known_tokens]
    for k in keys:
        # If the path does not exist, assume we need it
        if not os.path.exists(k):
            # Fetch data from leadership broadcast
            contents = charms.leadership.leader_get(k)
            # Default to logging the warning and wait for leader data to be set
            if contents is None:
                msg = "Waiting on leader's crypto keys."
                hookenv.status_set('waiting', msg)
                hookenv.log('Missing content for file {}'.format(k))
                return
            # Write out the file and move on to the next item
            with open(k, 'w+') as fp:
                fp.write(contents)

    api_opts.add('--basic-auth-file', basic_auth)
    api_opts.add('--token-auth-file', known_tokens)
    api_opts.add('--service-cluster-ip-range', service_cidr())
    api_opts.add('--service-account-key-file', service_key)
    controller_opts.add('--service-account-private-key-file', service_key)

    set_state('authentication.setup')
Example 12: setup_leader_authentication
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def setup_leader_authentication():
    '''Setup basic authentication and token access for the cluster.'''
    api_opts = FlagManager('kube-apiserver')
    controller_opts = FlagManager('kube-controller-manager')

    service_key = '/root/cdk/serviceaccount.key'
    basic_auth = '/root/cdk/basic_auth.csv'
    known_tokens = '/root/cdk/known_tokens.csv'

    api_opts.add('basic-auth-file', basic_auth)
    api_opts.add('token-auth-file', known_tokens)
    hookenv.status_set('maintenance', 'Rendering authentication templates.')

    keys = [service_key, basic_auth, known_tokens]
    # Try first to fetch data from an old leadership broadcast.
    if not get_keys_from_leader(keys) \
            or is_state('reconfigure.authentication.setup'):
        last_pass = get_password('basic_auth.csv', 'admin')
        setup_basic_auth(last_pass, 'admin', 'admin')

        if not os.path.isfile(known_tokens):
            setup_tokens(None, 'admin', 'admin')
            setup_tokens(None, 'kubelet', 'kubelet')
            setup_tokens(None, 'kube_proxy', 'kube_proxy')

        # Generate the default service account token key
        os.makedirs('/root/cdk', exist_ok=True)
        if not os.path.isfile(service_key):
            cmd = ['openssl', 'genrsa', '-out', service_key,
                   '2048']
            check_call(cmd)
        remove_state('reconfigure.authentication.setup')

    api_opts.add('service-account-key-file', service_key)
    controller_opts.add('service-account-private-key-file', service_key)

    # read service account key for syndication
    leader_data = {}
    for f in [known_tokens, basic_auth, service_key]:
        with open(f, 'r') as fp:
            leader_data[f] = fp.read()

    # This is slightly opaque, but we are sending each file's contents
    # under its file path as the key.
    # eg:
    # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}
    charms.leadership.leader_set(leader_data)
    remove_state('kubernetes-master.components.started')
    set_state('authentication.setup')
Example 13: enable_gpu
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def enable_gpu():
    """Enable GPU usage on this node.
    """
    config = hookenv.config()
    if config['allow-privileged'] == "false":
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.'
        )
        return

    hookenv.log('Enabling gpu mode')

    kubelet_opts = FlagManager('kubelet')
    if get_version('kubelet') < (1, 6):
        hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
        kubelet_opts.add('--experimental-nvidia-gpus', '1')
    else:
        hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
        kubelet_opts.add('--feature-gates', 'Accelerators=true')

    # enable privileged mode and re-render config files
    set_privileged("true", render_config=False)
    render_init_scripts()

    # Apply node labels
    _apply_node_label('gpu=true', overwrite=True)
    _apply_node_label('cuda=true', overwrite=True)

    # Not sure why this is necessary, but if you don't run this, k8s will
    # think that the node has 0 gpus (as shown by the output of
    # `kubectl get nodes -o yaml`)
    check_call(['nvidia-smi'])

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.kubelet.restart')
Example 14: enable_gpu
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def enable_gpu():
    """Enable GPU usage on this node.
    """
    config = hookenv.config()
    if config['allow-privileged'] == "false":
        hookenv.status_set(
            'active',
            'GPUs available. Set allow-privileged="auto" to enable.'
        )
        return

    hookenv.log('Enabling gpu mode')

    try:
        # Not sure why this is necessary, but if you don't run this, k8s will
        # think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`)
        check_call(['nvidia-smi'])
    except CalledProcessError as cpe:
        hookenv.log('Unable to communicate with the NVIDIA driver.')
        hookenv.log(cpe)
        return

    kubelet_opts = FlagManager('kubelet')
    if get_version('kubelet') < (1, 6):
        hookenv.log('Adding --experimental-nvidia-gpus=1 to kubelet')
        kubelet_opts.add('experimental-nvidia-gpus', '1')
    else:
        hookenv.log('Adding --feature-gates=Accelerators=true to kubelet')
        kubelet_opts.add('feature-gates', 'Accelerators=true')

    # Apply node labels
    _apply_node_label('gpu=true', overwrite=True)
    _apply_node_label('cuda=true', overwrite=True)

    set_state('kubernetes-worker.gpu.enabled')
    set_state('kubernetes-worker.restart-needed')
Example 15: start_worker
# Required import: from charms.kubernetes.flagmanager import FlagManager
# Demonstrated method: FlagManager.add
def start_worker(kube_api, kube_dns, cni):
    ''' Start kubelet using the provided API and DNS info.'''
    servers = get_kube_api_servers(kube_api)
    # Note that the DNS server doesn't necessarily exist at this point. We know
    # what its IP will eventually be, though, so we can go ahead and configure
    # kubelet with that info. This ensures that early pods are configured with
    # the correct DNS even though the server isn't ready yet.
    dns = kube_dns.details()

    if (data_changed('kube-api-servers', servers) or
            data_changed('kube-dns', dns)):

        # Initialize a FlagManager object to add flags to unit data.
        opts = FlagManager('kubelet')
        # Append the DNS flags + data to the FlagManager object.
        opts.add('--cluster-dns', dns['sdn-ip'])  # FIXME sdn-ip needs a rename
        opts.add('--cluster-domain', dns['domain'])

        create_config(servers[0])
        render_init_scripts(servers)
        set_state('kubernetes-worker.config.created')
        restart_unit_services()
        update_kubelet_status()