This page collects typical usage examples of the Python method kubernetes.client.V1VolumeMount. If you are wondering what client.V1VolumeMount does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also read further about the module it belongs to, kubernetes.client.
The section below presents 11 code examples of client.V1VolumeMount, sorted by popularity by default.
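As a quick orientation before the project examples, here is a minimal, self-contained sketch that constructs a V1VolumeMount and attaches it to a container spec. The names and values are illustrative only and are not taken from any of the projects below.

from kubernetes import client

# Hypothetical names: mount a volume called "data" at /data inside the container.
volume_mount = client.V1VolumeMount(name="data", mount_path="/data", read_only=False)
container = client.V1Container(name="app", image="busybox", volume_mounts=[volume_mount])
print(container.volume_mounts[0].mount_path)  # prints "/data"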
Example 1: mount_pvc
# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1VolumeMount [as alias]
def mount_pvc(pvc_name='pipeline-claim', volume_name='pipeline', volume_mount_path='/mnt/pipeline'):
    """
    Modifier function to apply to a Container Op to simplify volume, volume mount addition and
    enable better reuse of volumes, volume claims across container ops.

    Usage:
        train = train_op(...)
        train.apply(mount_pvc('claim-name', 'pipeline', '/mnt/pipeline'))
    """
    def _mount_pvc(task):
        from kubernetes import client as k8s_client
        # there can be other ops in a pipeline (e.g. ResourceOp, VolumeOp)
        # refer to #3906
        if not hasattr(task, "add_volume") or not hasattr(task, "add_volume_mount"):
            return task
        local_pvc = k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name)
        return (
            task
            .add_volume(
                k8s_client.V1Volume(name=volume_name, persistent_volume_claim=local_pvc)
            )
            .add_volume_mount(
                k8s_client.V1VolumeMount(mount_path=volume_mount_path, name=volume_name)
            )
        )
    return _mount_pvc
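A minimal usage sketch for the modifier above, assuming the KFP v1 SDK (kfp.dsl) and a pre-existing claim named pipeline-claim; the ContainerOp below is a placeholder, not part of the original example.

import kfp.dsl as dsl

@dsl.pipeline(name='mount-pvc-demo')
def demo_pipeline():
    # Placeholder step; any task exposing add_volume/add_volume_mount works.
    train = dsl.ContainerOp(
        name='train',
        image='busybox',
        command=['sh', '-c', 'ls /mnt/pipeline'])
    train.apply(mount_pvc('pipeline-claim', 'pipeline', '/mnt/pipeline'))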
Example 2: volume_pipeline
# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1VolumeMount [as alias]
def volume_pipeline():
    op1 = dsl.ContainerOp(
        name='download',
        image='google/cloud-sdk',
        command=['sh', '-c'],
        arguments=['ls | tee /tmp/results.txt'],
        file_outputs={'downloaded': '/tmp/results.txt'}) \
        .add_volume(k8s_client.V1Volume(name='gcp-credentials',
                                        secret=k8s_client.V1SecretVolumeSource(
                                            secret_name='user-gcp-sa'))) \
        .add_volume_mount(k8s_client.V1VolumeMount(
            mount_path='/secret/gcp-credentials', name='gcp-credentials')) \
        .add_env_variable(k8s_client.V1EnvVar(
            name='GOOGLE_APPLICATION_CREDENTIALS',
            value='/secret/gcp-credentials/user-gcp-sa.json')) \
        .add_env_variable(k8s_client.V1EnvVar(name='Foo', value='bar'))
    op2 = dsl.ContainerOp(
        name='echo',
        image='library/bash',
        command=['sh', '-c'],
        arguments=['echo %s' % op1.output])
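To turn the pipeline function above into a runnable workflow it would typically be decorated with @dsl.pipeline and compiled. A short sketch, assuming the KFP v1 SDK; the output file name is a placeholder.

import kfp.compiler as compiler

if __name__ == '__main__':
    # volume_pipeline is assumed to carry a @dsl.pipeline decorator in the full source.
    compiler.Compiler().compile(volume_pipeline, 'volume_pipeline.tar.gz')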
Example 3: get_volume_mounts
# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1VolumeMount [as alias]
def get_volume_mounts(
        self,
        docker_volumes: Sequence[DockerVolume],
        aws_ebs_volumes: Sequence[AwsEbsVolume],
        persistent_volumes: Sequence[PersistentVolume],
) -> Sequence[V1VolumeMount]:
    return (
        [
            V1VolumeMount(
                mount_path=docker_volume["containerPath"],
                name=self.get_docker_volume_name(docker_volume),
                read_only=self.read_only_mode(docker_volume),
            )
            for docker_volume in docker_volumes
        ]
        + [
            V1VolumeMount(
                mount_path=aws_ebs_volume["container_path"],
                name=self.get_aws_ebs_volume_name(aws_ebs_volume),
                read_only=self.read_only_mode(aws_ebs_volume),
            )
            for aws_ebs_volume in aws_ebs_volumes
        ]
        + [
            V1VolumeMount(
                mount_path=volume["container_path"],
                name=self.get_persistent_volume_name(volume),
                read_only=self.read_only_mode(volume),
            )
            for volume in persistent_volumes
        ]
    )
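The method simply maps each volume dictionary onto a V1VolumeMount. Standalone, the equivalent construction for a single Docker volume might look like the sketch below; the dictionary keys beyond containerPath, the derived volume name, and the mode handling are assumptions, since the real class resolves names and read-only flags through its own helpers.

from kubernetes.client import V1VolumeMount

docker_volume = {"containerPath": "/nail/etc/services", "hostPath": "/etc/services", "mode": "RO"}  # hypothetical
mount = V1VolumeMount(
    mount_path=docker_volume["containerPath"],
    name="host--etc-services",                         # made-up; the class derives this from the host path
    read_only=docker_volume.get("mode", "RO") == "RO",
)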
Example 4: create_job_object
# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1VolumeMount [as alias]
def create_job_object(runner_image, region, s3_path, pvc_name):
    target_folder = get_target_folder(s3_path)

    # Configure the Pod template container
    container = k8s_client.V1Container(
        name="copy-dataset-worker",
        image=runner_image,
        command=["aws"],
        args=["s3", "sync", s3_path, "/mnt/" + target_folder],
        volume_mounts=[k8s_client.V1VolumeMount(name="data-storage", mount_path='/mnt')],
        env=[k8s_client.V1EnvVar(name="AWS_REGION", value=region),
             k8s_client.V1EnvVar(
                 name="AWS_ACCESS_KEY_ID",
                 value_from=k8s_client.V1EnvVarSource(
                     secret_key_ref=k8s_client.V1SecretKeySelector(
                         key="AWS_ACCESS_KEY_ID", name="aws-secret"))),
             k8s_client.V1EnvVar(
                 name="AWS_SECRET_ACCESS_KEY",
                 value_from=k8s_client.V1EnvVarSource(
                     secret_key_ref=k8s_client.V1SecretKeySelector(
                         key="AWS_SECRET_ACCESS_KEY", name="aws-secret")))],
    )
    volume = k8s_client.V1Volume(
        name='data-storage',
        persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name)
    )
    # Create and configure the Pod template spec
    template = k8s_client.V1PodTemplateSpec(
        # metadata=k8s_client.V1ObjectMeta(labels={"app": "copy-dataset-worker"}),
        spec=k8s_client.V1PodSpec(containers=[container], volumes=[volume], restart_policy="OnFailure"))
    # Create the Job spec
    spec = k8s_client.V1JobSpec(
        # selector=k8s_client.V1LabelSelector(match_labels={"app": "copy-dataset-worker"}),
        template=template)
    # Instantiate the Job object
    job = k8s_client.V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=k8s_client.V1ObjectMeta(name=container.name),
        spec=spec)
    return job
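The created V1Job still has to be submitted to the API server. A minimal sketch, assuming kubeconfig-based access and a default namespace; the function name and namespace are placeholders.

from kubernetes import client as k8s_client, config

def submit_copy_job(runner_image, region, s3_path, pvc_name, namespace="default"):
    # Load credentials from ~/.kube/config; use config.load_incluster_config() when running inside a Pod.
    config.load_kube_config()
    job = create_job_object(runner_image, region, s3_path, pvc_name)
    k8s_client.BatchV1Api().create_namespaced_job(namespace=namespace, body=job)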
Example 5: resnet_pipeline
# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1VolumeMount [as alias]
def resnet_pipeline(
        raw_data_dir='/mnt/workspace/raw_data',
        processed_data_dir='/mnt/workspace/processed_data',
        model_dir='/mnt/workspace/saved_model',
        epochs=50,
        trtserver_name='trtis',
        model_name='resnet_graphdef',
        model_version=1,
        webapp_prefix='webapp',
        webapp_port=80
):
    persistent_volume_name = 'nvidia-workspace'
    persistent_volume_path = '/mnt/workspace'

    op_dict = {}
    op_dict['preprocess'] = PreprocessOp(
        'preprocess', raw_data_dir, processed_data_dir)
    op_dict['train'] = TrainOp(
        'train', op_dict['preprocess'].output, model_dir, model_name, model_version, epochs)
    op_dict['deploy_inference_server'] = InferenceServerLauncherOp(
        'deploy_inference_server', op_dict['train'].output, trtserver_name)
    op_dict['deploy_webapp'] = WebappLauncherOp(
        'deploy_webapp', op_dict['deploy_inference_server'].output,
        model_name, model_version, webapp_prefix, webapp_port)

    for _, container_op in op_dict.items():
        container_op.add_volume(k8s_client.V1Volume(
            host_path=k8s_client.V1HostPathVolumeSource(
                path=persistent_volume_path),
            name=persistent_volume_name))
        container_op.add_volume_mount(k8s_client.V1VolumeMount(
            mount_path=persistent_volume_path,
            name=persistent_volume_name))
Example 6: _FindVolumeMountForPath
# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1VolumeMount [as alias]
def _FindVolumeMountForPath(self, path) -> Optional[k8s_client.V1VolumeMount]:
    if not os.path.exists(path):
        return None
    for mount in self._executor_container.volume_mounts:
        if _is_subdirectory(mount.mount_path, self._model_path):
            return mount
    return None
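The helper _is_subdirectory is not shown in this snippet. A plausible stand-in (an assumption, not the project's actual implementation) would be:

import os

def _is_subdirectory(parent_path: str, child_path: str) -> bool:
    # True if child_path equals parent_path or lives underneath it.
    parent_path = os.path.realpath(parent_path)
    child_path = os.path.realpath(child_path)
    return child_path == parent_path or child_path.startswith(parent_path + os.sep)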
Example 7: _SetupModelVolumeIfNeeded
# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1VolumeMount [as alias]
def _SetupModelVolumeIfNeeded(self, pod_manifest: k8s_client.V1Pod):
    mount = self._FindVolumeMountForPath(self._model_path)
    if not mount:
        return
    [volume] = [v for v in self._executor_pod.spec.volumes
                if v.name == mount.name]
    if volume.persistent_volume_claim is None:
        raise NotImplementedError('Only PersistentVolumeClaim is allowed.')
    claim_name = volume.persistent_volume_claim.claim_name
    pvc = self._k8s_core_api.read_namespaced_persistent_volume_claim(
        name=claim_name,
        namespace=self._namespace)

    # PersistentVolumeClaim for pipeline root SHOULD have ReadWriteMany access
    # mode. Although it is allowed to mount ReadWriteOnce volume if Pods share
    # the Node, there's no guarantee the model server Pod will be launched in
    # the same Node.
    if all(access_mode != _AccessMode.READ_WRITE_MANY.value
           for access_mode in pvc.spec.access_modes):
        raise RuntimeError('Access mode should be ReadWriteMany.')

    logging.info('PersistentVolumeClaim %s will be mounted to %s.',
                 pvc, mount.mount_path)

    pod_manifest.spec.volumes.append(
        k8s_client.V1Volume(
            name=_MODEL_SERVER_MODEL_VOLUME_NAME,
            persistent_volume_claim=k8s_client
            .V1PersistentVolumeClaimVolumeSource(
                claim_name=claim_name,
                read_only=True)))
    container_manifest = _get_container_or_error(
        pod_manifest, container_name=_MODEL_SERVER_CONTAINER_NAME)
    container_manifest.volume_mounts.append(
        k8s_client.V1VolumeMount(
            name=_MODEL_SERVER_MODEL_VOLUME_NAME,
            mount_path=mount.mount_path,
            read_only=True,
        )
    )
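_AccessMode is referenced but not defined in this snippet. The access-mode strings themselves are the standard Kubernetes values, so a minimal stand-in enum could look like the following; the member names are an assumption.

import enum

class _AccessMode(enum.Enum):
    # Standard Kubernetes PersistentVolume access modes.
    READ_WRITE_ONCE = 'ReadWriteOnce'
    READ_ONLY_MANY = 'ReadOnlyMany'
    READ_WRITE_MANY = 'ReadWriteMany'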
Example 8: onnx_pipeline
# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1VolumeMount [as alias]
def onnx_pipeline(
        model,
        output_onnx_path,
        model_type,
        output_perf_result_path,
        execution_providers="",
        model_inputs_names="",
        model_outputs_names="",
        model_input_shapes="",
        model_initial_types="",
        caffe_model_prototxt="",
        target_opset=7):
    # Create a component named "Convert To ONNX" and "ONNX Runtime Perf". Edit the
    # V1PersistentVolumeClaimVolumeSource name to match the persistent volume claim you created
    # if needed. By default the names match ../azure-files-sc.yaml and ../azure-files-pvc.yaml.
    convert_op = onnxConverterOp('Convert To ONNX',
                                 '%s' % model,
                                 '%s' % output_onnx_path,
                                 '%s' % model_type,
                                 '%s' % model_inputs_names,
                                 '%s' % model_outputs_names,
                                 '%s' % model_input_shapes,
                                 '%s' % model_initial_types,
                                 '%s' % caffe_model_prototxt,
                                 '%s' % target_opset).add_volume(
        k8s_client.V1Volume(name='pipeline-nfs',
                            persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
                                claim_name='azurefile'))).add_volume_mount(
        k8s_client.V1VolumeMount(mount_path='/mnt', name='pipeline-nfs'))

    perf_op = perfTestOp('ONNX Runtime Perf',
                         convert_op.output,
                         '%s' % output_perf_result_path,
                         '%s' % execution_providers,
                         ).add_volume(
        k8s_client.V1Volume(name='pipeline-nfs',
                            persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
                                claim_name='azurefile'))).add_volume_mount(
        k8s_client.V1VolumeMount(mount_path='/mnt', name='pipeline-nfs')).set_gpu_limit(1)

    dsl.get_pipeline_conf().set_image_pull_secrets([k8s_client.V1ObjectReference(name="regcred")])
Example 9: use_secret
# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1VolumeMount [as alias]
def use_secret(secret_name: str, secret_volume_mount_path: str, env_variable: str = None, secret_file_path_in_volume: str = None):
    """
    An operator that configures the container to use a secret.

    This assumes that the secret is created and available in the k8s cluster.

    Keyword Arguments:
        secret_name {String} -- [Required] The k8s secret name.
        secret_volume_mount_path {String} -- [Required] The path where the secret is mounted.
        env_variable {String} -- Env variable pointing to the mounted secret file. Requires both
            env_variable and secret_file_path_in_volume to be defined. The value is the path to the secret.
        secret_file_path_in_volume {String} -- The path to the secret inside the volume. This will
            be the value of env_variable. Both env_variable and secret_file_path_in_volume need to
            be set if an env variable should be created.

    Raises:
        ValueError: If the required arguments (secret_name, secret_volume_mount_path) are not
            supplied, or if only one of env_variable and secret_file_path_in_volume is supplied.

    Returns:
        [ContainerOperator] -- Returns the container operator after it has been modified.
    """
    secret_name = str(secret_name)
    if '{{' in secret_name:
        volume_name = ''.join(random.choices(string.ascii_lowercase + string.digits, k=10)) + "_volume"
    else:
        volume_name = secret_name
    for param, param_name in zip([secret_name, secret_volume_mount_path], ["secret_name", "secret_volume_mount_path"]):
        if param == "":
            raise ValueError("The '{}' must not be empty".format(param_name))
    if bool(env_variable) != bool(secret_file_path_in_volume):
        raise ValueError("Both {} and {} need to be supplied together or not at all".format(env_variable, secret_file_path_in_volume))

    def _use_secret(task):
        import os
        from kubernetes import client as k8s_client
        task = task.add_volume(
            k8s_client.V1Volume(
                name=volume_name,
                secret=k8s_client.V1SecretVolumeSource(
                    secret_name=secret_name
                )
            )
        ).add_volume_mount(
            k8s_client.V1VolumeMount(
                name=volume_name,
                mount_path=secret_volume_mount_path
            )
        )
        if env_variable:
            task.container.add_env_variable(
                k8s_client.V1EnvVar(
                    name=env_variable,
                    value=os.path.join(secret_volume_mount_path, secret_file_path_in_volume),
                )
            )
        return task
    return _use_secret
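A hypothetical application to a pipeline step, assuming the KFP v1 SDK and a cluster secret named my-secret that contains a token key; all names below are placeholders.

import kfp.dsl as dsl

@dsl.pipeline(name='use-secret-demo')
def secret_demo_pipeline():
    read_op = dsl.ContainerOp(
        name='read-secret',
        image='busybox',
        command=['sh', '-c', 'cat "$MY_TOKEN_PATH"'])
    read_op.apply(use_secret(
        secret_name='my-secret',
        secret_volume_mount_path='/secret/my-secret',
        env_variable='MY_TOKEN_PATH',
        secret_file_path_in_volume='token'))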
Example 10: use_gcp_secret
# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1VolumeMount [as alias]
def use_gcp_secret(secret_name='user-gcp-sa', secret_file_path_in_volume=None, volume_name=None, secret_volume_mount_path='/secret/gcp-credentials'):
    """An operator that configures the container to use a GCP service account via a service
    account key stored in a Kubernetes secret.

    For cluster setup and alternatives to using a service account key, check
    https://www.kubeflow.org/docs/gke/authentication-pipelines/.
    """
    # permitted values for secret_name = ['admin-gcp-sa', 'user-gcp-sa']
    if secret_file_path_in_volume is None:
        secret_file_path_in_volume = '/' + secret_name + '.json'

    if volume_name is None:
        volume_name = 'gcp-credentials-' + secret_name
    else:
        import warnings
        warnings.warn('The volume_name parameter is deprecated and will be removed in next release. The volume names are now generated automatically.', DeprecationWarning)

    def _use_gcp_secret(task):
        from kubernetes import client as k8s_client
        task = task.add_volume(
            k8s_client.V1Volume(
                name=volume_name,
                secret=k8s_client.V1SecretVolumeSource(
                    secret_name=secret_name,
                )
            )
        )
        task.container \
            .add_volume_mount(
                k8s_client.V1VolumeMount(
                    name=volume_name,
                    mount_path=secret_volume_mount_path,
                )
            ) \
            .add_env_variable(
                k8s_client.V1EnvVar(
                    name='GOOGLE_APPLICATION_CREDENTIALS',
                    value=secret_volume_mount_path + secret_file_path_in_volume,
                )
            ) \
            .add_env_variable(
                k8s_client.V1EnvVar(
                    name='CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE',
                    value=secret_volume_mount_path + secret_file_path_in_volume,
                )
            )  # Set GCloud credentials via the env var override.
        # TODO: Is there a better way for GCloud to pick up the credential?
        return task
    return _use_gcp_secret
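Typical usage is to apply the modifier to a step, assuming the KFP v1 SDK and that the user-gcp-sa secret exists in the cluster; the image and command below are placeholders.

import kfp.dsl as dsl

@dsl.pipeline(name='gcp-secret-demo')
def gcp_secret_demo_pipeline():
    train = dsl.ContainerOp(
        name='train',
        image='google/cloud-sdk:slim',
        command=['sh', '-c', 'gcloud auth list'])
    train.apply(use_gcp_secret('user-gcp-sa'))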
Example 11: _AssumeInsideKfp
# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1VolumeMount [as alias]
def _AssumeInsideKfp(
        self,
        namespace='my-namespace',
        pod_name='my-pod-name',
        pod_uid='my-pod-uid',
        pod_service_account_name='my-service-account-name',
        with_pvc=False):
    pod = k8s_client.V1Pod(
        api_version='v1',
        kind='Pod',
        metadata=k8s_client.V1ObjectMeta(
            name=pod_name,
            uid=pod_uid,
        ),
        spec=k8s_client.V1PodSpec(
            containers=[
                k8s_client.V1Container(
                    name='main',
                    volume_mounts=[]),
            ],
            volumes=[]))
    if with_pvc:
        pod.spec.volumes.append(
            k8s_client.V1Volume(
                name='my-volume',
                persistent_volume_claim=k8s_client
                .V1PersistentVolumeClaimVolumeSource(
                    claim_name='my-pvc')))
        pod.spec.containers[0].volume_mounts.append(
            k8s_client.V1VolumeMount(
                name='my-volume',
                mount_path=self._base_dir))
    mock.patch.object(kube_utils, 'is_inside_kfp', return_value=True).start()
    pod.spec.service_account_name = pod_service_account_name
    mock.patch.object(kube_utils, 'get_current_kfp_pod',
                      return_value=pod).start()
    mock.patch.object(kube_utils, 'get_kfp_namespace',
                      return_value=namespace).start()
    if with_pvc:
        (self._mock_core_v1_api.read_namespaced_persistent_volume_claim
         .return_value) = k8s_client.V1PersistentVolumeClaim(
            metadata=k8s_client.V1ObjectMeta(
                name='my-pvc'),
            spec=k8s_client.V1PersistentVolumeClaimSpec(
                access_modes=['ReadWriteMany']))
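Because the patchers above are started with .start() and never stopped in this snippet, the enclosing test class presumably cleans them up in tearDown; a minimal sketch under that assumption (the class name is hypothetical):

import unittest
from unittest import mock

class KubernetesRunnerTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        mock.patch.stopall()  # stop every patcher started via .start() in _AssumeInsideKfp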