

Python client.V1Volume method code examples

This article collects typical usage examples of the kubernetes.client.V1Volume method in Python. If you are unsure what client.V1Volume is for, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples from the kubernetes.client module.


The 12 code examples of the client.V1Volume method shown below are ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
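Before the collected examples, a quick note on the shape of the API: a V1Volume pairs a name with exactly one volume source field (empty_dir, host_path, secret, persistent_volume_claim, and so on). A minimal sketch, assuming only the official kubernetes Python client is installed:

from kubernetes import client

# An ephemeral scratch volume: "name" is what a V1VolumeMount refers back to,
# and empty_dir is the single volume source set on this V1Volume.
scratch = client.V1Volume(
    name="scratch",
    empty_dir=client.V1EmptyDirVolumeSource(),
)
print(scratch.name)  # -> scratch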

Example 1: mount_pvc

# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1Volume [as alias]
def mount_pvc(pvc_name='pipeline-claim', volume_name='pipeline', volume_mount_path='/mnt/pipeline'):
    """
        Modifier function to apply to a Container Op to simplify volume, volume mount addition and
        enable better reuse of volumes, volume claims across container ops.
        Usage:
            train = train_op(...)
            train.apply(mount_pvc('claim-name', 'pipeline', '/mnt/pipeline'))
    """
    def _mount_pvc(task):
        from kubernetes import client as k8s_client
        # there can be other ops in a pipeline (e.g. ResourceOp, VolumeOp)
        # refer to #3906
        if not hasattr(task, "add_volume") or not hasattr(task, "add_volume_mount"):
            return task
        local_pvc = k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name)
        return (
            task
                .add_volume(
                    k8s_client.V1Volume(name=volume_name, persistent_volume_claim=local_pvc)
                )
                .add_volume_mount(
                    k8s_client.V1VolumeMount(mount_path=volume_mount_path, name=volume_name)
                )
        )
    return _mount_pvc 
Developer: kubeflow, Project: pipelines, Lines: 27, Source: onprem.py
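A hedged usage sketch for the modifier above: only the apply() call and the argument order come from the docstring; the pipeline and the ContainerOp are hypothetical placeholders (kfp 1.x SDK assumed).

import kfp.dsl as dsl

@dsl.pipeline(name="mount-pvc-demo")          # hypothetical pipeline
def demo_pipeline():
    train = dsl.ContainerOp(                  # stand-in for train_op(...)
        name="train",
        image="library/bash:4.4.23",
        command=["sh", "-c"],
        arguments=["ls /mnt/pipeline"],
    )
    # Attach the PVC-backed volume and the mount defined by mount_pvc().
    train.apply(mount_pvc("pipeline-claim", "pipeline", "/mnt/pipeline"))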

Example 2: volume_pipeline

# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1Volume [as alias]
def volume_pipeline():
  op1 = dsl.ContainerOp(
      name='download',
      image='google/cloud-sdk',
      command=['sh', '-c'],
      arguments=['ls | tee /tmp/results.txt'],
      file_outputs={'downloaded': '/tmp/results.txt'}) \
    .add_volume(k8s_client.V1Volume(name='gcp-credentials',
                                   secret=k8s_client.V1SecretVolumeSource(
                                       secret_name='user-gcp-sa'))) \
    .add_volume_mount(k8s_client.V1VolumeMount(
      mount_path='/secret/gcp-credentials', name='gcp-credentials')) \
    .add_env_variable(k8s_client.V1EnvVar(
      name='GOOGLE_APPLICATION_CREDENTIALS',
      value='/secret/gcp-credentials/user-gcp-sa.json')) \
    .add_env_variable(k8s_client.V1EnvVar(name='Foo', value='bar'))
  op2 = dsl.ContainerOp(
      name='echo',
      image='library/bash',
      command=['sh', '-c'],
      arguments=['echo %s' % op1.output]) 
Developer: kubeflow, Project: pipelines, Lines: 23, Source: volume.py
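A hedged sketch of compiling this sample, assuming the kfp 1.x SDK; in the original volume.py the function also carries a @dsl.pipeline decorator, and the output file name here is a placeholder.

import kfp.compiler as compiler

# Compiling produces a pipeline package that can be uploaded to Kubeflow Pipelines.
compiler.Compiler().compile(volume_pipeline, "volume_pipeline.yaml")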

Example 3: get_pod_volumes

# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1Volume [as alias]
def get_pod_volumes(
        self,
        docker_volumes: Sequence[DockerVolume],
        aws_ebs_volumes: Sequence[AwsEbsVolume],
    ) -> Sequence[V1Volume]:
        pod_volumes = []
        unique_docker_volumes = {
            self.get_docker_volume_name(docker_volume): docker_volume
            for docker_volume in docker_volumes
        }
        for name, docker_volume in unique_docker_volumes.items():
            pod_volumes.append(
                V1Volume(
                    host_path=V1HostPathVolumeSource(path=docker_volume["hostPath"]),
                    name=name,
                )
            )
        unique_aws_ebs_volumes = {
            self.get_aws_ebs_volume_name(aws_ebs_volume): aws_ebs_volume
            for aws_ebs_volume in aws_ebs_volumes
        }
        for name, aws_ebs_volume in unique_aws_ebs_volumes.items():
            pod_volumes.append(
                V1Volume(
                    aws_elastic_block_store=V1AWSElasticBlockStoreVolumeSource(
                        volume_id=aws_ebs_volume["volume_id"],
                        fs_type=aws_ebs_volume.get("fs_type"),
                        partition=aws_ebs_volume.get("partition"),
                        # k8s wants RW volume even if it's later mounted RO
                        read_only=False,
                    ),
                    name=name,
                )
            )
        return pod_volumes 
Developer: Yelp, Project: paasta, Lines: 37, Source: kubernetes_tools.py
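DockerVolume and AwsEbsVolume are paasta-specific mapping types not shown here; the sketch below only illustrates the keys that get_pod_volumes actually reads, with placeholder values.

# Shapes inferred from the keys read in get_pod_volumes(); the real type
# definitions live in paasta and may carry additional fields.
docker_volumes = [
    {"hostPath": "/nail/etc/service"},                 # only hostPath is read here
]
aws_ebs_volumes = [
    {"volume_id": "vol-0123456789abcdef0", "fs_type": "ext4", "partition": 1},
]
# These sequences are passed to get_pod_volumes(...) on the paasta config
# object that defines this method.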

Example 4: create_job_object

# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1Volume [as alias]
def create_job_object(runner_image, region, s3_path, pvc_name):
  target_folder = get_target_folder(s3_path)

  # Configure the Pod template container
  container = k8s_client.V1Container(
      name="copy-dataset-worker",
      image=runner_image,
      command=["aws"],
      args=["s3", "sync", s3_path, "/mnt/" + target_folder],
      volume_mounts=[k8s_client.V1VolumeMount(name="data-storage", mount_path='/mnt')],
      env=[k8s_client.V1EnvVar(name="AWS_REGION", value=region),
        k8s_client.V1EnvVar(name="AWS_ACCESS_KEY_ID", value_from=k8s_client.V1EnvVarSource(secret_key_ref=k8s_client.V1SecretKeySelector(key="AWS_ACCESS_KEY_ID", name="aws-secret"))),
        k8s_client.V1EnvVar(name="AWS_SECRET_ACCESS_KEY", value_from=k8s_client.V1EnvVarSource(secret_key_ref=k8s_client.V1SecretKeySelector(key="AWS_SECRET_ACCESS_KEY", name="aws-secret")))
        ],
    )
  volume = k8s_client.V1Volume(
    name='data-storage',
    persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(claim_name=pvc_name)
  )
  # Create and configure the Pod template spec
  template = k8s_client.V1PodTemplateSpec(
      # metadata=k8s_client.V1ObjectMeta(labels={"app":"copy-dataset-worker"}),
      spec=k8s_client.V1PodSpec(containers=[container], volumes=[volume], restart_policy="OnFailure"))
  # Create the specification of the Job
  spec = k8s_client.V1JobSpec(
      # selector=k8s_client.V1LabelSelector(match_labels={"app":"copy-dataset-worker"}),
      template=template)
  # Instantiate the Job object
  job = k8s_client.V1Job(
      api_version="batch/v1",
      kind="Job",
      metadata=k8s_client.V1ObjectMeta(name=container.name),
      spec=spec)

  return job
Developer: aws-samples, Project: aws-eks-deep-learning-benchmark, Lines: 37, Source: copy_dataset.py
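A hedged sketch of submitting the returned Job, assuming cluster credentials are available via a local kubeconfig; the image, bucket, PVC name, and namespace are placeholders.

from kubernetes import client as k8s_client, config

config.load_kube_config()                      # or config.load_incluster_config() in-cluster
job = create_job_object(
    runner_image="amazon/aws-cli",             # placeholder image that provides the aws CLI
    region="us-west-2",
    s3_path="s3://my-bucket/dataset",          # placeholder bucket
    pvc_name="dataset-claim",                  # placeholder PVC
)
# Create the Job in the cluster.
k8s_client.BatchV1Api().create_namespaced_job(namespace="default", body=job)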

Example 5: _host_volume

# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1Volume [as alias]
def _host_volume(name, path, type):
    return client.V1Volume(
        name=name,
        host_path=client.V1HostPathVolumeSource(path=path, type=type)
    ) 
Developer: anibali, Project: margipose, Lines: 7, Source: deploy.py
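Usage is a one-liner; the name, path, and hostPath type below are placeholders (the type must be a value Kubernetes accepts for hostPath, such as "Directory" or "DirectoryOrCreate").

from kubernetes import client

datasets = _host_volume(name="datasets", path="/srv/datasets", type="Directory")
assert isinstance(datasets, client.V1Volume)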

Example 6: resourceop_basic

# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1Volume [as alias]
def resourceop_basic(username, password):
    secret_resource = k8s_client.V1Secret(
        api_version="v1",
        kind="Secret",
        metadata=k8s_client.V1ObjectMeta(generate_name="my-secret-"),
        type="Opaque",
        data={"username": username, "password": password}
    )
    rop = dsl.ResourceOp(
        name="create-my-secret",
        k8s_resource=secret_resource,
        attribute_outputs={"name": "{.metadata.name}"}
    )

    secret = k8s_client.V1Volume(
        name="my-secret",
        secret=k8s_client.V1SecretVolumeSource(secret_name=rop.output)
    )

    cop = dsl.ContainerOp(
        name="cop",
        image="library/bash:4.4.23",
        command=["sh", "-c"],
        arguments=["ls /etc/secret-volume"],
        pvolumes={"/etc/secret-volume": secret}
    ) 
Developer: kubeflow, Project: pipelines, Lines: 28, Source: resourceop_basic.py

Example 7: use_ai_pipeline_params

# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1Volume [as alias]
def use_ai_pipeline_params(secret_name, secret_volume_mount_path='/app/secrets', image_pull_policy='IfNotPresent'):
    def _use_ai_pipeline_params(task):
        from kubernetes import client as k8s_client
        task = task.add_volume(k8s_client.V1Volume(name=secret_name,  # secret_name as volume name
                                                   secret=k8s_client.V1SecretVolumeSource(secret_name=secret_name)))
        task.container.add_volume_mount(k8s_client.V1VolumeMount(mount_path=secret_volume_mount_path, 
                                                                 name=secret_name))
        task.container.set_image_pull_policy(image_pull_policy)
        return task
    return _use_ai_pipeline_params


# create pipelines 
Developer: kubeflow, Project: pipelines, Lines: 15, Source: watson_train_serve_pipeline.py
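A hedged usage sketch, assuming the kfp 1.x SDK; the pipeline, step, and secret name are hypothetical placeholders, and the secret must already exist in the cluster.

import kfp.dsl as dsl

@dsl.pipeline(name="watson-demo")                       # hypothetical pipeline
def demo_pipeline():
    step = dsl.ContainerOp(
        name="train",
        image="library/bash:4.4.23",
        command=["sh", "-c"],
        arguments=["ls /app/secrets"],
    )
    # Mounts the Kubernetes secret "ai-pipeline-params" at /app/secrets.
    step.apply(use_ai_pipeline_params("ai-pipeline-params"))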

Example 8: _SetupModelVolumeIfNeeded

# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1Volume [as alias]
def _SetupModelVolumeIfNeeded(self, pod_manifest: k8s_client.V1Pod):
    mount = self._FindVolumeMountForPath(self._model_path)
    if not mount:
      return
    [volume] = [v for v in self._executor_pod.spec.volumes
                if v.name == mount.name]
    if volume.persistent_volume_claim is None:
      raise NotImplementedError('Only PersistentVolumeClaim is allowed.')
    claim_name = volume.persistent_volume_claim.claim_name
    pvc = self._k8s_core_api.read_namespaced_persistent_volume_claim(
        name=claim_name,
        namespace=self._namespace)

    # PersistentVolumeClaim for pipeline root SHOULD have ReadWriteMany access
    # mode. Although it is allowed to mount ReadWriteOnce volume if Pods share
    # the Node, there's no guarantee the model server Pod will be launched in
    # the same Node.
    if all(access_mode != _AccessMode.READ_WRITE_MANY.value
           for access_mode in pvc.spec.access_modes):
      raise RuntimeError('Access mode should be ReadWriteMany.')

    logging.info('PersistentVolumeClaim %s will be mounted to %s.',
                 pvc, mount.mount_path)

    pod_manifest.spec.volumes.append(
        k8s_client.V1Volume(
            name=_MODEL_SERVER_MODEL_VOLUME_NAME,
            persistent_volume_claim=k8s_client
            .V1PersistentVolumeClaimVolumeSource(
                claim_name=claim_name,
                read_only=True)))
    container_manifest = _get_container_or_error(
        pod_manifest, container_name=_MODEL_SERVER_CONTAINER_NAME)
    container_manifest.volume_mounts.append(
        k8s_client.V1VolumeMount(
            name=_MODEL_SERVER_MODEL_VOLUME_NAME,
            mount_path=mount.mount_path,
            read_only=True,
        )
    ) 
Developer: tensorflow, Project: tfx, Lines: 42, Source: kubernetes_runner.py

Example 9: onnx_pipeline

# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1Volume [as alias]
def onnx_pipeline(
  model,
  output_onnx_path, 
  model_type,
  output_perf_result_path,
  execution_providers="",
  model_inputs_names="", 
  model_outputs_names="",
  model_input_shapes="",
  model_initial_types="",
  caffe_model_prototxt="",
  target_opset=7):

  # Create a component named "Convert To ONNX" and "ONNX Runtime Perf". Edit the V1PersistentVolumeClaimVolumeSource 
  # name to match the persistent volume claim you created if needed. By default the names match ../azure-files-sc.yaml 
  # and ../azure-files-pvc.yaml
  convert_op = onnxConverterOp('Convert To ONNX', 
    '%s' % model, 
    '%s' % output_onnx_path, 
    '%s' % model_type,
    '%s' % model_inputs_names, 
    '%s' % model_outputs_names,
    '%s' % model_input_shapes,
    '%s' % model_initial_types,
    '%s' % caffe_model_prototxt,
    '%s' % target_opset).add_volume(
        k8s_client.V1Volume(name='pipeline-nfs', persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
            claim_name='azurefile'))).add_volume_mount(k8s_client.V1VolumeMount(mount_path='/mnt', name='pipeline-nfs'))   

  perf_op = perfTestOp('ONNX Runtime Perf', 
    convert_op.output,
    '%s' % output_perf_result_path,
    '%s' % execution_providers,
    ).add_volume(
        k8s_client.V1Volume(name='pipeline-nfs', persistent_volume_claim=k8s_client.V1PersistentVolumeClaimVolumeSource(
            claim_name='azurefile'))).add_volume_mount(
    k8s_client.V1VolumeMount(mount_path='/mnt', name='pipeline-nfs')).set_gpu_limit(1)

  dsl.get_pipeline_conf().set_image_pull_secrets([k8s_client.V1ObjectReference(name="regcred")]) 
Developer: microsoft, Project: OLive, Lines: 41, Source: kubeflow-pipeline.py

Example 10: use_secret

# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1Volume [as alias]
def use_secret(secret_name:str, secret_volume_mount_path:str, env_variable:str=None, secret_file_path_in_volume:str=None):
    """    
       An operator that configures the container to use a secret.
       
       This assumes that the secret is created and availabel in the k8s cluster.
    
    Keyword Arguments:
        secret_name {String} -- [Required] The k8s secret name.
        secret_volume_mount_path {String} -- [Required] The path to the secret that is mounted.
        env_variable {String} -- Env variable pointing to the mounted secret file. Requires both the env_variable and secret_file_path_in_volume to be defined. 
                                 The value is the path to the secret.
        secret_file_path_in_volume {String} -- The path to the secret in the volume. This will be the value of env_variable. 
                                 Both env_variable and secret_file_path_in_volume needs to be set if any env variable should be created.
    
    Raises:
        ValueError: If not the necessary variables (secret_name, volume_name", secret_volume_mount_path) are supplied.
                    Or only one of  env_variable and secret_file_path_in_volume are supplied
    
    Returns:
        [ContainerOperator] -- Returns the container operator after it has been modified. 
    """

    secret_name = str(secret_name)
    if '{{' in secret_name:
        volume_name = ''.join(random.choices(string.ascii_lowercase + string.digits, k=10)) + "_volume"
    else:
        volume_name = secret_name
    for param, param_name in zip([secret_name, secret_volume_mount_path],["secret_name","secret_volume_mount_path"]):
        if param == "":
            raise ValueError("The '{}' must not be empty".format(param_name))
    if bool(env_variable) != bool(secret_file_path_in_volume):
        raise ValueError("Both {} and {} needs to be supplied together or not at all".format(env_variable, secret_file_path_in_volume))

    def _use_secret(task):
        import os 
        from kubernetes import client as k8s_client
        task = task.add_volume(
            k8s_client.V1Volume(
                name=volume_name,
                secret=k8s_client.V1SecretVolumeSource(
                    secret_name=secret_name
                )
            )
        ).add_volume_mount(
                k8s_client.V1VolumeMount(
                    name=volume_name,
                    mount_path=secret_volume_mount_path
                )
            )
        if env_variable:
            task.container.add_env_variable(
                k8s_client.V1EnvVar(
                    name=env_variable,
                    value=os.path.join(secret_volume_mount_path, secret_file_path_in_volume),
                )
            )
        return task
    
    return _use_secret 
Developer: kubeflow, Project: pipelines, Lines: 61, Source: kubernetes.py
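A hedged usage sketch, assuming the kfp 1.x SDK; the secret name, mount path, env variable, and file name are hypothetical placeholders.

import kfp.dsl as dsl

@dsl.pipeline(name="use-secret-demo")                  # hypothetical pipeline
def demo_pipeline():
    step = dsl.ContainerOp(
        name="consume-secret",
        image="library/bash:4.4.23",
        command=["sh", "-c"],
        arguments=["cat $MY_TOKEN_PATH"],
    )
    # Mount the secret at /etc/my-secret and expose the path of its "token"
    # file (/etc/my-secret/token) via the MY_TOKEN_PATH env variable.
    step.apply(use_secret(
        secret_name="my-secret",
        secret_volume_mount_path="/etc/my-secret",
        env_variable="MY_TOKEN_PATH",
        secret_file_path_in_volume="token",
    ))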

Example 11: use_gcp_secret

# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1Volume [as alias]
def use_gcp_secret(secret_name='user-gcp-sa', secret_file_path_in_volume=None, volume_name=None, secret_volume_mount_path='/secret/gcp-credentials'):
    """An operator that configures the container to use GCP service account by service account key
        stored in a Kubernetes secret.

        For cluster setup and alternatives to using service account key, check https://www.kubeflow.org/docs/gke/authentication-pipelines/.
    """

    # permitted values for secret_name = ['admin-gcp-sa', 'user-gcp-sa']
    if secret_file_path_in_volume is None:
        secret_file_path_in_volume = '/' + secret_name + '.json'

    if volume_name is None:
        volume_name = 'gcp-credentials-' + secret_name
    else:
        import warnings
        warnings.warn('The volume_name parameter is deprecated and will be removed in next release. The volume names are now generated automatically.', DeprecationWarning)
    
    def _use_gcp_secret(task):
        from kubernetes import client as k8s_client
        task = task.add_volume(
            k8s_client.V1Volume(
                name=volume_name,
                secret=k8s_client.V1SecretVolumeSource(
                    secret_name=secret_name,
                )
            )
        )
        task.container \
            .add_volume_mount(
                    k8s_client.V1VolumeMount(
                        name=volume_name,
                        mount_path=secret_volume_mount_path,
                    )
                ) \
            .add_env_variable(
                k8s_client.V1EnvVar(
                    name='GOOGLE_APPLICATION_CREDENTIALS',
                    value=secret_volume_mount_path + secret_file_path_in_volume,
                )
            ) \
            .add_env_variable(
                k8s_client.V1EnvVar(
                    name='CLOUDSDK_AUTH_CREDENTIAL_FILE_OVERRIDE',
                    value=secret_volume_mount_path + secret_file_path_in_volume,
                )
            ) # Set GCloud Credentials by using the env var override.
              # TODO: Is there a better way for GCloud to pick up the credential?
        return task
    
    return _use_gcp_secret 
Developer: kubeflow, Project: pipelines, Lines: 53, Source: gcp.py
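A hedged usage sketch, assuming the kfp 1.x SDK where this helper is importable as kfp.gcp.use_gcp_secret; the pipeline and bucket are placeholders, and the 'user-gcp-sa' secret must exist in the cluster.

import kfp.dsl as dsl
from kfp import gcp  # this source file is kfp's gcp.py in the 1.x SDK

@dsl.pipeline(name="gcp-secret-demo")                  # hypothetical pipeline
def demo_pipeline():
    step = dsl.ContainerOp(
        name="list-bucket",
        image="google/cloud-sdk",
        command=["sh", "-c"],
        arguments=["gsutil ls gs://my-bucket"],        # placeholder bucket
    )
    # Mounts the service account key and sets GOOGLE_APPLICATION_CREDENTIALS.
    step.apply(gcp.use_gcp_secret("user-gcp-sa"))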

Example 12: _AssumeInsideKfp

# Required import: from kubernetes import client [as alias]
# Or: from kubernetes.client import V1Volume [as alias]
def _AssumeInsideKfp(
      self,
      namespace='my-namespace',
      pod_name='my-pod-name',
      pod_uid='my-pod-uid',
      pod_service_account_name='my-service-account-name',
      with_pvc=False):
    pod = k8s_client.V1Pod(
        api_version='v1',
        kind='Pod',
        metadata=k8s_client.V1ObjectMeta(
            name=pod_name,
            uid=pod_uid,
        ),
        spec=k8s_client.V1PodSpec(
            containers=[
                k8s_client.V1Container(
                    name='main',
                    volume_mounts=[]),
            ],
            volumes=[]))

    if with_pvc:
      pod.spec.volumes.append(
          k8s_client.V1Volume(
              name='my-volume',
              persistent_volume_claim=k8s_client
              .V1PersistentVolumeClaimVolumeSource(
                  claim_name='my-pvc')))
      pod.spec.containers[0].volume_mounts.append(
          k8s_client.V1VolumeMount(
              name='my-volume',
              mount_path=self._base_dir))

    mock.patch.object(kube_utils, 'is_inside_kfp', return_value=True).start()
    pod.spec.service_account_name = pod_service_account_name
    mock.patch.object(kube_utils, 'get_current_kfp_pod',
                      return_value=pod).start()
    mock.patch.object(kube_utils, 'get_kfp_namespace',
                      return_value=namespace).start()
    if with_pvc:
      (self._mock_core_v1_api.read_namespaced_persistent_volume_claim
       .return_value) = k8s_client.V1PersistentVolumeClaim(
           metadata=k8s_client.V1ObjectMeta(
               name='my-pvc'),
           spec=k8s_client.V1PersistentVolumeClaimSpec(
               access_modes=['ReadWriteMany'])) 
Developer: tensorflow, Project: tfx, Lines: 49, Source: kubernetes_runner_test.py


Note: The kubernetes.client.V1Volume method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from community-contributed open-source projects; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.