

Python WorkerConfiguration.make_pod Method Code Examples

This article collects typical usage examples of the Python method airflow.contrib.kubernetes.worker_configuration.WorkerConfiguration.make_pod. If you are unsure what WorkerConfiguration.make_pod does, how to call it, or where to find working samples, the curated examples below should help. You can also explore the broader usage of the class it belongs to, airflow.contrib.kubernetes.worker_configuration.WorkerConfiguration.


Four code examples of the WorkerConfiguration.make_pod method are shown below, sorted by popularity by default.
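To orient the reader before the examples, here is a minimal sketch of how make_pod is typically driven. The import paths for KubeConfig and KubernetesExecutorConfig are assumptions of this sketch (these classes moved around across Airflow 1.10.x releases), and note that the signature of make_pod itself also varies between releases; some versions take an extra try_number argument, as Examples 1 and 4 show.

# A minimal sketch, not taken from the project's test suite. It assumes
# KubeConfig and KubernetesExecutorConfig live at this contrib path,
# which may differ in your Airflow 1.10.x release.
import uuid
from datetime import datetime

from airflow.contrib.executors.kubernetes_executor import (
    KubeConfig, KubernetesExecutorConfig)
from airflow.contrib.kubernetes.worker_configuration import WorkerConfiguration

kube_config = KubeConfig()  # reads the [kubernetes] section of airflow.cfg
worker_config = WorkerConfiguration(kube_config)
executor_config = KubernetesExecutorConfig(annotations=[], volumes=[],
                                           volume_mounts=[])

# Build (but do not launch) a pod spec for one task instance; the keyword
# names follow the call shown in Example 3 below.
pod = worker_config.make_pod(
    namespace="default",
    worker_uuid=str(uuid.uuid4()),
    pod_id="example-pod-id",
    dag_id="example_dag",
    task_id="example_task",
    execution_date=str(datetime.utcnow()),
    airflow_command="bash -c 'ls /'",
    kube_executor_config=executor_config,
)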

Example 1: test_make_pod_git_sync_ssh_without_known_hosts

# Required import: from airflow.contrib.kubernetes.worker_configuration import WorkerConfiguration [as alias]
# Or: from airflow.contrib.kubernetes.worker_configuration.WorkerConfiguration import make_pod [as alias]
    def test_make_pod_git_sync_ssh_without_known_hosts(self):
        # Tests that a pod created with the git-sync SSH authentication option is correct when no known_hosts is configured
        self.kube_config.airflow_configmap = 'airflow-configmap'
        self.kube_config.git_ssh_key_secret_name = 'airflow-secrets'
        self.kube_config.dags_volume_claim = None
        self.kube_config.dags_volume_host = None
        self.kube_config.dags_in_image = None
        self.kube_config.worker_fs_group = None

        worker_config = WorkerConfiguration(self.kube_config)
        kube_executor_config = KubernetesExecutorConfig(annotations=[],
                                                        volumes=[],
                                                        volume_mounts=[])

        pod = worker_config.make_pod("default", str(uuid.uuid4()), "test_pod_id", "test_dag_id",
                                     "test_task_id", str(datetime.utcnow()), 1, "bash -c 'ls /'",
                                     kube_executor_config)

        init_containers = worker_config._get_init_containers()
        git_ssh_key_file = next((x['value'] for x in init_containers[0]['env']
                                if x['name'] == 'GIT_SSH_KEY_FILE'), None)
        volume_mount_ssh_key = next((x['mountPath'] for x in init_containers[0]['volumeMounts']
                                    if x['name'] == worker_config.git_sync_ssh_secret_volume_name),
                                    None)
        self.assertTrue(git_ssh_key_file)
        self.assertTrue(volume_mount_ssh_key)
        self.assertEqual(65533, pod.security_context['fsGroup'])
        self.assertEqual(git_ssh_key_file,
                         volume_mount_ssh_key,
                         'The location where the git ssh secret is mounted'
                         ' needs to be the same as the GIT_SSH_KEY_FILE path')
Developer ID: Fokko, Project: incubator-airflow, Lines of code: 33, Source file: test_kubernetes_executor.py
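A note on the 65533 assertion above: the git-sync sidecar image runs as user and group 65533, so when an SSH key secret is mounted and worker_fs_group is left unset, WorkerConfiguration evidently falls back to fsGroup 65533 so that the mounted secret remains readable by git-sync. This reading is inferred from the test itself; the WorkerConfiguration source is not excerpted on this page.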

Example 2: test_make_pod_with_executor_config

# Required import: from airflow.contrib.kubernetes.worker_configuration import WorkerConfiguration [as alias]
# Or: from airflow.contrib.kubernetes.worker_configuration.WorkerConfiguration import make_pod [as alias]
    def test_make_pod_with_executor_config(self):
        worker_config = WorkerConfiguration(self.kube_config)
        kube_executor_config = KubernetesExecutorConfig(affinity=self.affinity_config,
                                                        tolerations=self.tolerations_config,
                                                        annotations=[],
                                                        volumes=[],
                                                        volume_mounts=[]
                                                        )

        pod = worker_config.make_pod("default", str(uuid.uuid4()), "test_pod_id", "test_dag_id",
                                     "test_task_id", str(datetime.utcnow()), "bash -c 'ls /'",
                                     kube_executor_config)

        self.assertTrue(pod.affinity['podAntiAffinity'] is not None)
        self.assertEqual('app',
                         pod.affinity['podAntiAffinity']
                         ['requiredDuringSchedulingIgnoredDuringExecution'][0]
                         ['labelSelector']
                         ['matchExpressions'][0]
                         ['key'])

        self.assertEqual(2, len(pod.tolerations))
        self.assertEqual('prod', pod.tolerations[1]['key'])
Developer ID: sguarrinieye, Project: airflow, Lines of code: 25, Source file: test_kubernetes_executor.py
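The fixtures self.affinity_config and self.tolerations_config come from the test class's setUp and are not shown on this page. A hypothetical shape consistent with the assertions above would be:

# Hypothetical fixture values, reverse-engineered from the assertions
# above; the real values live in the test class's setUp method.
affinity_config = {
    'podAntiAffinity': {
        'requiredDuringSchedulingIgnoredDuringExecution': [{
            'labelSelector': {
                'matchExpressions': [{
                    'key': 'app',
                    'operator': 'In',
                    'values': ['airflow'],
                }]
            },
            'topologyKey': 'kubernetes.io/hostname',
        }]
    }
}
tolerations_config = [
    {'key': 'dedicated', 'operator': 'Equal', 'value': 'airflow',
     'effect': 'NoSchedule'},
    {'key': 'prod', 'operator': 'Exists'},
]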

Example 3: AirflowKubernetesScheduler

# Required import: from airflow.contrib.kubernetes.worker_configuration import WorkerConfiguration [as alias]
# Or: from airflow.contrib.kubernetes.worker_configuration.WorkerConfiguration import make_pod [as alias]
class AirflowKubernetesScheduler(LoggingMixin):
    def __init__(self, kube_config, task_queue, result_queue, session,
                 kube_client, worker_uuid):
        self.log.debug("Creating Kubernetes executor")
        self.kube_config = kube_config
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.namespace = self.kube_config.kube_namespace
        self.log.debug("Kubernetes using namespace %s", self.namespace)
        self.kube_client = kube_client
        self.launcher = PodLauncher(kube_client=self.kube_client)
        self.worker_configuration = WorkerConfiguration(kube_config=self.kube_config)
        self.watcher_queue = multiprocessing.Queue()
        self._session = session
        self.worker_uuid = worker_uuid
        self.kube_watcher = self._make_kube_watcher()

    def _make_kube_watcher(self):
        resource_version = KubeResourceVersion.get_current_resource_version(self._session)
        watcher = KubernetesJobWatcher(self.namespace, self.watcher_queue,
                                       resource_version, self.worker_uuid)
        watcher.start()
        return watcher

    def _health_check_kube_watcher(self):
        if self.kube_watcher.is_alive():
            pass
        else:
            self.log.error(
                'Error while health checking kube watcher process. '
                'Process died for unknown reasons')
            self.kube_watcher = self._make_kube_watcher()

    def run_next(self, next_job):
        """

        The run_next command will check the task_queue for any un-run jobs.
        It will then create a unique job-id, launch that job in the cluster,
        and store relevant info in the current_jobs map so we can track the job's
        status
        """
        self.log.info('Kubernetes job is %s', str(next_job))
        key, command, kube_executor_config = next_job
        dag_id, task_id, execution_date = key
        self.log.debug("Kubernetes running for command %s", command)
        self.log.debug("Kubernetes launching image %s", self.kube_config.kube_image)
        pod = self.worker_configuration.make_pod(
            namespace=self.namespace, worker_uuid=self.worker_uuid,
            pod_id=self._create_pod_id(dag_id, task_id),
            dag_id=dag_id, task_id=task_id,
            execution_date=self._datetime_to_label_safe_datestring(execution_date),
            airflow_command=command, kube_executor_config=kube_executor_config
        )
        # the watcher will monitor pods, so we do not block.
        self.launcher.run_pod_async(pod)
        self.log.debug("Kubernetes Job created!")

    def delete_pod(self, pod_id):
        if self.kube_config.delete_worker_pods:
            try:
                self.kube_client.delete_namespaced_pod(
                    pod_id, self.namespace, body=client.V1DeleteOptions())
            except ApiException as e:
                # If the pod is already deleted
                if e.status != 404:
                    raise

    def sync(self):
        """
        The sync function checks the status of all currently running kubernetes jobs.
        If a job is completed, it's status is placed in the result queue to
        be sent back to the scheduler.

        :return:

        """
        self._health_check_kube_watcher()
        while not self.watcher_queue.empty():
            self.process_watcher_task()

    def process_watcher_task(self):
        pod_id, state, labels, resource_version = self.watcher_queue.get()
        self.log.info(
            'Attempting to finish pod; pod_id: %s; state: %s; labels: %s',
            pod_id, state, labels
        )
        key = self._labels_to_key(labels=labels)
        if key:
            self.log.debug('finishing job %s - %s (%s)', key, state, pod_id)
            self.result_queue.put((key, state, pod_id, resource_version))

    @staticmethod
    def _strip_unsafe_kubernetes_special_chars(string):
        """
        Kubernetes only supports lowercase alphanumeric characters and "-" and "." in
        the pod name
        However, there are special rules about how "-" and "." can be used so let's
        only keep
        alphanumeric chars  see here for detail:
        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
#.........这里部分代码省略.........
Developer ID: doordash, Project: incubator-airflow, Lines of code: 103, Source file: kubernetes_executor.py
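The body of _strip_unsafe_kubernetes_special_chars is elided above. A minimal sketch of the behaviour its docstring describes, keeping only alphanumeric characters and lowercasing them for pod-name safety, might look like this; it is an illustration, not necessarily the project's actual implementation:

# Keep only alphanumeric characters, lowercased, so the result is safe
# inside a Kubernetes pod name. Illustrative sketch only.
def strip_unsafe_kubernetes_special_chars(string):
    return ''.join(ch.lower() for ch in string if ch.isalnum())

# e.g. strip_unsafe_kubernetes_special_chars('My_Dag.Task-1') == 'mydagtask1'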

Example 4: AirflowKubernetesScheduler

# Required import: from airflow.contrib.kubernetes.worker_configuration import WorkerConfiguration [as alias]
# Or: from airflow.contrib.kubernetes.worker_configuration.WorkerConfiguration import make_pod [as alias]
class AirflowKubernetesScheduler(LoggingMixin):
    def __init__(self, kube_config, task_queue, result_queue, kube_client, worker_uuid):
        self.log.debug("Creating Kubernetes executor")
        self.kube_config = kube_config
        self.task_queue = task_queue
        self.result_queue = result_queue
        self.namespace = self.kube_config.kube_namespace
        self.log.debug("Kubernetes using namespace %s", self.namespace)
        self.kube_client = kube_client
        self.launcher = PodLauncher(kube_client=self.kube_client)
        self.worker_configuration = WorkerConfiguration(kube_config=self.kube_config)
        self.watcher_queue = SynchronizedQueue()
        self.worker_uuid = worker_uuid
        self.kube_watcher = self._make_kube_watcher()

    def _make_kube_watcher(self):
        resource_version = KubeResourceVersion.get_current_resource_version()
        watcher = KubernetesJobWatcher(self.namespace, self.watcher_queue,
                                       resource_version, self.worker_uuid)
        watcher.start()
        return watcher

    def _health_check_kube_watcher(self):
        if self.kube_watcher.is_alive():
            pass
        else:
            self.log.error(
                'Error while health checking kube watcher process. '
                'Process died for unknown reasons')
            self.kube_watcher = self._make_kube_watcher()

    def run_next(self, next_job):
        """

        The run_next command will check the task_queue for any un-run jobs.
        It will then create a unique job-id, launch that job in the cluster,
        and store relevant info in the current_jobs map so we can track the job's
        status
        """
        self.log.info('Kubernetes job is %s', str(next_job))
        key, command, kube_executor_config = next_job
        dag_id, task_id, execution_date, try_number = key
        self.log.debug("Kubernetes running for command %s", command)
        self.log.debug("Kubernetes launching image %s", self.kube_config.kube_image)
        pod = self.worker_configuration.make_pod(
            namespace=self.namespace, worker_uuid=self.worker_uuid,
            pod_id=self._create_pod_id(dag_id, task_id),
            dag_id=self._make_safe_label_value(dag_id),
            task_id=self._make_safe_label_value(task_id),
            try_number=try_number,
            execution_date=self._datetime_to_label_safe_datestring(execution_date),
            airflow_command=command, kube_executor_config=kube_executor_config
        )
        # the watcher will monitor pods, so we do not block.
        self.launcher.run_pod_async(pod)
        self.log.debug("Kubernetes Job created!")

    def delete_pod(self, pod_id):
        if self.kube_config.delete_worker_pods:
            try:
                self.kube_client.delete_namespaced_pod(
                    pod_id, self.namespace, body=client.V1DeleteOptions())
            except ApiException as e:
                # If the pod is already deleted
                if e.status != 404:
                    raise

    def sync(self):
        """
        The sync function checks the status of all currently running kubernetes jobs.
        If a job is completed, it's status is placed in the result queue to
        be sent back to the scheduler.

        :return:

        """
        self._health_check_kube_watcher()
        while not self.watcher_queue.empty():
            self.process_watcher_task()

    def process_watcher_task(self):
        pod_id, state, labels, resource_version = self.watcher_queue.get()
        self.log.info(
            'Attempting to finish pod; pod_id: %s; state: %s; labels: %s',
            pod_id, state, labels
        )
        key = self._labels_to_key(labels=labels)
        if key:
            self.log.debug('finishing job %s - %s (%s)', key, state, pod_id)
            self.result_queue.put((key, state, pod_id, resource_version))

    @staticmethod
    def _strip_unsafe_kubernetes_special_chars(string):
        """
        Kubernetes only supports lowercase alphanumeric characters and "-" and "." in
        the pod name
        However, there are special rules about how "-" and "." can be used so let's
        only keep
        alphanumeric chars  see here for detail:
        https://kubernetes.io/docs/concepts/overview/working-with-objects/names/
#.........这里部分代码省略.........
Developer ID: sekikn, Project: incubator-airflow, Lines of code: 103, Source file: kubernetes_executor.py


Note: the airflow.contrib.kubernetes.worker_configuration.WorkerConfiguration.make_pod examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult the corresponding project's License before redistributing or using the code, and do not reproduce this article without permission.