

Python pykube.all Code Examples

This article collects typical usage examples of pykube.all in Python. If you are wondering what pykube.all is, what it does, or how to use it, the curated code examples below should help; you can also explore further usage of the pykube library itself.


The following presents 6 code examples of pykube.all, sorted by popularity by default.
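Before the examples, a note on what pykube.all actually is: it is not a method but a sentinel value exported by pykube, and passing it as the namespace makes a query span all namespaces instead of a single one. A minimal sketch, assuming a reachable cluster and a kubeconfig at ~/.kube/config:

import os
import pykube

config = pykube.KubeConfig.from_file(os.path.expanduser('~/.kube/config'))
api = pykube.HTTPClient(config)

# namespace=pykube.all queries across every namespace instead of a single one
for pod in pykube.Pod.objects(api, namespace=pykube.all):
    print(pod.namespace, pod.name)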

Example 1: main

# Required import: import pykube [as alias]
# Or: from pykube import all [as alias]
# (this snippet additionally needs: import argparse, os)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--seconds', type=int, default=3600, help='Delete all finished jobs older than this many seconds')
    parser.add_argument('--timeout-seconds', type=int, default=-1, help='Kill all jobs that have been running longer than this many seconds')
    parser.add_argument('--dry-run', action='store_true', help='Dry run mode')
    parser.add_argument('--namespace', type=str, default=None, help='Only search for completed jobs in a single namespace')
    args = parser.parse_args()

    try:
        config = pykube.KubeConfig.from_service_account()
    except FileNotFoundError:
        # local testing
        config = pykube.KubeConfig.from_file(os.path.expanduser('~/.kube/config'))
    api = pykube.HTTPClient(config)

    namespace = args.namespace or pykube.all

    for job in pykube.Job.objects(api, namespace=namespace):
        delete_if_expired(args.dry_run, job, job_expired(args.seconds, args.timeout_seconds, job))

    for pod in pykube.Pod.objects(api, namespace=namespace):
        delete_if_expired(args.dry_run, pod, pod_expired(args.seconds, pod)) 
Developer: hjacobs, Project: kube-job-cleaner, Lines of code: 24, Source file: cleaner.py
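
The helpers delete_if_expired, job_expired and pod_expired are defined elsewhere in cleaner.py and not shown above. As a hypothetical sketch of what such an expiry check could look like, assuming completed jobs expose status.completionTime in the usual RFC 3339 form (the function name and details are illustrative, not the project's code):

from datetime import datetime, timezone

def job_expired_sketch(seconds: int, job) -> bool:
    """Hypothetical: True if the job finished more than `seconds` seconds ago."""
    completion_time = job.obj.get('status', {}).get('completionTime')
    if not completion_time:
        return False  # the job has not completed (yet)
    completed = datetime.strptime(completion_time, '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)
    return (datetime.now(timezone.utc) - completed).total_seconds() > seconds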

Example 2: autoscale

# Required import: import pykube [as alias]
# Or: from pykube import all [as alias]
# (this snippet additionally needs: import boto3, itertools; the remaining helpers come from the project)
def autoscale(buffer_percentage: dict, buffer_fixed: dict,
              scale_down_step_fixed: int, scale_down_step_percentage: float,
              buffer_spare_nodes: int = 0, include_master_nodes: bool = False,
              dry_run: bool = False, disable_scale_down: bool = False):
    api = get_kube_api()

    all_nodes = get_nodes(api, include_master_nodes)
    region = list(all_nodes.values())[0]['region']
    autoscaling = boto3.client('autoscaling', region)
    nodes_by_asg_zone = get_nodes_by_asg_zone(autoscaling, all_nodes)

    # we only consider nodes found in an ASG (old "ghost" nodes returned from Kubernetes API are ignored)
    nodes_by_name = get_nodes_by_name(itertools.chain(*nodes_by_asg_zone.values()))

    pods = pykube.Pod.objects(api, namespace=pykube.all)

    usage_by_asg_zone = calculate_usage_by_asg_zone(pods, nodes_by_name)
    asg_size = calculate_required_auto_scaling_group_sizes(nodes_by_asg_zone, usage_by_asg_zone, buffer_percentage, buffer_fixed,
                                                           buffer_spare_nodes=buffer_spare_nodes, disable_scale_down=disable_scale_down)
    asg_size = slow_down_downscale(asg_size, nodes_by_asg_zone, scale_down_step_fixed, scale_down_step_percentage)
    ready_nodes_by_asg = get_ready_nodes_by_asg(nodes_by_asg_zone)
    resize_auto_scaling_groups(autoscaling, asg_size, ready_nodes_by_asg, dry_run) 
Developer: hjacobs, Project: kube-aws-autoscaler, Lines of code: 24, Source file: main.py
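
calculate_usage_by_asg_zone and the other helpers above belong to the project and are not shown. To illustrate the underlying idea of aggregating pod resource requests per node, here is a simplified sketch (the helper name and simplifications are mine; memory and init containers are ignored):

import collections

def requested_millicores_by_node(pods):
    """Sum container CPU requests, in millicores, per node name."""
    usage = collections.defaultdict(int)
    for pod in pods:
        node_name = pod.obj['spec'].get('nodeName')
        if not node_name:
            continue  # pending pod, not scheduled onto a node yet
        for container in pod.obj['spec']['containers']:
            cpu = container.get('resources', {}).get('requests', {}).get('cpu', '0')
            usage[node_name] += int(cpu[:-1]) if cpu.endswith('m') else int(float(cpu) * 1000)
    return usage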

Example 3: watch

# Required import: import pykube [as alias]
# Or: from pykube import all [as alias]
# (plus: from typing import Iterable, Type; Resource and _WatchEvent come from the surrounding module)
def watch(
        self,
        resource_type: Type[Resource],
) -> Iterable[_WatchEvent]:
    """
    Sync wrapper for :any:`pykube.query.Query().watch().object_stream()`
    """
    return resource_type.objects(self.client_factory()) \
        .filter(namespace=pykube.all).watch().object_stream()
Developer: miracle2k, Project: k8s-snapshots, Lines of code: 11, Source file: kube.py
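
A hypothetical consumer of this wrapper (client stands for an instance of the surrounding class, which is not shown in the snippet); pykube's object_stream() yields events that carry an event type and the affected object:

# Hypothetical usage; `client` is an instance of the class this method belongs to.
for event in client.watch(pykube.Pod):
    # each event has .type ('ADDED'/'MODIFIED'/'DELETED') and .object (a pykube resource)
    print(event.type, event.object.namespace, event.object.name)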

Example 4: get_running_instances_map

# Required import: import pykube [as alias]
# Or: from pykube import all [as alias]
# (AzureVirtualScaleSet comes from the project's azure module)
def get_running_instances_map(self, nodes, azure_groups):
        """
        given a list of KubeNode's, return a map of
        instance_id -> ec2.Instance object
        """
        instance_map = {}

        # first get azure instances
        for group in azure_groups:
            if isinstance(group, azure.AzureVirtualScaleSet):
                for instance in group.get_azure_instances():
                    instance_map[instance.id] = instance

        # now get aws instances
        instance_id_by_region = {}
        for node in nodes:
            if node.provider == 'aws':
                instance_id_by_region.setdefault(node.region, []).append(node.instance_id)

        for region, instance_ids in instance_id_by_region.items():
            # note that this assumes that all instances have a valid region;
            # the regions referenced by the nodes may also be outside of the
            # list of regions provided by the user. This should be OK, because
            # such instances will just end up as nodes unmanaged by any
            # autoscaling group we know about.
            region_instances = self.get_running_instances_in_region(
                region, instance_ids)
            instance_map.update((inst.id, inst) for inst in region_instances)

        return instance_map 
Developer: openai, Project: kubernetes-ec2-autoscaler, Lines of code: 32, Source file: cluster.py
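
get_running_instances_in_region is defined elsewhere in cluster.py. A hypothetical boto3 sketch of such a helper (the name is taken from the call above, but this implementation is an assumption, not the project's code):

import boto3

def get_running_instances_in_region(region, instance_ids):
    """Hypothetical: return running ec2.Instance objects for the given IDs."""
    ec2 = boto3.resource('ec2', region_name=region)
    return ec2.instances.filter(
        InstanceIds=list(instance_ids),
        Filters=[{'Name': 'instance-state-name', 'Values': ['running']}],
    )

Note that EC2 rejects DescribeInstances calls that reference unknown instance IDs, so a production version would likely need error handling around stale IDs.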

Example 5: pods_force_uptime

# Required import: import pykube [as alias]
# Or: from pykube import all [as alias]
# (FORCE_UPTIME_ANNOTATION and logger are module-level names in scaler.py)
def pods_force_uptime(api, namespace: str):
    """Return True if there are any running pods which require the deployments to be scaled back up."""
    for pod in pykube.Pod.objects(api).filter(namespace=(namespace or pykube.all)):
        if pod.obj.get("status", {}).get("phase") in ("Succeeded", "Failed"):
            continue
        if pod.annotations.get(FORCE_UPTIME_ANNOTATION, "").lower() == "true":
            logger.info(f"Forced uptime because of {pod.namespace}/{pod.name}")
            return True
    return False 
Developer: hjacobs, Project: kube-downscaler, Lines of code: 11, Source file: scaler.py
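
For reference, a minimal sketch of setting the annotation this check looks for; in kube-downscaler, FORCE_UPTIME_ANNOTATION is the downscaler/force-uptime annotation (the pod name and namespace below are placeholders):

import os
import pykube

api = pykube.HTTPClient(pykube.KubeConfig.from_file(os.path.expanduser('~/.kube/config')))

# placeholder pod: annotate it so pods_force_uptime() returns True for the cluster
pod = pykube.Pod.objects(api, namespace='default').get(name='my-pod')
pod.obj['metadata'].setdefault('annotations', {})['downscaler/force-uptime'] = 'true'
pod.update()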

Example 6: info

# Required import: import pykube [as alias]
# Or: from pykube import all [as alias]
# (this snippet additionally needs: import humanfriendly, socket; ClusterStats and NodeStats come from the project)
def info(self) -> ClusterStats:  # pylint: disable=too-many-locals
        """Retrieve Kubernetes cluster statistics."""
        pl_status = ClusterStats()

        node_list = pykube.Node.objects(self.api).filter(namespace=pykube.all).iterator()
        node_dict = {}

        # Get basic information from nodes
        for node in node_list:
            nss = NodeStats(node.name)
            nss.cores_total = float(node.obj['status']['allocatable']['cpu'])
            nss.memory_total = humanfriendly.parse_size(node.obj['status']['allocatable']['memory'])
            nss.labels = node.obj['metadata']['labels']
            nss.status = 'online'
            node_dict[str(socket.gethostbyname(node.name))] = nss

        # Get information from all running pods, then accumulate to nodes
        pod_list = pykube.Pod.objects(self.api).filter(namespace=pykube.all).iterator()
        for pod in pod_list:
            try:
                host_ip = pod.obj['status']['hostIP']
            except KeyError:
                continue
            nss = node_dict[host_ip]
            nss.container_count += 1
            spec_cont = pod.obj['spec']['containers'][0]  # NB: only the first container of each pod is counted
            if 'resources' in spec_cont:
                if 'requests' in spec_cont['resources']:
                    if 'memory' in spec_cont['resources']['requests']:
                        memory = spec_cont['resources']['requests']['memory']
                        nss.memory_reserved = nss.memory_reserved + humanfriendly.parse_size(memory)
                    if 'cpu' in spec_cont['resources']['requests']:
                        cpu = spec_cont['resources']['requests']['cpu']
                        # ex: cpu could be '100m' (millicores) or '0.1' (cores)
                        cpu_splitted = cpu.split('m')
                        if len(cpu_splitted) > 1:
                            cpu_float = int(cpu_splitted[0]) / 1000
                        else:
                            # float(), not int(): plain core values may be fractional, e.g. '0.1'
                            cpu_float = float(cpu_splitted[0])
                        nss.cores_reserved = round(nss.cores_reserved + cpu_float, 3)

        for node_ip in node_dict:
            pl_status.nodes.append(node_dict[node_ip])

        return pl_status 
Developer: DistributedSystemsGroup, Project: zoe, Lines of code: 47, Source file: api_client.py
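
The millicore conversion above is a common pattern worth isolating; a minimal standalone version with a quick self-check (the helper name is mine):

def cpu_to_cores(cpu: str) -> float:
    """Convert a Kubernetes CPU quantity to cores: '100m' -> 0.1, '0.1' -> 0.1, '2' -> 2.0."""
    if cpu.endswith('m'):
        return int(cpu[:-1]) / 1000
    return float(cpu)

assert cpu_to_cores('100m') == 0.1
assert cpu_to_cores('0.1') == 0.1
assert cpu_to_cores('2') == 2.0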


Note: The pykube.all examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not repost without permission.