This article collects typical usage examples of the Python method humanfriendly.parse_size. If you are wondering what humanfriendly.parse_size does or how to call it, the curated code examples below should help. You can also explore further usage examples from the humanfriendly module.
The following presents 12 code examples of humanfriendly.parse_size, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
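Before the examples, here is a minimal sketch of what parse_size itself returns. The values follow the humanfriendly documentation: units are decimal by default, and binary=True switches to 1024-based multiples.
import humanfriendly

humanfriendly.parse_size('42')                  # 42
humanfriendly.parse_size('1 KB')                # 1000 (decimal by default)
humanfriendly.parse_size('1 KiB')               # 1024
humanfriendly.parse_size('1 KB', binary=True)   # 1024
humanfriendly.parse_size('1.5 GB')              # 1500000000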
Example 1: parse_container_resources
# Required import: import humanfriendly [as alias]
# Or: from humanfriendly import parse_size [as alias]
def parse_container_resources(resources: Mapping[str, str]) -> KubeContainerResources:
    cpu_str = resources.get("cpu")
    if not cpu_str:
        cpus = None
    elif cpu_str[-1] == "m":
        cpus = float(cpu_str[:-1]) / 1000
    else:
        cpus = float(cpu_str)
    mem_str = resources.get("memory")
    if not mem_str:
        mem_mb = None
    else:
        mem_mb = parse_size(mem_str) / 1000000
    disk_str = resources.get("ephemeral-storage")
    if not disk_str:
        disk_mb = None
    else:
        disk_mb = parse_size(disk_str) / 1000000
    return KubeContainerResources(cpus=cpus, mem=mem_mb, disk=disk_mb)
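A quick usage sketch of the helper above. The input mapping is hypothetical, KubeContainerResources is assumed to be a simple record type from the surrounding codebase, and decimal unit suffixes are used to keep the arithmetic obvious.
# Hypothetical resource spec; "500m" means 500 millicores.
resources = {"cpu": "500m", "memory": "1GB", "ephemeral-storage": "10GB"}
parse_container_resources(resources)
# -> KubeContainerResources(cpus=0.5, mem=1000.0, disk=10000.0)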
Example 2: render_container_spec
# Required import: import humanfriendly [as alias]
# Or: from humanfriendly import parse_size [as alias]
def render_container_spec(app_name, proc):
    c = ContainerSpec()
    c.Image = proc.image
    c.Env = copy.deepcopy(proc.env)
    c.set_env("TZ", 'Asia/Shanghai')
    c.User = '' if not hasattr(proc, 'user') else proc.user
    c.WorkingDir = '' if not hasattr(proc, 'working_dir') else proc.working_dir
    c.DnsSearch = [] if not hasattr(
        proc, 'dns_search') else copy.deepcopy(proc.dns_search)
    c.Volumes = copy.deepcopy(proc.volumes)
    c.SystemVolumes = copy.deepcopy(
        proc.system_volumes) + get_system_volumes_from_etcd(app_name)
    c.CloudVolumes = render_cloud_volumes(proc.cloud_volumes)
    c.Command = proc.cmd
    c.Entrypoint = proc.entrypoint
    c.CpuLimit = proc.cpu
    c.MemoryLimit = humanfriendly.parse_size(proc.memory)
    # dict.keys() is not indexable in Python 3, so materialize it first
    c.Expose = 0 if not proc.port else list(proc.port.keys())[0]
    c.LogConfig = None
    return c
Example 3: maxsize
# Required import: import humanfriendly [as alias]
# Or: from humanfriendly import parse_size [as alias]
def maxsize():
    '''
    Determines the configured size limit (in GB) for Windows containers
    '''
    if platform.system() != 'Windows':
        return -1
    config = DockerUtils.getConfig()
    if 'storage-opts' in config:
        sizes = [opt.replace('size=', '') for opt in config['storage-opts'] if 'size=' in opt]
        if len(sizes) > 0:
            return humanfriendly.parse_size(sizes[0]) / 1000000000
    # The default limit on image size is 20GB
    # (https://docs.microsoft.com/en-us/visualstudio/install/build-tools-container-issues)
    return 20.0
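For context, the 'storage-opts' entry inspected above comes from the Docker daemon configuration (daemon.json) on Windows. A hedged illustration of the parsed dictionary that DockerUtils.getConfig() is assumed to return, and the value maxsize() would compute from it:
# Assumed shape of the parsed daemon.json returned by DockerUtils.getConfig()
config = {"storage-opts": ["size=120GB"]}
sizes = [opt.replace('size=', '') for opt in config['storage-opts'] if 'size=' in opt]
humanfriendly.parse_size(sizes[0]) / 1000000000  # -> 120.0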
Example 4: _get_human_friendly_used_space
# Required import: import humanfriendly [as alias]
# Or: from humanfriendly import parse_size [as alias]
def _get_human_friendly_used_space(info):
    return parse_size(info['usedSpace'].replace(',', ''))
Example 5: _export_container
# Required import: import humanfriendly [as alias]
# Or: from humanfriendly import parse_size [as alias]
def _export_container(self, container, release_image_name: str, temp_directory: str):
    generator = container.export(chunk_size=humanfriendly.parse_size("10mb"))
    export_file = temp_directory + "/export.tar"
    with open(export_file, "wb") as file:
        still_running_logger = StillRunningLogger(
            self.logger, "Export image %s" % release_image_name)
        for chunk in generator:
            still_running_logger.log()
            file.write(chunk)
    return export_file
Example 6: humanfriendly_parse_size_or_none
# Required import: import humanfriendly [as alias]
# Or: from humanfriendly import parse_size [as alias]
def humanfriendly_parse_size_or_none(value) -> Optional[float]:
    if value.strip().lower() in ("none", "null", "nil"):
        return None
    return humanfriendly.parse_size(value)
Example 7: humanfriendly_or_none
# Required import: import humanfriendly [as alias]
# Or: from humanfriendly import parse_size [as alias]
def humanfriendly_or_none(value: str):
    if value in ("none", "None", "NONE"):
        return None
    return humanfriendly.parse_size(value)
Example 8: build
# Required import: import humanfriendly [as alias]
# Or: from humanfriendly import parse_size [as alias]
def build(
    self,
    bloomfilters: hug.types.multiple = [],
    samples: hug.types.multiple = [],
    from_file: hug.types.text = None,
    config: hug.types.text = None,
):
    config = get_config_from_file(config)
    if from_file and bloomfilters:
        raise ValueError(
            "You can only specify blooms via from_file or bloomfilters, but not both"
        )
    elif from_file:
        samples = []
        bloomfilters = []
        with open(from_file, "r") as tsvfile:
            reader = csv.reader(tsvfile, delimiter="\t")
            for row in reader:
                bloomfilters.append(row[0])
                samples.append(row[1])
    if samples:
        assert len(samples) == len(bloomfilters)
    else:
        samples = bloomfilters
    if config.get("max_build_mem_bytes"):
        max_memory_bytes = humanfriendly.parse_size(config["max_build_mem_bytes"])
    else:
        max_memory_bytes = None
    return build(
        config=config,
        bloomfilter_filepaths=bloomfilters,
        samples=samples,
        max_memory=max_memory_bytes,
    )
Example 9: parse_human_size
# Required import: import humanfriendly [as alias]
# Or: from humanfriendly import parse_size [as alias]
def parse_human_size(value):
    if isinstance(value, six.string_types):
        return humanfriendly.parse_size(value)
    return value
Example 10: mem
# Required import: import humanfriendly [as alias]
# Or: from humanfriendly import parse_size [as alias]
def mem(resources):
    resources = resources or {}
    return parse_size(resources.get('memory', DEFAULT_KUBERNETES_MEMORY_REQUEST)) / 1000000
Example 11: disk
# Required import: import humanfriendly [as alias]
# Or: from humanfriendly import parse_size [as alias]
def disk(resources):
    resources = resources or {}
    return parse_size(resources.get('ephemeral-storage', DEFAULT_KUBERNETES_DISK_REQUEST)) / 1000000
Example 12: info
# Required import: import humanfriendly [as alias]
# Or: from humanfriendly import parse_size [as alias]
def info(self) -> ClusterStats:  # pylint: disable=too-many-locals
    """Retrieve Kubernetes cluster statistics."""
    pl_status = ClusterStats()
    node_list = pykube.Node.objects(self.api).filter(namespace=pykube.all).iterator()
    node_dict = {}
    # Get basic information from nodes
    for node in node_list:
        nss = NodeStats(node.name)
        nss.cores_total = float(node.obj['status']['allocatable']['cpu'])
        nss.memory_total = humanfriendly.parse_size(node.obj['status']['allocatable']['memory'])
        nss.labels = node.obj['metadata']['labels']
        nss.status = 'online'
        node_dict[str(socket.gethostbyname(node.name))] = nss
    # Get information from all running pods, then accumulate to nodes
    pod_list = pykube.Pod.objects(self.api).filter(namespace=pykube.all).iterator()
    for pod in pod_list:
        try:
            host_ip = pod.obj['status']['hostIP']
        except KeyError:
            continue
        nss = node_dict[host_ip]
        nss.container_count += 1
        spec_cont = pod.obj['spec']['containers'][0]
        if 'resources' in spec_cont:
            if 'requests' in spec_cont['resources']:
                if 'memory' in spec_cont['resources']['requests']:
                    memory = spec_cont['resources']['requests']['memory']
                    nss.memory_reserved = nss.memory_reserved + humanfriendly.parse_size(memory)
                if 'cpu' in spec_cont['resources']['requests']:
                    cpu = spec_cont['resources']['requests']['cpu']
                    # ex: cpu could be 100m or 0.1
                    cpu_splitted = cpu.split('m')
                    if len(cpu_splitted) > 1:
                        cpu_float = int(cpu_splitted[0]) / 1000
                    else:
                        # use float() so fractional values such as "0.1" parse correctly
                        cpu_float = float(cpu_splitted[0])
                    nss.cores_reserved = round(nss.cores_reserved + cpu_float, 3)
    for node_ip in node_dict:
        pl_status.nodes.append(node_dict[node_ip])
    return pl_status