本文整理汇总了Golang中k8s/io/kubernetes/pkg/api/resource.NewMilliQuantity函数的典型用法代码示例。如果您正苦于以下问题:Golang NewMilliQuantity函数的具体用法?Golang NewMilliQuantity怎么用?Golang NewMilliQuantity使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewMilliQuantity函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: applyTo
// applyTo copies the override fields of config onto the given pod template.
// All per-container settings (env, ports, resources, volume mounts) target
// Containers[0] only; pod-level settings (labels, node selector, volumes)
// target the template itself.
func (config *RCConfig) applyTo(template *api.PodTemplateSpec) {
	// The first container is the sole target of container-level overrides;
	// appending to its slices never reallocates template.Spec.Containers, so
	// holding the pointer across the whole function is safe.
	c := &template.Spec.Containers[0]
	if config.Env != nil {
		for k, v := range config.Env {
			c.Env = append(c.Env, api.EnvVar{Name: k, Value: v})
		}
	}
	if config.Labels != nil {
		// Guard against a nil Labels map on the template: assigning into a
		// nil map panics at runtime.
		if template.ObjectMeta.Labels == nil {
			template.ObjectMeta.Labels = make(map[string]string, len(config.Labels))
		}
		for k, v := range config.Labels {
			template.ObjectMeta.Labels[k] = v
		}
	}
	if config.NodeSelector != nil {
		// Replace (not merge) any existing node selector.
		template.Spec.NodeSelector = make(map[string]string)
		for k, v := range config.NodeSelector {
			template.Spec.NodeSelector[k] = v
		}
	}
	if config.Ports != nil {
		for k, v := range config.Ports {
			c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v)})
		}
	}
	if config.HostPorts != nil {
		for k, v := range config.HostPorts {
			// Host ports use the same number for both container and host side.
			c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: int32(v), HostPort: int32(v)})
		}
	}
	if config.CpuLimit > 0 || config.MemLimit > 0 {
		template.Spec.Containers[0].Resources.Limits = api.ResourceList{}
	}
	if config.CpuLimit > 0 {
		// CpuLimit is in millicores.
		template.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI)
	}
	if config.MemLimit > 0 {
		// MemLimit is in bytes.
		template.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
	}
	if config.CpuRequest > 0 || config.MemRequest > 0 {
		template.Spec.Containers[0].Resources.Requests = api.ResourceList{}
	}
	if config.CpuRequest > 0 {
		template.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
	}
	if config.MemRequest > 0 {
		template.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
	}
	if len(config.Volumes) > 0 {
		template.Spec.Volumes = config.Volumes
	}
	if len(config.VolumeMounts) > 0 {
		template.Spec.Containers[0].VolumeMounts = config.VolumeMounts
	}
}
示例2: testNodeMetricsData
// testNodeMetricsData returns a fixed pair of fixtures: a NodeMetricsList
// with one-minute usage windows for two nodes, and the matching NodeList
// carrying their allocatable capacities.
func testNodeMetricsData() (*metrics_api.NodeMetricsList, *api.NodeList) {
	const mb = 1024 * 1024
	// makeMetrics builds one NodeMetrics entry (cpu in millicores, mem/storage in MiB).
	makeMetrics := func(name, rv string, cpuMilli, memMB, storMB int64) metrics_api.NodeMetrics {
		return metrics_api.NodeMetrics{
			ObjectMeta: v1.ObjectMeta{Name: name, ResourceVersion: rv},
			Window:     unversioned.Duration{Duration: time.Minute},
			Usage: v1.ResourceList{
				v1.ResourceCPU:     *resource.NewMilliQuantity(cpuMilli, resource.DecimalSI),
				v1.ResourceMemory:  *resource.NewQuantity(memMB*mb, resource.DecimalSI),
				v1.ResourceStorage: *resource.NewQuantity(storMB*mb, resource.DecimalSI),
			},
		}
	}
	// makeNode builds one Node entry with the given allocatable resources.
	makeNode := func(name, rv string, cpuMilli, memMB, storMB int64) api.Node {
		return api.Node{
			ObjectMeta: api.ObjectMeta{Name: name, ResourceVersion: rv},
			Status: api.NodeStatus{
				Allocatable: api.ResourceList{
					api.ResourceCPU:     *resource.NewMilliQuantity(cpuMilli, resource.DecimalSI),
					api.ResourceMemory:  *resource.NewQuantity(memMB*mb, resource.DecimalSI),
					api.ResourceStorage: *resource.NewQuantity(storMB*mb, resource.DecimalSI),
				},
			},
		}
	}
	metrics := &metrics_api.NodeMetricsList{
		ListMeta: unversioned.ListMeta{ResourceVersion: "1"},
		Items: []metrics_api.NodeMetrics{
			makeMetrics("node1", "10", 1, 2, 3),
			makeMetrics("node2", "11", 5, 6, 7),
		},
	}
	nodes := &api.NodeList{
		ListMeta: unversioned.ListMeta{ResourceVersion: "15"},
		Items: []api.Node{
			makeNode("node1", "10", 10, 20, 30),
			makeNode("node2", "11", 50, 60, 70),
		},
	}
	return metrics, nodes
}
示例3: makeNode
// makeNode returns a Node named node whose Capacity and Allocatable are each
// set to the given CPU (millicores, decimal) and memory (bytes, binary).
func makeNode(node string, milliCPU, memory int64) *api.Node {
	// Build a fresh map per call site so Capacity and Allocatable do not
	// alias each other (matching the original's two separate literals).
	newList := func() api.ResourceList {
		return api.ResourceList{
			"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
			"memory": *resource.NewQuantity(memory, resource.BinarySI),
		}
	}
	return &api.Node{
		ObjectMeta: api.ObjectMeta{Name: node},
		Status: api.NodeStatus{
			Capacity:    newList(),
			Allocatable: newList(),
		},
	}
}
示例4: createOutOfDiskPod
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
func createOutOfDiskPod(c *client.Client, ns, name string, milliCPU int64) {
	// Request enough CPU to fit only two pods on a given node.
	requests := api.ResourceList{
		api.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: name},
		Spec: api.PodSpec{
			Containers: []api.Container{{
				Name:      "pause",
				Image:     "beta.gcr.io/google_containers/pause:2.0",
				Resources: api.ResourceRequirements{Requests: requests},
			}},
		},
	}
	_, err := c.Pods(ns).Create(pod)
	expectNoError(err)
}
示例5: createOutOfDiskPod
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
// createOutOfDiskPod creates a pod in the given namespace with the requested amount of CPU.
func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) {
	// Request enough CPU to fit only two pods on a given node.
	requests := api.ResourceList{
		api.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
	}
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: name},
		Spec: api.PodSpec{
			Containers: []api.Container{{
				Name:      "pause",
				Image:     framework.GetPauseImageName(c),
				Resources: api.ResourceRequirements{Requests: requests},
			}},
		},
	}
	_, err := c.Core().Pods(ns).Create(pod)
	framework.ExpectNoError(err)
}
示例6: BuildTestPod
// BuildTestPod creates a pod with specified resources.
func BuildTestPod(name string, cpu int64, mem int64) *kube_api.Pod {
pod := &kube_api.Pod{
ObjectMeta: kube_api.ObjectMeta{
Namespace: "default",
Name: name,
},
Spec: kube_api.PodSpec{
Containers: []kube_api.Container{
{
Resources: kube_api.ResourceRequirements{
Requests: kube_api.ResourceList{},
},
},
},
},
}
if cpu >= 0 {
pod.Spec.Containers[0].Resources.Requests[kube_api.ResourceCPU] = *resource.NewMilliQuantity(cpu, resource.DecimalSI)
}
if mem >= 0 {
pod.Spec.Containers[0].Resources.Requests[kube_api.ResourceMemory] = *resource.NewQuantity(mem, resource.DecimalSI)
}
return pod
}
示例7: TestReservation
// TestReservation checks calculateReservation: two copies of a 100m-CPU pod
// on a 2000m node reserve 1/10 of capacity, while asking about memory
// (absent from the node's capacity) yields an error.
func TestReservation(t *testing.T) {
	requestingPod := buildPod("p1", 100, 200000)
	emptyPod := &kube_api.Pod{
		Spec: kube_api.PodSpec{
			Containers: []kube_api.Container{{
				Resources: kube_api.ResourceRequirements{
					Requests: kube_api.ResourceList{},
				},
			}},
		},
	}
	nodeInfo := schedulercache.NewNodeInfo(requestingPod, requestingPod, emptyPod)
	node := &kube_api.Node{
		Status: kube_api.NodeStatus{
			Capacity: kube_api.ResourceList{
				kube_api.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
			},
		},
	}

	reservation, err := calculateReservation(node, nodeInfo, kube_api.ResourceCPU)
	assert.NoError(t, err)
	assert.InEpsilon(t, 1.0/10, reservation, 0.01)

	_, err = calculateReservation(node, nodeInfo, kube_api.ResourceMemory)
	assert.Error(t, err)
}
示例8: getEstimation
// getEstimation estimates the given resource kind for container c from
// historical usage, widening the query until enough samples are found:
// last 7 days for the same image:tag, then last 30 days for the same
// image:tag, then last 30 days for the image regardless of tag.
// Returns (nil, nil) when no samples exist at all.
func (ir initialResources) getEstimation(kind api.ResourceName, c *api.Container) (*resource.Quantity, error) {
	end := time.Now()
	var usage, samples int64
	var err error
	// Historical data from last 7 days for the same image:tag.
	if usage, samples, err = ir.source.GetUsagePercentile(kind, *percentile, c.Image, true, end.Add(-week), end); err != nil {
		return nil, err
	}
	if samples < samplesThreshold {
		// Historical data from last 30 days for the same image:tag.
		if usage, samples, err = ir.source.GetUsagePercentile(kind, *percentile, c.Image, true, end.Add(-month), end); err != nil {
			return nil, err
		}
	}
	if samples < samplesThreshold {
		// Historical data from last 30 days for the same image, any tag.
		image := strings.Split(c.Image, ":")[0]
		if usage, samples, err = ir.source.GetUsagePercentile(kind, *percentile, image, false, end.Add(-month), end); err != nil {
			return nil, err
		}
	}
	if samples > 0 {
		switch kind {
		case api.ResourceCPU:
			// CPU usage is reported in millicores.
			return resource.NewMilliQuantity(usage, resource.DecimalSI), nil
		case api.ResourceMemory:
			return resource.NewQuantity(usage, resource.DecimalSI), nil
		}
	}
	return nil, nil
}
示例9: PodCPU
// PodCPU computes total cpu limit across all containers in pod
// TODO: Remove this once the mesos scheduler becomes request aware
func PodCPU(pod *api.Pod) *resource.Quantity {
val := int64(0)
for j := range pod.Spec.Containers {
val = val + pod.Spec.Containers[j].Resources.Limits.Cpu().MilliValue()
}
return resource.NewMilliQuantity(int64(val), resource.DecimalSI)
}
示例10: makeAllocatableResources
// makeAllocatableResources builds a ResourceList with the given CPU
// (millicores, decimal), memory (bytes, binary), and pod count.
func makeAllocatableResources(milliCPU int64, memory int64, pods int64) api.ResourceList {
	list := api.ResourceList{}
	list[api.ResourceCPU] = *resource.NewMilliQuantity(milliCPU, resource.DecimalSI)
	list[api.ResourceMemory] = *resource.NewQuantity(memory, resource.BinarySI)
	list[api.ResourcePods] = *resource.NewQuantity(pods, resource.DecimalSI)
	return list
}
示例11: makeResources
// makeResources builds NodeResources capacity from human-friendly units:
// cpu is in cores, memory is in GiB.
func makeResources(cpu float64, memory float64) *api.NodeResources {
	// Convert cores -> millicores and GiB -> bytes before truncating to int64.
	milliCPU := int64(cpu * 1000)
	memBytes := int64(memory * 1024 * 1024 * 1024)
	return &api.NodeResources{
		Capacity: api.ResourceList{
			api.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
			api.ResourceMemory: *resource.NewQuantity(memBytes, resource.BinarySI),
		},
	}
}
示例12: resourceLimits
// resourceLimits returns a container option that sets the CPU (shares ->
// millicores) and memory (MiB -> bytes) limits on a container, allocating
// the Limits map on first use.
func resourceLimits(cpu resources.CPUShares, mem resources.MegaBytes) ctOpt {
	return ctOpt(func(c *api.Container) {
		limits := c.Resources.Limits
		if limits == nil {
			limits = make(api.ResourceList)
			c.Resources.Limits = limits
		}
		limits[api.ResourceCPU] = *resource.NewMilliQuantity(int64(float64(cpu)*1000.0), resource.DecimalSI)
		limits[api.ResourceMemory] = *resource.NewQuantity(int64(float64(mem)*1024.0*1024.0), resource.BinarySI)
	})
}
示例13: TestEstimate
// TestEstimate feeds five copies of a pod (500m CPU, 1000 MiB memory) into
// a BasicNodeEstimator and checks the accumulated sums, then verifies the
// estimate against a node that fits at most 3 pods by CPU.
func TestEstimate(t *testing.T) {
	const (
		podCount = 5
		cpuPerPod = int64(500)
	)
	memoryPerPod := int64(1000 * 1024 * 1024)
	pod := &apiv1.Pod{
		Spec: apiv1.PodSpec{
			Containers: []apiv1.Container{{
				Resources: apiv1.ResourceRequirements{
					Requests: apiv1.ResourceList{
						apiv1.ResourceCPU:    *resource.NewMilliQuantity(cpuPerPod, resource.DecimalSI),
						apiv1.ResourceMemory: *resource.NewQuantity(memoryPerPod, resource.DecimalSI),
					},
				},
			}},
		},
	}

	estimator := NewBasicNodeEstimator()
	for i := 0; i < podCount; i++ {
		// Add takes ownership of its argument, so hand it a fresh copy.
		podCopy := *pod
		estimator.Add(&podCopy)
	}
	assert.Equal(t, int64(500*podCount), estimator.cpuSum.MilliValue())
	assert.Equal(t, int64(podCount)*memoryPerPod, estimator.memorySum.Value())
	assert.Equal(t, podCount, estimator.GetCount())

	node := &apiv1.Node{
		Status: apiv1.NodeStatus{
			Capacity: apiv1.ResourceList{
				apiv1.ResourceCPU:    *resource.NewMilliQuantity(3*cpuPerPod, resource.DecimalSI),
				apiv1.ResourceMemory: *resource.NewQuantity(2*memoryPerPod, resource.DecimalSI),
				apiv1.ResourcePods:   *resource.NewQuantity(10, resource.DecimalSI),
			},
		},
	}
	estimate, report := estimator.Estimate(node)
	assert.Contains(t, estimator.GetDebug(), "CPU")
	assert.Contains(t, report, "CPU")
	assert.Equal(t, 3, estimate)
}
示例14: TestBinpackingEstimateWithPorts
// TestBinpackingEstimateWithPorts verifies that host-port conflicts force
// the binpacking estimator to one pod per node: eight pods that would fit
// by CPU/memory on fewer nodes still require eight nodes because each
// claims host port 5555.
func TestBinpackingEstimateWithPorts(t *testing.T) {
	estimator := NewBinpackingNodeEstimator(simulator.NewTestPredicateChecker())

	cpuPerPod := int64(200)
	memoryPerPod := int64(1000 * 1024 * 1024)
	pod := &kube_api.Pod{
		Spec: kube_api.PodSpec{
			Containers: []kube_api.Container{{
				Resources: kube_api.ResourceRequirements{
					Requests: kube_api.ResourceList{
						kube_api.ResourceCPU:    *resource.NewMilliQuantity(cpuPerPod, resource.DecimalSI),
						kube_api.ResourceMemory: *resource.NewQuantity(memoryPerPod, resource.DecimalSI),
					},
				},
				// Every pod wants the same host port, so no two can share a node.
				Ports: []kube_api.ContainerPort{{HostPort: 5555}},
			}},
		},
	}
	var pods []*kube_api.Pod
	for i := 0; i < 8; i++ {
		pods = append(pods, pod)
	}

	node := &kube_api.Node{
		Status: kube_api.NodeStatus{
			Capacity: kube_api.ResourceList{
				kube_api.ResourceCPU:    *resource.NewMilliQuantity(5*cpuPerPod, resource.DecimalSI),
				kube_api.ResourceMemory: *resource.NewQuantity(5*memoryPerPod, resource.DecimalSI),
				kube_api.ResourcePods:   *resource.NewQuantity(10, resource.DecimalSI),
			},
		},
	}
	node.Status.Allocatable = node.Status.Capacity
	nodeInfo := schedulercache.NewNodeInfo()
	nodeInfo.SetNode(node)

	estimate := estimator.Estimate(pods, nodeInfo)
	assert.Equal(t, 8, estimate)
}
示例15: updateContainerResources
// updateContainerResources derives missing resource values for a container
// from its existing limits, according to the ratios in config:
//   - memory request  = memory limit * memoryRequestToLimitRatio
//   - cpu limit       = memory limit * limitCPUToMemoryRatio * cpuBaseScaleFactor
//   - cpu request     = cpu limit * cpuRequestToLimitRatio
// A ratio of 0 disables the corresponding derivation. Derived values are
// clamped up to memFloor/cpuFloor. Mutates the container in place: the
// local `resources` copy shares its Limits/Requests maps with the container.
//
// NOTE(review): writes into resources.Requests/resources.Limits assume the
// maps are non-nil — presumably guaranteed by the caller; verify.
func updateContainerResources(config *internalConfig, container *kapi.Container) {
	// Struct copy, but Limits/Requests are maps, so writes below still
	// mutate the container's actual resource lists.
	resources := container.Resources
	memLimit, memFound := resources.Limits[kapi.ResourceMemory]
	if memFound && config.memoryRequestToLimitRatio != 0 {
		// memory is measured in whole bytes.
		// the plugin rounds down to the nearest MiB rather than bytes to improve ease of use for end-users.
		// Ratio is applied via integer math on (ratio*100)/100 to avoid
		// floating-point byte counts.
		amount := memLimit.Value() * int64(config.memoryRequestToLimitRatio*100) / 100
		// TODO: move into resource.Quantity
		// Round down to a whole MiB (binary format) or MB (decimal format),
		// matching the format the limit was expressed in.
		var mod int64
		switch memLimit.Format {
		case resource.BinarySI:
			mod = 1024 * 1024
		default:
			mod = 1000 * 1000
		}
		if rem := amount % mod; rem != 0 {
			amount = amount - rem
		}
		q := resource.NewQuantity(int64(amount), memLimit.Format)
		// Never set a request below the configured floor.
		if memFloor.Cmp(*q) > 0 {
			q = memFloor.Copy()
		}
		resources.Requests[kapi.ResourceMemory] = *q
	}
	if memFound && config.limitCPUToMemoryRatio != 0 {
		// Derive a CPU limit proportional to the memory limit; this runs
		// before the cpu-request derivation below so the derived limit can
		// itself seed a request.
		amount := float64(memLimit.Value()) * config.limitCPUToMemoryRatio * cpuBaseScaleFactor
		q := resource.NewMilliQuantity(int64(amount), resource.DecimalSI)
		if cpuFloor.Cmp(*q) > 0 {
			q = cpuFloor.Copy()
		}
		resources.Limits[kapi.ResourceCPU] = *q
	}
	// Re-read the (possibly just-derived) CPU limit.
	cpuLimit, cpuFound := resources.Limits[kapi.ResourceCPU]
	if cpuFound && config.cpuRequestToLimitRatio != 0 {
		amount := float64(cpuLimit.MilliValue()) * config.cpuRequestToLimitRatio
		q := resource.NewMilliQuantity(int64(amount), cpuLimit.Format)
		if cpuFloor.Cmp(*q) > 0 {
			q = cpuFloor.Copy()
		}
		resources.Requests[kapi.ResourceCPU] = *q
	}
}