This article collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/util.GetIntOrPercentValue. If you have been wondering what exactly GetIntOrPercentValue does and how to use it, the curated code samples below may help.
Seven code examples of GetIntOrPercentValue are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang code samples.
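Before diving into the examples, here is a minimal standalone sketch of how GetIntOrPercentValue and GetValueFromPercent fit together, based only on the call patterns visible in the examples below. The IntOrString constructors (NewIntOrStringFromInt, NewIntOrStringFromString) are assumed to live in the same util package, and the replica count of 10 is a made-up input:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util"
)

func main() {
	replicas := 10 // hypothetical desired replica count

	// Absolute value: yields (3, false, nil).
	surge := util.NewIntOrStringFromInt(3)
	maxSurge, isPercent, err := util.GetIntOrPercentValue(&surge)
	if err != nil {
		panic(err)
	}
	fmt.Println(maxSurge, isPercent) // 3 false

	// Percentage: yields (25, true, nil); resolve it against the
	// replica count, as every example below does.
	unavailable := util.NewIntOrStringFromString("25%")
	maxUnavailable, isPercent, err := util.GetIntOrPercentValue(&unavailable)
	if err != nil {
		panic(err)
	}
	if isPercent {
		maxUnavailable = util.GetValueFromPercent(maxUnavailable, replicas)
	}
	fmt.Println(maxUnavailable) // 25% of 10 replicas
}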
Example 1: reconcileNewRC
func (d *DeploymentController) reconcileNewRC(allRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment extensions.Deployment) (bool, error) {
	if newRC.Spec.Replicas == deployment.Spec.Replicas {
		// Scaling not required.
		return false, nil
	}
	if newRC.Spec.Replicas > deployment.Spec.Replicas {
		// Scale down.
		_, err := d.scaleRCAndRecordEvent(newRC, deployment.Spec.Replicas, deployment)
		return true, err
	}
	// Check if we can scale up.
	maxSurge, isPercent, err := util.GetIntOrPercentValue(&deployment.Spec.Strategy.RollingUpdate.MaxSurge)
	if err != nil {
		return false, fmt.Errorf("invalid value for MaxSurge: %v", err)
	}
	if isPercent {
		maxSurge = util.GetValueFromPercent(maxSurge, deployment.Spec.Replicas)
	}
	// Find the total number of pods.
	currentPodCount := deploymentutil.GetReplicaCountForRCs(allRCs)
	maxTotalPods := deployment.Spec.Replicas + maxSurge
	if currentPodCount >= maxTotalPods {
		// Cannot scale up.
		return false, nil
	}
	// Scale up.
	scaleUpCount := maxTotalPods - currentPodCount
	// Do not exceed the number of desired replicas.
	scaleUpCount = int(math.Min(float64(scaleUpCount), float64(deployment.Spec.Replicas-newRC.Spec.Replicas)))
	newReplicasCount := newRC.Spec.Replicas + scaleUpCount
	_, err = d.scaleRCAndRecordEvent(newRC, newReplicasCount, deployment)
	return true, err
}
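The scale-up arithmetic in reconcileNewRC is easy to check by hand. Here is a small self-contained sketch with made-up numbers (10 desired replicas, a resolved maxSurge of 2, 11 pods currently running across all RCs):

package main

import "fmt"

func main() {
	desired := 10         // deployment.Spec.Replicas (made-up)
	maxSurge := 2         // already resolved from an int or percent value
	currentPodCount := 11 // total pods across all RCs

	maxTotalPods := desired + maxSurge // 12
	if currentPodCount >= maxTotalPods {
		fmt.Println("cannot scale up")
		return
	}
	scaleUpCount := maxTotalPods - currentPodCount // 1
	fmt.Println("scale up by", scaleUpCount)
}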
Example 2: scaleUp
func (d *DeploymentController) scaleUp(allRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment experimental.Deployment) (bool, error) {
	if newRC.Spec.Replicas == deployment.Spec.Replicas {
		// Scaling up not required.
		return false, nil
	}
	maxSurge, isPercent, err := util.GetIntOrPercentValue(&deployment.Spec.Strategy.RollingUpdate.MaxSurge)
	if err != nil {
		return false, fmt.Errorf("invalid value for MaxSurge: %v", err)
	}
	if isPercent {
		maxSurge = util.GetValueFromPercent(maxSurge, deployment.Spec.Replicas)
	}
	// Find the total number of pods.
	allPods, err := d.getPodsForRCs(allRCs)
	if err != nil {
		return false, err
	}
	currentPodCount := len(allPods)
	// Check if we can scale up.
	maxTotalPods := deployment.Spec.Replicas + maxSurge
	if currentPodCount >= maxTotalPods {
		// Cannot scale up.
		return false, nil
	}
	// Scale up.
	scaleUpCount := maxTotalPods - currentPodCount
	scaleUpCount = int(math.Min(float64(scaleUpCount), float64(deployment.Spec.Replicas-newRC.Spec.Replicas)))
	_, err = d.scaleRC(newRC, newRC.Spec.Replicas+scaleUpCount)
	return true, err
}
Example 3: scaleDown
func (d *DeploymentController) scaleDown(allRCs []*api.ReplicationController, oldRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment experimental.Deployment) (bool, error) {
	oldPodsCount := d.getReplicaCountForRCs(oldRCs)
	if oldPodsCount == 0 {
		// Can't scale down further.
		return false, nil
	}
	maxUnavailable, isPercent, err := util.GetIntOrPercentValue(&deployment.Spec.Strategy.RollingUpdate.MaxUnavailable)
	if err != nil {
		return false, fmt.Errorf("invalid value for MaxUnavailable: %v", err)
	}
	if isPercent {
		maxUnavailable = util.GetValueFromPercent(maxUnavailable, deployment.Spec.Replicas)
	}
	// Check if we can scale down.
	minAvailable := deployment.Spec.Replicas - maxUnavailable
	// Find the number of ready pods.
	// TODO: Use MinReadySeconds once https://github.com/kubernetes/kubernetes/pull/12894 is merged.
	readyPodCount := 0
	allPods, err := d.getPodsForRCs(allRCs)
	if err != nil {
		return false, err
	}
	for _, pod := range allPods {
		if api.IsPodReady(&pod) {
			readyPodCount++
		}
	}
	if readyPodCount <= minAvailable {
		// Cannot scale down.
		return false, nil
	}
	totalScaleDownCount := readyPodCount - minAvailable
	for _, targetRC := range oldRCs {
		if totalScaleDownCount == 0 {
			// No further scaling required.
			break
		}
		if targetRC.Spec.Replicas == 0 {
			// Cannot scale down this RC.
			continue
		}
		// Scale down.
		scaleDownCount := int(math.Min(float64(targetRC.Spec.Replicas), float64(totalScaleDownCount)))
		_, err = d.scaleRC(targetRC, targetRC.Spec.Replicas-scaleDownCount)
		if err != nil {
			return false, err
		}
		totalScaleDownCount -= scaleDownCount
	}
	return true, nil
}
Example 4: reconcileOldRCs
func (d *DeploymentController) reconcileOldRCs(allRCs []*api.ReplicationController, oldRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment extensions.Deployment) (bool, error) {
	oldPodsCount := deploymentutil.GetReplicaCountForRCs(oldRCs)
	if oldPodsCount == 0 {
		// Can't scale down further.
		return false, nil
	}
	maxUnavailable, isPercent, err := util.GetIntOrPercentValue(&deployment.Spec.Strategy.RollingUpdate.MaxUnavailable)
	if err != nil {
		return false, fmt.Errorf("invalid value for MaxUnavailable: %v", err)
	}
	if isPercent {
		maxUnavailable = util.GetValueFromPercent(maxUnavailable, deployment.Spec.Replicas)
	}
	// Check if we can scale down.
	minAvailable := deployment.Spec.Replicas - maxUnavailable
	minReadySeconds := deployment.Spec.Strategy.RollingUpdate.MinReadySeconds
	// Find the number of ready pods.
	readyPodCount, err := deploymentutil.GetAvailablePodsForRCs(d.client, allRCs, minReadySeconds)
	if err != nil {
		return false, fmt.Errorf("could not find available pods: %v", err)
	}
	if readyPodCount <= minAvailable {
		// Cannot scale down.
		return false, nil
	}
	totalScaleDownCount := readyPodCount - minAvailable
	for _, targetRC := range oldRCs {
		if totalScaleDownCount == 0 {
			// No further scaling required.
			break
		}
		if targetRC.Spec.Replicas == 0 {
			// Cannot scale down this RC.
			continue
		}
		// Scale down.
		scaleDownCount := int(math.Min(float64(targetRC.Spec.Replicas), float64(totalScaleDownCount)))
		newReplicasCount := targetRC.Spec.Replicas - scaleDownCount
		_, err = d.scaleRCAndRecordEvent(targetRC, newReplicasCount, deployment)
		if err != nil {
			return false, err
		}
		totalScaleDownCount -= scaleDownCount
	}
	return true, nil
}
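The scale-down examples mirror the scale-up arithmetic: maxUnavailable bounds how far the ready pod count may drop below the desired replica count. A minimal sketch with made-up numbers (10 desired replicas, maxUnavailable of 3, 9 ready pods):

package main

import "fmt"

func main() {
	desired := 10       // deployment.Spec.Replicas (made-up)
	maxUnavailable := 3 // already resolved from an int or percent value
	readyPodCount := 9  // ready pods across all RCs

	minAvailable := desired - maxUnavailable // 7
	if readyPodCount <= minAvailable {
		fmt.Println("cannot scale down")
		return
	}
	totalScaleDownCount := readyPodCount - minAvailable // 2
	fmt.Println("may scale down by up to", totalScaleDownCount)
}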
Example 5: scaleDownOldRCsForRollingUpdate
// scaleDownOldRCsForRollingUpdate scales down old RCs when the deployment strategy is "RollingUpdate".
// It needs to check maxUnavailable to ensure availability.
func (dc *DeploymentController) scaleDownOldRCsForRollingUpdate(allRCs []*api.ReplicationController, oldRCs []*api.ReplicationController, deployment extensions.Deployment) (int, error) {
	maxUnavailable, isPercent, err := util.GetIntOrPercentValue(&deployment.Spec.Strategy.RollingUpdate.MaxUnavailable)
	if err != nil {
		return 0, fmt.Errorf("invalid value for MaxUnavailable: %v", err)
	}
	if isPercent {
		maxUnavailable = util.GetValueFromPercent(maxUnavailable, deployment.Spec.Replicas)
	}
	// Check if we can scale down.
	minAvailable := deployment.Spec.Replicas - maxUnavailable
	minReadySeconds := deployment.Spec.MinReadySeconds
	// Find the number of ready pods.
	readyPodCount, err := deploymentutil.GetAvailablePodsForRCs(dc.client, allRCs, minReadySeconds)
	if err != nil {
		return 0, fmt.Errorf("could not find available pods: %v", err)
	}
	if readyPodCount <= minAvailable {
		// Cannot scale down.
		return 0, nil
	}
	sort.Sort(controller.ControllersByCreationTimestamp(oldRCs))
	totalScaledDown := 0
	totalScaleDownCount := readyPodCount - minAvailable
	for _, targetRC := range oldRCs {
		if totalScaledDown >= totalScaleDownCount {
			// No further scaling required.
			break
		}
		if targetRC.Spec.Replicas == 0 {
			// Cannot scale down this RC.
			continue
		}
		// Scale down.
		scaleDownCount := int(math.Min(float64(targetRC.Spec.Replicas), float64(totalScaleDownCount-totalScaledDown)))
		newReplicasCount := targetRC.Spec.Replicas - scaleDownCount
		_, err = dc.scaleRCAndRecordEvent(targetRC, newReplicasCount, deployment)
		if err != nil {
			return totalScaledDown, err
		}
		totalScaledDown += scaleDownCount
	}
	return totalScaledDown, nil
}
Example 6: reconcileOldRCs
// Set expectationsCheck to false to bypass the expectations check when testing.
func (dc *DeploymentController) reconcileOldRCs(allRCs []*api.ReplicationController, oldRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment extensions.Deployment, expectationsCheck bool) (bool, error) {
	oldPodsCount := deploymentutil.GetReplicaCountForRCs(oldRCs)
	if oldPodsCount == 0 {
		// Can't scale down further.
		return false, nil
	}
	maxUnavailable, isPercent, err := util.GetIntOrPercentValue(&deployment.Spec.Strategy.RollingUpdate.MaxUnavailable)
	if err != nil {
		return false, fmt.Errorf("invalid value for MaxUnavailable: %v", err)
	}
	if isPercent {
		maxUnavailable = util.GetValueFromPercent(maxUnavailable, deployment.Spec.Replicas)
	}
	// Check if we can scale down.
	minAvailable := deployment.Spec.Replicas - maxUnavailable
	minReadySeconds := deployment.Spec.Strategy.RollingUpdate.MinReadySeconds
	// Check the expectations of the deployment before counting available pods.
	dKey, err := controller.KeyFunc(&deployment)
	if err != nil {
		return false, fmt.Errorf("couldn't get key for deployment %#v: %v", deployment, err)
	}
	if expectationsCheck && !dc.podExpectations.SatisfiedExpectations(dKey) {
		glog.V(4).Infof("Pod expectations not met yet before reconciling old RCs")
		return false, nil
	}
	// Find the number of ready pods.
	readyPodCount, err := deploymentutil.GetAvailablePodsForRCs(dc.client, allRCs, minReadySeconds)
	if err != nil {
		return false, fmt.Errorf("could not find available pods: %v", err)
	}
	if readyPodCount <= minAvailable {
		// Cannot scale down.
		return false, nil
	}
	totalScaleDownCount := readyPodCount - minAvailable
	totalScaledDown := 0
	for _, targetRC := range oldRCs {
		if totalScaleDownCount == 0 {
			// No further scaling required.
			break
		}
		if targetRC.Spec.Replicas == 0 {
			// Cannot scale down this RC.
			continue
		}
		// Scale down.
		scaleDownCount := int(math.Min(float64(targetRC.Spec.Replicas), float64(totalScaleDownCount)))
		newReplicasCount := targetRC.Spec.Replicas - scaleDownCount
		_, err = dc.scaleRCAndRecordEvent(targetRC, newReplicasCount, deployment)
		if err != nil {
			return false, err
		}
		totalScaledDown += scaleDownCount
		totalScaleDownCount -= scaleDownCount
	}
	// Expect to see old RCs scaled down by exactly totalScaledDown (the sum of scaleDownCount) replicas.
	if expectationsCheck {
		dc.podExpectations.ExpectDeletions(dKey, totalScaledDown)
	}
	return true, nil
}
Example 7: reconcileOldRCs
// Set expectationsCheck to false to bypass the expectations check when testing.
func (dc *DeploymentController) reconcileOldRCs(allRCs []*api.ReplicationController, oldRCs []*api.ReplicationController, newRC *api.ReplicationController, deployment extensions.Deployment, expectationsCheck bool) (bool, error) {
	oldPodsCount := deploymentutil.GetReplicaCountForRCs(oldRCs)
	if oldPodsCount == 0 {
		// Can't scale down further.
		return false, nil
	}
	// Check the expectations of the deployment before reconciling.
	dKey, err := controller.KeyFunc(&deployment)
	if err != nil {
		return false, fmt.Errorf("couldn't get key for deployment %#v: %v", deployment, err)
	}
	if expectationsCheck && !dc.podExpectations.SatisfiedExpectations(dKey) {
		glog.V(4).Infof("Pod expectations not met yet before reconciling old RCs")
		return false, nil
	}
	minReadySeconds := deployment.Spec.MinReadySeconds
	allPodsCount := deploymentutil.GetReplicaCountForRCs(allRCs)
	newRCAvailablePodCount, err := deploymentutil.GetAvailablePodsForRCs(dc.client, []*api.ReplicationController{newRC}, minReadySeconds)
	if err != nil {
		return false, fmt.Errorf("could not find available pods: %v", err)
	}
	maxUnavailable, isPercent, err := util.GetIntOrPercentValue(&deployment.Spec.Strategy.RollingUpdate.MaxUnavailable)
	if err != nil {
		return false, fmt.Errorf("invalid value for MaxUnavailable: %v", err)
	}
	if isPercent {
		maxUnavailable = util.GetValueFromPercent(maxUnavailable, deployment.Spec.Replicas)
	}
	// Check if we can scale down. We can scale down in the following 2 cases:
	// * Some old RCs have unhealthy replicas; we can safely scale down those unhealthy replicas, since that
	//   won't further increase unavailability.
	// * The new RC has scaled up and its replicas have become ready; then we can scale down old RCs in a further step.
	//
	// maxScaledDown := allPodsCount - minAvailable - newRCPodsUnavailable
	//
	// Take into account not only maxUnavailable and any surge pods that have been created, but also unavailable
	// pods from the newRC, so that the unavailable pods from the newRC don't make us scale down old RCs in a
	// further step (which would increase unavailability).
	//
	// Concrete example:
	//
	// * 10 replicas
	// * 2 maxUnavailable (absolute number, not percent)
	// * 3 maxSurge (absolute number, not percent)
	//
	// Case 1:
	// * Deployment is updated, newRC is created with 3 replicas, oldRC is scaled down to 8, and newRC is scaled up to 5.
	// * The new RC pods crashloop and never become available.
	// * allPodsCount is 13. minAvailable is 8. newRCPodsUnavailable is 5.
	// * A node fails and causes one of the oldRC pods to become unavailable. However, 13 - 8 - 5 = 0, so the oldRC won't be scaled down.
	// * The user notices the crashloop and runs kubectl rollout undo to roll back.
	// * newRCPodsUnavailable is 1, since we rolled back to the good RC, so maxScaledDown = 13 - 8 - 1 = 4. 4 of the crashlooping pods will be scaled down.
	// * The total number of pods will then be 9 and the newRC can be scaled up to 10.
	//
	// Case 2:
	// Same example, but pushing a new pod template instead of rolling back (aka "rollover"):
	// * The new RC created must start with 0 replicas because allPodsCount is already at 13.
	// * However, newRCPodsUnavailable would also be 0, so the 2 old RCs could be scaled down by 5 (13 - 8 - 0),
	//   which would then allow the new RC to be scaled up by 5.
	minAvailable := deployment.Spec.Replicas - maxUnavailable
	newRCUnavailablePodCount := newRC.Spec.Replicas - newRCAvailablePodCount
	maxScaledDown := allPodsCount - minAvailable - newRCUnavailablePodCount
	if maxScaledDown <= 0 {
		return false, nil
	}
	// Clean up unhealthy replicas first, otherwise unhealthy replicas will block the deployment
	// and cause a timeout. See https://github.com/kubernetes/kubernetes/issues/16737
	cleanupCount, err := dc.cleanupUnhealthyReplicas(oldRCs, deployment, maxScaledDown)
	if err != nil {
		return false, err
	}
	// Scale down old RCs; maxUnavailable must be checked to ensure we can scale down.
	scaledDownCount, err := dc.scaleDownOldRCsForRollingUpdate(allRCs, oldRCs, deployment)
	if err != nil {
		return false, err
	}
	totalScaledDown := cleanupCount + scaledDownCount
	if expectationsCheck {
		dc.podExpectations.ExpectDeletions(dKey, totalScaledDown)
	}
	return totalScaledDown > 0, nil
}
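The long comment in Example 7 walks through a concrete scenario, and its arithmetic can be replayed directly. Here is a short standalone sketch reusing the comment's own numbers (10 replicas, maxUnavailable of 2, 13 total pods); the maxScaledDown formula is the one stated in the example:

package main

import "fmt"

// maxScaledDown replays the formula from Example 7:
// allPodsCount - minAvailable - newRCUnavailablePodCount,
// where minAvailable = desired - maxUnavailable.
func maxScaledDown(allPods, desired, maxUnavailable, newRCUnavailable int) int {
	minAvailable := desired - maxUnavailable
	return allPods - minAvailable - newRCUnavailable
}

func main() {
	// Case 1: 13 pods total, 10 desired, maxUnavailable 2,
	// 5 unavailable pods in the crashlooping newRC.
	fmt.Println(maxScaledDown(13, 10, 2, 5)) // 0 -> old RCs are not scaled down
	// After kubectl rollout undo, only 1 pod of the rolled-back RC is unavailable.
	fmt.Println(maxScaledDown(13, 10, 2, 1)) // 4 -> 4 crashlooping pods can be scaled down
}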