This page collects typical usage examples of the Golang function NewRetryParams from k8s.io/kubernetes/pkg/kubectl. If you have been asking yourself what exactly NewRetryParams does and how to use it, the hand-picked examples below should help.
Seven code examples of NewRetryParams are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang examples.
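For orientation before the examples: NewRetryParams is a small constructor that bundles a poll interval and an overall timeout into a kubectl.RetryParams value, which the Scaler APIs then consume. A minimal sketch of the pattern (the durations are illustrative, not defaults):

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/kubectl"
)

func main() {
	// Retry the update every second, for at most 30 seconds.
	retry := kubectl.NewRetryParams(1*time.Second, 30*time.Second)
	// A second, longer-lived value is commonly used for waiting on replicas.
	waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)

	fmt.Println(retry.Interval, retry.Timeout)
	fmt.Println(waitForReplicas.Interval, waitForReplicas.Timeout)
}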
Example 1: DeployWithAcceptor
// DeployWithAcceptor scales down the old deployment (from) and then scales
// up the new one (to). If an updateAcceptor is provided and the desired
// replica count is >1, the first replica of to is rolled out and validated
// before the full scale-up is performed.
//
// This is currently only used in conjunction with the rolling update strategy
// for initial deployments.
func (s *RecreateDeploymentStrategy) DeployWithAcceptor(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
	config, err := deployutil.DecodeDeploymentConfig(to, s.codec)
	if err != nil {
		return fmt.Errorf("couldn't decode config from deployment %s: %v", to.Name, err)
	}

	params := config.Spec.Strategy.RecreateParams
	retryParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)
	waitParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)

	// Execute any pre-hook.
	if params != nil && params.Pre != nil {
		if err := s.hookExecutor.Execute(params.Pre, to, "prehook"); err != nil {
			return fmt.Errorf("pre hook failed: %s", err)
		}
		glog.Infof("Pre hook finished")
	}

	// Scale down the from deployment.
	if from != nil {
		glog.Infof("Scaling %s down to zero", deployutil.LabelForDeployment(from))
		_, err := s.scaleAndWait(from, 0, retryParams, waitParams)
		if err != nil {
			return fmt.Errorf("couldn't scale %s to 0: %v", deployutil.LabelForDeployment(from), err)
		}
	}

	// Scale up the to deployment.
	if desiredReplicas > 0 {
		// If an UpdateAcceptor is provided, scale up to 1 and validate the
		// replica, aborting if the replica isn't acceptable.
		if updateAcceptor != nil {
			glog.Infof("Scaling %s to 1 before performing acceptance check", deployutil.LabelForDeployment(to))
			updatedTo, err := s.scaleAndWait(to, 1, retryParams, waitParams)
			if err != nil {
				return fmt.Errorf("couldn't scale %s to 1: %v", deployutil.LabelForDeployment(to), err)
			}
			glog.Infof("Performing acceptance check of %s", deployutil.LabelForDeployment(to))
			if err := updateAcceptor.Accept(updatedTo); err != nil {
				return fmt.Errorf("update acceptor rejected %s: %v", deployutil.LabelForDeployment(to), err)
			}
			to = updatedTo
		}
		// Complete the scale up.
		if to.Spec.Replicas != desiredReplicas {
			glog.Infof("Scaling %s to %d", deployutil.LabelForDeployment(to), desiredReplicas)
			updatedTo, err := s.scaleAndWait(to, desiredReplicas, retryParams, waitParams)
			if err != nil {
				return fmt.Errorf("couldn't scale %s to %d: %v", deployutil.LabelForDeployment(to), desiredReplicas, err)
			}
			to = updatedTo
		}
	}

	// Execute any post-hook. Errors are logged and ignored.
	if params != nil && params.Post != nil {
		if err := s.hookExecutor.Execute(params.Post, to, "posthook"); err != nil {
			util.HandleError(fmt.Errorf("post hook failed: %s", err))
		} else {
			glog.Infof("Post hook finished")
		}
	}

	glog.Infof("Deployment %s successfully made active", to.Name)
	return nil
}
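The scaleAndWait helper isn't shown on this page. The sketch below is a plausible reconstruction, not the real source; the receiver fields (s.scaler, s.rcClient) and the exact signature are assumptions. It illustrates how the two RetryParams values usually split the work: one bounds retries of the scale update, the other bounds the wait for the observed replica count.

// Hypothetical reconstruction; the real helper lives elsewhere in the package.
func (s *RecreateDeploymentStrategy) scaleAndWait(deployment *kapi.ReplicationController, replicas int, retry, wait *kubectl.RetryParams) (*kapi.ReplicationController, error) {
	// Size: -1 and an empty ResourceVersion disable the precondition checks.
	precondition := &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}
	if err := s.scaler.Scale(deployment.Namespace, deployment.Name, uint(replicas), precondition, retry, wait); err != nil {
		return nil, err
	}
	// Re-read the controller so callers observe the updated spec.
	return s.rcClient.ReplicationControllers(deployment.Namespace).Get(deployment.Name)
}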
Example 2:
It("should scale a job up", func() {
startParallelism := 1
endParallelism := 2
By("Creating a job")
job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
job, err := createJob(f.Client, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == startParallelism")
err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism)
Expect(err).NotTo(HaveOccurred())
By("scale job up")
scaler, err := kubectl.ScalerFor("Job", f.Client)
Expect(err).NotTo(HaveOccurred())
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == endParallelism")
err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
Expect(err).NotTo(HaveOccurred())
})
It("should scale a job down", func() {
startParallelism := 2
endParallelism := 1
By("Creating a job")
job := newTestJob("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions)
job, err := createJob(f.Client, f.Namespace.Name, job)
Example 3: Deploy
// Deploy starts the deployment process for deploymentName.
func (d *Deployer) Deploy(namespace, deploymentName string) error {
	// Look up the new deployment.
	to, err := d.getDeployment(namespace, deploymentName)
	if err != nil {
		return fmt.Errorf("couldn't get deployment %s/%s: %v", namespace, deploymentName, err)
	}

	// Decode the config from the deployment.
	config, err := deployutil.DecodeDeploymentConfig(to, latest.Codec)
	if err != nil {
		return fmt.Errorf("couldn't decode deployment config from deployment %s/%s: %v", to.Namespace, to.Name, err)
	}

	// Get a strategy for the deployment.
	strategy, err := d.strategyFor(config)
	if err != nil {
		return err
	}

	// New deployments must have a desired replica count.
	desiredReplicas, hasDesired := deployutil.DeploymentDesiredReplicas(to)
	if !hasDesired {
		return fmt.Errorf("deployment %s has no desired replica count", deployutil.LabelForDeployment(to))
	}

	// Find all deployments for the config.
	unsortedDeployments, err := d.getDeployments(namespace, config.Name)
	if err != nil {
		return fmt.Errorf("couldn't get controllers in namespace %s: %v", namespace, err)
	}
	deployments := unsortedDeployments.Items

	// Sort all the deployments by version.
	sort.Sort(deployutil.ByLatestVersionDesc(deployments))

	// Find any last completed deployment.
	var from *kapi.ReplicationController
	for _, candidate := range deployments {
		if candidate.Name == to.Name {
			continue
		}
		if deployutil.DeploymentStatusFor(&candidate) == deployapi.DeploymentStatusComplete {
			from = &candidate
			break
		}
	}

	// Scale down any deployments which aren't the new or last deployment.
	for _, candidate := range deployments {
		// Skip the from/to deployments.
		if candidate.Name == to.Name {
			continue
		}
		if from != nil && candidate.Name == from.Name {
			continue
		}
		// Skip the deployment if it's already scaled down.
		if candidate.Spec.Replicas == 0 {
			continue
		}
		// Scale the deployment down to zero.
		retryWaitParams := kubectl.NewRetryParams(1*time.Second, 120*time.Second)
		if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retryWaitParams, retryWaitParams); err != nil {
			glog.Errorf("Couldn't scale down prior deployment %s: %v", deployutil.LabelForDeployment(&candidate), err)
		} else {
			glog.Infof("Scaled down prior deployment %s", deployutil.LabelForDeployment(&candidate))
		}
	}

	// Perform the deployment.
	if from == nil {
		glog.Infof("Deploying %s for the first time (replicas: %d)", deployutil.LabelForDeployment(to), desiredReplicas)
	} else {
		glog.Infof("Deploying from %s to %s (replicas: %d)", deployutil.LabelForDeployment(from), deployutil.LabelForDeployment(to), desiredReplicas)
	}
	return strategy.Deploy(from, to, desiredReplicas)
}
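A note on the precondition literal used above: in ScalePrecondition, a Size of -1 and an empty ResourceVersion are treated as "don't care", so the call amounts to an unconditional scale-down. A conditional variant would look roughly like this (values illustrative, not from the source above):

// Illustrative only: scale down solely if the controller still has exactly
// 3 replicas and hasn't changed since it was listed.
precondition := &kubectl.ScalePrecondition{
	Size:            3,
	ResourceVersion: candidate.ResourceVersion,
}
retryWaitParams := kubectl.NewRetryParams(1*time.Second, 120*time.Second)
if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), precondition, retryWaitParams, retryWaitParams); err != nil {
	// A failed precondition surfaces as an error here.
	glog.Errorf("conditional scale-down failed: %v", err)
}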
Example 4: RunScale
// RunScale executes the scaling
func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, shortOutput bool, options *resource.FilenameOptions) error {
	if len(os.Args) > 1 && os.Args[1] == "resize" {
		printDeprecationWarning("scale", "resize")
	}

	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	mapper, typer := f.Object()
	r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).
		ContinueOnError().
		NamespaceParam(cmdNamespace).DefaultNamespace().
		FilenameParam(enforceNamespace, options).
		ResourceTypeOrNameArgs(false, args...).
		Flatten().
		Do()
	err = r.Err()
	if resource.IsUsageError(err) {
		return cmdutil.UsageError(cmd, err.Error())
	}
	if err != nil {
		return err
	}

	count := cmdutil.GetFlagInt(cmd, "replicas")
	if count < 0 {
		return cmdutil.UsageError(cmd, "The --replicas=COUNT flag is required, and COUNT must be greater than or equal to 0")
	}

	infos := []*resource.Info{}
	err = r.Visit(func(info *resource.Info, err error) error {
		if err == nil {
			infos = append(infos, info)
		}
		return nil
	})

	resourceVersion := cmdutil.GetFlagString(cmd, "resource-version")
	if len(resourceVersion) != 0 && len(infos) > 1 {
		return fmt.Errorf("cannot use --resource-version with multiple resources")
	}

	counter := 0
	err = r.Visit(func(info *resource.Info, err error) error {
		if err != nil {
			return err
		}

		mapping := info.ResourceMapping()
		scaler, err := f.Scaler(mapping)
		if err != nil {
			return err
		}

		currentSize := cmdutil.GetFlagInt(cmd, "current-replicas")
		precondition := &kubectl.ScalePrecondition{Size: currentSize, ResourceVersion: resourceVersion}
		retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)
		var waitForReplicas *kubectl.RetryParams
		if timeout := cmdutil.GetFlagDuration(cmd, "timeout"); timeout != 0 {
			waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout)
		}
		if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil {
			return err
		}
		if cmdutil.ShouldRecord(cmd, info) {
			patchBytes, err := cmdutil.ChangeResourcePatch(info, f.Command())
			if err != nil {
				return err
			}
			mapping := info.ResourceMapping()
			client, err := f.ClientForMapping(mapping)
			if err != nil {
				return err
			}
			helper := resource.NewHelper(client, mapping)
			_, err = helper.Patch(info.Namespace, info.Name, api.StrategicMergePatchType, patchBytes)
			if err != nil {
				return err
			}
		}
		counter++
		cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, false, "scaled")
		return nil
	})
	if err != nil {
		return err
	}
	if counter == 0 {
		return fmt.Errorf("no objects passed to scale")
	}
	return nil
}
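Note the asymmetry between retry and waitForReplicas in this example: retry, built from the package defaults kubectl.Interval and kubectl.Timeout, bounds the loop that applies the size update, while waitForReplicas stays nil unless --timeout is set, in which case Scale additionally waits for the new replica count to be reached. Driving a RetryParams by hand follows the same shape; a sketch, assuming the era's wait.Poll helper and placeholder client/namespace/name values:

// Mirrors (rather than reproduces) how Scale consumes a RetryParams.
rp := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)
err := wait.Poll(rp.Interval, rp.Timeout, func() (bool, error) {
	rc, err := client.ReplicationControllers(namespace).Get(name)
	if err != nil {
		return false, err
	}
	// Done once the observed replica count matches the requested one.
	return rc.Status.Replicas == rc.Spec.Replicas, nil
})
if err != nil {
	return err
}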
Example 5: RunScale
// RunScale executes the scaling
func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string, shortOutput bool, options *ScaleOptions) error {
	if len(os.Args) > 1 && os.Args[1] == "resize" {
		printDeprecationWarning("scale", "resize")
	}

	count := cmdutil.GetFlagInt(cmd, "replicas")
	if count < 0 {
		return cmdutil.UsageError(cmd, "--replicas=COUNT is required, and COUNT must be greater than or equal to 0")
	}

	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	cmdTenant, enforceTenant, err := f.DefaultTenant()
	if err != nil {
		return err
	}

	mapper, typer := f.Object()
	r := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()).
		ContinueOnError().
		NamespaceParam(cmdNamespace).DefaultNamespace().
		TenantParam(cmdTenant).DefaultTenant().
		FilenameParam(enforceTenant, enforceNamespace, options.Filenames...).
		ResourceTypeOrNameArgs(false, args...).
		Flatten().
		Do()
	err = r.Err()
	if err != nil {
		return err
	}

	infos, err := r.Infos()
	if err != nil {
		return err
	}
	info := infos[0]
	mapping := info.ResourceMapping()
	scaler, err := f.Scaler(mapping)
	if err != nil {
		return err
	}

	resourceVersion := cmdutil.GetFlagString(cmd, "resource-version")
	if len(resourceVersion) != 0 && len(infos) > 1 {
		return fmt.Errorf("cannot use --resource-version with multiple controllers")
	}
	currentSize := cmdutil.GetFlagInt(cmd, "current-replicas")
	if currentSize != -1 && len(infos) > 1 {
		return fmt.Errorf("cannot use --current-replicas with multiple controllers")
	}

	precondition := &kubectl.ScalePrecondition{Size: currentSize, ResourceVersion: resourceVersion}
	retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)
	var waitForReplicas *kubectl.RetryParams
	if timeout := cmdutil.GetFlagDuration(cmd, "timeout"); timeout != 0 {
		waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout)
	}

	errs := []error{}
	for _, info := range infos {
		if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil {
			errs = append(errs, err)
			continue
		}
		cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "scaled")
	}
	return utilerrors.NewAggregate(errs)
}
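One design point in this variant: per-resource failures are collected instead of returned eagerly, so one bad controller doesn't stop the rest from being scaled, and utilerrors.NewAggregate folds the slice into a single error (returning nil for an empty slice). The same pattern in isolation, with names and scaleOne as hypothetical stand-ins:

errs := []error{}
for _, name := range names {
	if err := scaleOne(name); err != nil {
		// Record the failure and keep going instead of failing fast.
		errs = append(errs, fmt.Errorf("scaling %s: %v", name, err))
	}
}
// nil when errs is empty; a single combined error otherwise.
return utilerrors.NewAggregate(errs)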
Example 6: Deploy
// Deploy starts the deployment process for rcName.
func (d *Deployer) Deploy(namespace, rcName string) error {
	// Look up the new deployment.
	to, err := d.getDeployment(namespace, rcName)
	if err != nil {
		return fmt.Errorf("couldn't get deployment %s: %v", rcName, err)
	}

	// Decode the config from the deployment.
	config, err := deployutil.DecodeDeploymentConfig(to, kapi.Codecs.UniversalDecoder())
	if err != nil {
		return fmt.Errorf("couldn't decode deployment config from deployment %s: %v", to.Name, err)
	}

	// Get a strategy for the deployment.
	s, err := d.strategyFor(config)
	if err != nil {
		return err
	}

	// New deployments must have a desired replica count.
	desiredReplicas, hasDesired := deployutil.DeploymentDesiredReplicas(to)
	if !hasDesired {
		return fmt.Errorf("deployment %s has already run to completion", to.Name)
	}

	// Find all deployments for the config.
	unsortedDeployments, err := d.getDeployments(namespace, config.Name)
	if err != nil {
		return fmt.Errorf("couldn't get controllers in namespace %s: %v", namespace, err)
	}
	deployments := unsortedDeployments.Items

	// Sort all the deployments by version.
	sort.Sort(deployutil.ByLatestVersionDesc(deployments))

	// Find any last completed deployment.
	var from *kapi.ReplicationController
	for _, candidate := range deployments {
		if candidate.Name == to.Name {
			continue
		}
		if deployutil.IsCompleteDeployment(&candidate) {
			from = &candidate
			break
		}
	}

	if deployutil.DeploymentVersionFor(to) < deployutil.DeploymentVersionFor(from) {
		return fmt.Errorf("deployment %s is older than %s", to.Name, from.Name)
	}

	// Scale down any deployments which aren't the new or last deployment.
	for _, candidate := range deployments {
		// Skip the from/to deployments.
		if candidate.Name == to.Name {
			continue
		}
		if from != nil && candidate.Name == from.Name {
			continue
		}
		// Skip the deployment if it's already scaled down.
		if candidate.Spec.Replicas == 0 {
			continue
		}
		// Scale the deployment down to zero.
		retryWaitParams := kubectl.NewRetryParams(1*time.Second, 120*time.Second)
		if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retryWaitParams, retryWaitParams); err != nil {
			fmt.Fprintf(d.errOut, "error: Couldn't scale down prior deployment %s: %v\n", deployutil.LabelForDeployment(&candidate), err)
		} else {
			fmt.Fprintf(d.out, "--> Scaled older deployment %s down\n", candidate.Name)
		}
	}

	if d.until == "start" {
		return strategy.NewConditionReachedErr("Ready to start deployment")
	}

	// Perform the deployment.
	if err := s.Deploy(from, to, int(desiredReplicas)); err != nil {
		return err
	}
	fmt.Fprintf(d.out, "--> Success\n")
	return nil
}
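The d.until checkpoints here (and the pre/mid/percentage variants in Example 7 below) abort the flow with strategy.NewConditionReachedErr, a sentinel that means "stopped on purpose" rather than a failure. A caller would typically detect it and exit cleanly; a sketch, where IsConditionReached is an assumed helper in the strategy package, not confirmed by this page:

if err := deployer.Deploy(namespace, rcName); err != nil {
	// Treat a reached checkpoint as a clean early exit, not a failure.
	if strategy.IsConditionReached(err) {
		fmt.Fprintf(out, "--> %v\n", err)
		return nil
	}
	return err
}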
Example 7: DeployWithAcceptor
// DeployWithAcceptor scales down the old deployment (from) and then scales
// up the new one (to). If an updateAcceptor is provided and the desired
// replica count is >1, the first replica of to is rolled out and validated
// before the full scale-up is performed.
//
// This is currently only used in conjunction with the rolling update strategy
// for initial deployments.
func (s *RecreateDeploymentStrategy) DeployWithAcceptor(from *kapi.ReplicationController, to *kapi.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
	config, err := deployutil.DecodeDeploymentConfig(to, s.decoder)
	if err != nil {
		return fmt.Errorf("couldn't decode config from deployment %s: %v", to.Name, err)
	}

	params := config.Spec.Strategy.RecreateParams
	retryParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)
	waitParams := kubectl.NewRetryParams(s.retryPeriod, s.retryTimeout)

	if updateAcceptor == nil {
		// params and params.TimeoutSeconds may be nil (the hook checks below
		// guard against a nil params), so guard the dereference here.
		if params != nil && params.TimeoutSeconds != nil {
			updateAcceptor = s.getUpdateAcceptor(time.Duration(*params.TimeoutSeconds)*time.Second, config.Spec.MinReadySeconds)
		}
	}

	// Execute any pre-hook.
	if params != nil && params.Pre != nil {
		if err := s.hookExecutor.Execute(params.Pre, to, deployapi.PreHookPodSuffix, "pre"); err != nil {
			return fmt.Errorf("pre hook failed: %s", err)
		}
	}
	if s.until == "pre" {
		return strat.NewConditionReachedErr("pre hook succeeded")
	}

	// Record all warnings.
	defer stratutil.RecordConfigWarnings(s.eventClient, from, s.decoder, s.out)
	defer stratutil.RecordConfigWarnings(s.eventClient, to, s.decoder, s.out)

	// Scale down the from deployment.
	if from != nil {
		fmt.Fprintf(s.out, "--> Scaling %s down to zero\n", from.Name)
		_, err := s.scaleAndWait(from, 0, retryParams, waitParams)
		if err != nil {
			return fmt.Errorf("couldn't scale %s to 0: %v", from.Name, err)
		}
	}

	if s.until == "0%" {
		return strat.NewConditionReachedErr("Reached 0% (no running pods)")
	}

	if params != nil && params.Mid != nil {
		if err := s.hookExecutor.Execute(params.Mid, to, deployapi.MidHookPodSuffix, "mid"); err != nil {
			return fmt.Errorf("mid hook failed: %s", err)
		}
	}
	if s.until == "mid" {
		return strat.NewConditionReachedErr("mid hook succeeded")
	}

	accepted := false

	// Scale up the to deployment.
	if desiredReplicas > 0 {
		if from != nil {
			// Scale up to 1 and validate the replica,
			// aborting if the replica isn't acceptable.
			fmt.Fprintf(s.out, "--> Scaling %s to 1 before performing acceptance check\n", to.Name)
			updatedTo, err := s.scaleAndWait(to, 1, retryParams, waitParams)
			if err != nil {
				return fmt.Errorf("couldn't scale %s to 1: %v", to.Name, err)
			}
			if err := updateAcceptor.Accept(updatedTo); err != nil {
				return fmt.Errorf("update acceptor rejected %s: %v", to.Name, err)
			}
			accepted = true
			to = updatedTo

			if strat.PercentageBetween(s.until, 1, 99) {
				return strat.NewConditionReachedErr(fmt.Sprintf("Reached %s", s.until))
			}
		}

		// Complete the scale up.
		if to.Spec.Replicas != int32(desiredReplicas) {
			fmt.Fprintf(s.out, "--> Scaling %s to %d\n", to.Name, desiredReplicas)
			updatedTo, err := s.scaleAndWait(to, desiredReplicas, retryParams, waitParams)
			if err != nil {
				return fmt.Errorf("couldn't scale %s to %d: %v", to.Name, desiredReplicas, err)
			}
			to = updatedTo
		}

		if !accepted {
			if err := updateAcceptor.Accept(to); err != nil {
				return fmt.Errorf("update acceptor rejected %s: %v", to.Name, err)
			}
		}
	}
//......... remainder of this example omitted .........