This article collects typical usage examples of the AbortRollingUpdate function from the Golang package k8s.io/kubernetes/pkg/kubectl. If you are wondering what AbortRollingUpdate does, how to call it, or want to see it used in context, the curated examples below should help.
Two code examples of the AbortRollingUpdate function are shown below, ordered by popularity; ratings from readers help surface the most useful Golang examples.
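Before diving in, note the pattern both examples share: build a kubectl.RollingUpdaterConfig, call AbortRollingUpdate on it when rolling back, persist the controller it mutates, then run the updater. Below is a minimal sketch of that pattern under stated assumptions: the helper name abortAndReverse is hypothetical, the client import path is the one used in this era of Kubernetes, and error handling follows example 2.

package rollbacksketch

import (
	client "k8s.io/kubernetes/pkg/client/unversioned" // assumed import path for this era
	"k8s.io/kubernetes/pkg/kubectl"
)

// abortAndReverse is a hypothetical helper mirroring the rollback path of the
// examples: abort the in-progress rolling update described by config, persist
// the controller that AbortRollingUpdate mutated, then drive the update (now
// pointing in the reverse direction) to completion.
func abortAndReverse(c *client.Client, updater *kubectl.RollingUpdater, config *kubectl.RollingUpdaterConfig) error {
	if err := kubectl.AbortRollingUpdate(config); err != nil {
		return err
	}
	// As in both examples, the result of this Update call is not checked.
	c.ReplicationControllers(config.NewRc.Namespace).Update(config.NewRc)
	return updater.Update(config)
}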
Example 1: RunRollingUpdate
//......... some code omitted here .........
			glog.V(2).Infof("was not able to recover adequate information to discover if .spec.replicas was defaulted")
		} else {
			replicasDefaulted = isReplicasDefaulted(infos[0])
		}
	}
	// If the --image option is specified, we need to create a new rc with at least one different selector
	// than the old rc. This selector is the hash of the rc, which will differ because the new rc has a
	// different image.
	if len(image) != 0 {
		keepOldName = len(args) == 1
		newName := findNewName(args, oldRc)
		if newRc, err = kubectl.LoadExistingNextReplicationController(client, cmdNamespace, newName); err != nil {
			return err
		}
		if newRc != nil {
			fmt.Fprintf(out, "Found existing update in progress (%s), resuming.\n", newRc.Name)
		} else {
			newRc, err = kubectl.CreateNewControllerFromCurrentController(client, cmdNamespace, oldName, newName, image, deploymentKey)
			if err != nil {
				return err
			}
		}
		// Update the existing replication controller with pointers to the 'next' controller
		// and adding the <deploymentKey> label if necessary to distinguish it from the 'next' controller.
		oldHash, err := api.HashObject(oldRc, client.Codec)
		if err != nil {
			return err
		}
		oldRc, err = kubectl.UpdateExistingReplicationController(client, oldRc, cmdNamespace, newRc.Name, deploymentKey, oldHash, out)
		if err != nil {
			return err
		}
	}
	if oldName == newRc.Name {
		return cmdutil.UsageError(cmd, "%s cannot have the same name as the existing ReplicationController %s",
			filename, oldName)
	}
	updater := kubectl.NewRollingUpdater(newRc.Namespace, updaterClient)
	// To successfully pull off a rolling update the new and old rc have to differ
	// by at least one selector. Every new pod should have the selector and every
	// old pod should not have the selector.
	var hasLabel bool
	for key, oldValue := range oldRc.Spec.Selector {
		if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue {
			hasLabel = true
			break
		}
	}
	if !hasLabel {
		return cmdutil.UsageError(cmd, "%s must specify a matching key with non-equal value in Selector for %s",
			filename, oldName)
	}
	// TODO: handle scales during rolling update
	if replicasDefaulted {
		newRc.Spec.Replicas = oldRc.Spec.Replicas
	}
	if dryrun {
		oldRcData := &bytes.Buffer{}
		if err := f.PrintObject(cmd, oldRc, oldRcData); err != nil {
			return err
		}
		newRcData := &bytes.Buffer{}
		if err := f.PrintObject(cmd, newRc, newRcData); err != nil {
			return err
		}
		fmt.Fprintf(out, "Rolling from:\n%s\nTo:\n%s\n", string(oldRcData.Bytes()), string(newRcData.Bytes()))
		return nil
	}
	updateCleanupPolicy := kubectl.DeleteRollingUpdateCleanupPolicy
	if keepOldName {
		updateCleanupPolicy = kubectl.RenameRollingUpdateCleanupPolicy
	}
	config := &kubectl.RollingUpdaterConfig{
		Out:            out,
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   period,
		Interval:       interval,
		Timeout:        timeout,
		CleanupPolicy:  updateCleanupPolicy,
		UpdateAcceptor: kubectl.DefaultUpdateAcceptor,
	}
	if cmdutil.GetFlagBool(cmd, "rollback") {
		// Reverse the in-progress update and persist the controller that
		// AbortRollingUpdate mutated. Unlike example 2 below, this variant
		// does not check the results of either call.
		kubectl.AbortRollingUpdate(config)
		client.ReplicationControllers(config.NewRc.Namespace).Update(config.NewRc)
	}
	err = updater.Update(config)
	if err != nil {
		return err
	}
	if keepOldName {
		fmt.Fprintf(out, "%s\n", oldName)
	} else {
		fmt.Fprintf(out, "%s\n", newRc.Name)
	}
	return nil
}
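The hasLabel loop in example 1 (and again in example 2) enforces the invariant described in its comment: the new controller must override at least one selector key with a different value, or old and new pods could not be told apart during the update. A self-contained sketch of that check, with illustrative selector keys and values:

package main

import "fmt"

// selectorsDiffer mirrors the hasLabel loop: it reports whether newSel
// overrides at least one key of oldSel with a different value.
func selectorsDiffer(oldSel, newSel map[string]string) bool {
	for key, oldValue := range oldSel {
		if newValue, ok := newSel[key]; ok && newValue != oldValue {
			return true
		}
	}
	return false
}

func main() {
	oldSel := map[string]string{"app": "web", "deployment": "7d2f"}
	newSel := map[string]string{"app": "web", "deployment": "9a1c"}
	fmt.Println(selectorsDiffer(oldSel, newSel)) // prints true: "deployment" differs
}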
Example 2: RunRollingUpdate
//......... some code omitted here .........
		newName := findNewName(args, oldRc)
		if newRc, err = kubectl.LoadExistingNextReplicationController(client, cmdNamespace, newName); err != nil {
			return err
		}
		if newRc == nil {
			return cmdutil.UsageError(cmd, "Could not find %s to rollback.\n", newName)
		}
	}
	if oldName == newRc.Name {
		return cmdutil.UsageError(cmd, "%s cannot have the same name as the existing ReplicationController %s",
			filename, oldName)
	}
	updater := kubectl.NewRollingUpdater(newRc.Namespace, client)
	// To successfully pull off a rolling update the new and old rc have to differ
	// by at least one selector. Every new pod should have the selector and every
	// old pod should not have the selector.
	var hasLabel bool
	for key, oldValue := range oldRc.Spec.Selector {
		if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue {
			hasLabel = true
			break
		}
	}
	if !hasLabel {
		return cmdutil.UsageError(cmd, "%s must specify a matching key with non-equal value in Selector for %s",
			filename, oldName)
	}
	// TODO: handle scales during rolling update
	if replicasDefaulted {
		newRc.Spec.Replicas = oldRc.Spec.Replicas
	}
	if dryrun {
		oldRcData := &bytes.Buffer{}
		newRcData := &bytes.Buffer{}
		if outputFormat == "" {
			oldRcData.WriteString(oldRc.Name)
			newRcData.WriteString(newRc.Name)
		} else {
			if err := f.PrintObject(cmd, mapper, oldRc, oldRcData); err != nil {
				return err
			}
			if err := f.PrintObject(cmd, mapper, newRc, newRcData); err != nil {
				return err
			}
		}
		fmt.Fprintf(out, "Rolling from:\n%s\nTo:\n%s\n", string(oldRcData.Bytes()), string(newRcData.Bytes()))
		return nil
	}
	updateCleanupPolicy := kubectl.DeleteRollingUpdateCleanupPolicy
	if keepOldName {
		updateCleanupPolicy = kubectl.RenameRollingUpdateCleanupPolicy
	}
	config := &kubectl.RollingUpdaterConfig{
		Out:            out,
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   period,
		Interval:       interval,
		Timeout:        timeout,
		CleanupPolicy:  updateCleanupPolicy,
		MaxUnavailable: intstr.FromInt(0),
		MaxSurge:       intstr.FromInt(1),
	}
	if rollback {
		// AbortRollingUpdate swaps the old and new controllers inside config
		// so that the Update call below drives the rollout in reverse; the
		// annotations it sets on the target controller are persisted by the
		// (unchecked) Update call that follows.
		err = kubectl.AbortRollingUpdate(config)
		if err != nil {
			return err
		}
		client.ReplicationControllers(config.NewRc.Namespace).Update(config.NewRc)
	}
	err = updater.Update(config)
	if err != nil {
		return err
	}
	message := "rolling updated"
	if keepOldName {
		newRc.Name = oldName
	} else {
		message = fmt.Sprintf("rolling updated to %q", newRc.Name)
	}
	newRc, err = client.ReplicationControllers(cmdNamespace).Get(newRc.Name)
	if err != nil {
		return err
	}
	if outputFormat != "" {
		return f.PrintObject(cmd, mapper, newRc, out)
	}
	kind, err := api.Scheme.ObjectKind(newRc)
	if err != nil {
		return err
	}
	_, res := meta.KindToResource(kind)
	cmdutil.PrintSuccess(mapper, false, out, res.Resource, oldName, message)
	return nil
}
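Finally, the RollingUpdaterConfig that example 2 feeds to both AbortRollingUpdate and Update can be assembled in isolation, as sketched below. The buildRollbackConfig helper and the concrete durations are assumptions for illustration (in the examples they come from command-line flags), and the import paths match the era of these examples. MaxSurge of 1 with MaxUnavailable of 0 makes the updater bring up one new pod before taking an old one down, so serving capacity never dips below the original replica count.

package configsketch

import (
	"os"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/kubectl"
	"k8s.io/kubernetes/pkg/util/intstr"
)

// buildRollbackConfig is a hypothetical helper showing how example 2 wires up
// its updater configuration; the period/interval/timeout values are placeholders.
func buildRollbackConfig(oldRc, newRc *api.ReplicationController, keepOldName bool) *kubectl.RollingUpdaterConfig {
	// Keeping the old name means the new controller must be renamed at the
	// end of the update instead of the old one simply being deleted.
	cleanup := kubectl.DeleteRollingUpdateCleanupPolicy
	if keepOldName {
		cleanup = kubectl.RenameRollingUpdateCleanupPolicy
	}
	return &kubectl.RollingUpdaterConfig{
		Out:            os.Stdout,
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   1 * time.Second,
		Interval:       3 * time.Second,
		Timeout:        5 * time.Minute,
		CleanupPolicy:  cleanup,
		MaxUnavailable: intstr.FromInt(0), // never drop below the old capacity
		MaxSurge:       intstr.FromInt(1), // surge one pod at a time
	}
}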