This article collects typical usage examples of the NewRollingUpdater function from the Go package k8s.io/kubernetes/pkg/kubectl. If you are wondering what NewRollingUpdater does, how it is called, or what real-world uses of it look like, the hand-picked examples below should help.
Eight code examples of NewRollingUpdater are shown below, sorted by popularity by default.
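For orientation before the examples: below is a minimal sketch of driving a rolling update by hand, assuming the pre-client-go API these examples were written against. Package paths, the type of NewRollingUpdater's second argument (a plain client in some releases, a kubectl.RollingUpdaterClient wrapper in others, as Examples 1 and 4 show), and the exact set of RollingUpdaterConfig fields all varied across Kubernetes versions, so treat this as a shape rather than a drop-in.

package rollingexample

import (
	"os"
	"time"

	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/kubectl"
)

// rollOut replaces oldRc with newRc using the polling-based updater.
// Field names follow Example 4 below; verify them against your vendored copy.
func rollOut(c client.Interface, namespace string, oldRc, newRc *api.ReplicationController) error {
	updater := kubectl.NewRollingUpdater(namespace, c)
	return updater.Update(&kubectl.RollingUpdaterConfig{
		Out:          os.Stdout,       // progress output
		OldRc:        oldRc,           // controller being scaled down
		NewRc:        newRc,           // controller being scaled up
		UpdatePeriod: 3 * time.Second, // pause between replacement steps
		Interval:     3 * time.Second, // status polling interval
		Timeout:      5 * time.Minute, // give up after this long
	})
}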
Example 1: NewRollingDeploymentStrategy
// NewRollingDeploymentStrategy makes a new RollingDeploymentStrategy.
func NewRollingDeploymentStrategy(namespace string, client kclient.Interface, codec runtime.Codec, initialStrategy acceptingDeploymentStrategy) *RollingDeploymentStrategy {
	return &RollingDeploymentStrategy{
		codec:           codec,
		initialStrategy: initialStrategy,
		client:          client,
		apiRetryPeriod:  DefaultApiRetryPeriod,
		apiRetryTimeout: DefaultApiRetryTimeout,
		rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
			updater := kubectl.NewRollingUpdater(namespace, client)
			return updater.Update(config)
		},
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		getUpdateAcceptor: func(timeout time.Duration) strat.UpdateAcceptor {
			return stratsupport.NewAcceptNewlyObservedReadyPods(client, timeout, AcceptorInterval)
		},
	}
}
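Note the design that recurs throughout these examples: the strategy never calls kubectl.NewRollingUpdater at deploy time directly; the call is captured in the rollingUpdate function field. One plausible payoff is testability, sketched below under the assumption that the test lives in the strategy's own package (the field is unexported) and that fakeClient, codec, and initialStrategy are test fixtures.

import (
	"testing"

	"k8s.io/kubernetes/pkg/kubectl"
)

// Hypothetical in-package test: swap the injected closure so the strategy's
// orchestration can be exercised without a live API server.
func TestRollingUpdateIsInjected(t *testing.T) {
	strategy := NewRollingDeploymentStrategy("test-ns", fakeClient, codec, initialStrategy)

	var got *kubectl.RollingUpdaterConfig
	strategy.rollingUpdate = func(config *kubectl.RollingUpdaterConfig) error {
		got = config // record the config instead of calling the API server
		return nil   // pretend the rolling update succeeded
	}

	// ... drive the strategy here, then assert on got ...
	_ = got
}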
Example 2: NewRollingDeploymentStrategy
// NewRollingDeploymentStrategy makes a new RollingDeploymentStrategy.
func NewRollingDeploymentStrategy(namespace string, client kclient.Interface, tags client.ImageStreamTagsNamespacer, decoder runtime.Decoder, initialStrategy acceptingDeploymentStrategy, out, errOut io.Writer, until string) *RollingDeploymentStrategy {
	if out == nil {
		out = ioutil.Discard
	}
	if errOut == nil {
		errOut = ioutil.Discard
	}
	return &RollingDeploymentStrategy{
		out:             out,
		errOut:          errOut,
		until:           until,
		decoder:         decoder,
		initialStrategy: initialStrategy,
		client:          client,
		tags:            tags,
		apiRetryPeriod:  DefaultApiRetryPeriod,
		apiRetryTimeout: DefaultApiRetryTimeout,
		rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
			updater := kubectl.NewRollingUpdater(namespace, client)
			return updater.Update(config)
		},
		hookExecutor: stratsupport.NewHookExecutor(client, tags, os.Stdout, decoder),
		getUpdateAcceptor: func(timeout time.Duration) strat.UpdateAcceptor {
			return stratsupport.NewAcceptNewlyObservedReadyPods(out, client, timeout, AcceptorInterval)
		},
	}
}
Example 3: NewRollingDeploymentStrategy
// NewRollingDeploymentStrategy makes a new RollingDeploymentStrategy.
func NewRollingDeploymentStrategy(namespace string, oldClient kclient.Interface, tags client.ImageStreamTagsNamespacer, events record.EventSink, decoder runtime.Decoder, initialStrategy acceptingDeploymentStrategy, out, errOut io.Writer, until string) *RollingDeploymentStrategy {
	if out == nil {
		out = ioutil.Discard
	}
	if errOut == nil {
		errOut = ioutil.Discard
	}
	// TODO internalclientset: get rid of oldClient after next rebase
	client := adapter.FromUnversionedClient(oldClient.(*kclient.Client))
	return &RollingDeploymentStrategy{
		out:             out,
		errOut:          errOut,
		until:           until,
		decoder:         decoder,
		initialStrategy: initialStrategy,
		rcClient:        client.Core(),
		eventClient:     client.Core(),
		tags:            tags,
		apiRetryPeriod:  defaultApiRetryPeriod,
		apiRetryTimeout: defaultApiRetryTimeout,
		rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
			updater := kubectl.NewRollingUpdater(namespace, oldClient)
			return updater.Update(config)
		},
		hookExecutor: stratsupport.NewHookExecutor(client.Core(), tags, client.Core(), os.Stdout, decoder),
		getUpdateAcceptor: func(timeout time.Duration, minReadySeconds int32) strat.UpdateAcceptor {
			return stratsupport.NewAcceptAvailablePods(out, client.Core(), timeout, acceptorInterval, minReadySeconds)
		},
	}
}
Example 4: update
func (k *Krud) update(h *Webhook) error {
	h.UpdateAttempt = true
	h.UpdateStart = time.Now()
	defer func() {
		h.UpdateEnd = time.Now()
	}()
	conf := &client.Config{
		Host: k.Endpoint,
	}
	client, err := client.New(conf)
	if err != nil {
		return err
	}
	rcs := client.ReplicationControllers(k.Namespace)
	oldRc, err := rcs.Get(k.ControllerName)
	if err != nil {
		return err
	}
	newRc, err := rcs.Get(k.ControllerName)
	if err != nil {
		return err
	}
	hash, err := api.HashObject(oldRc, client.Codec)
	if err != nil {
		return err
	}
	h.UpdateID = hash
	newRc.Name = fmt.Sprintf("%s-%s", k.ControllerName, hash)
	newRc.ResourceVersion = ""
	apply := func(key, value string, ms ...map[string]string) {
		for _, m := range ms {
			m[key] = value
		}
	}
	apply(k.DeploymentKey, hash, newRc.Spec.Selector, newRc.Spec.Template.Labels)
	apply("run", k.ControllerName, newRc.Spec.Selector, newRc.Spec.Template.Labels)
	ruconf := kubectl.RollingUpdaterConfig{
		Out: &lockBuffer{
			k: k,
			h: h,
		},
		OldRc:          oldRc,
		NewRc:          newRc,
		UpdatePeriod:   time.Second * 3, // todo: change to time.Minute
		Timeout:        time.Minute * 5,
		Interval:       time.Second * 3,
		UpdateAcceptor: kubectl.DefaultUpdateAcceptor,
		CleanupPolicy:  kubectl.RenameRollingUpdateCleanupPolicy,
	}
	ruc := kubectl.NewRollingUpdaterClient(client)
	println("doing rolling update")
	err = kubectl.NewRollingUpdater(k.Namespace, ruc).Update(&ruconf)
	println("done")
	k.Lock()
	h.UpdateSuccess = err == nil
	k.Unlock()
	return err
}
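Example 4 picks RenameRollingUpdateCleanupPolicy, which deletes the old controller and renames the new one to take over the old name, so callers keep a stable controller name across updates. The kubectl package of this era also shipped Delete and Preserve policies; a sketch of the choice follows (constant names as in the rolling_updater.go of the time; worth re-checking against your vendored copy):

// Cleanup behavior once the pods have been moved over:
ruconf.CleanupPolicy = kubectl.DeleteRollingUpdateCleanupPolicy // delete the old RC
// ruconf.CleanupPolicy = kubectl.PreserveRollingUpdateCleanupPolicy // keep the old RC around
// ruconf.CleanupPolicy = kubectl.RenameRollingUpdateCleanupPolicy   // delete old, rename new to the old name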
Example 5: NewRollingDeploymentStrategy
// NewRollingDeploymentStrategy makes a new RollingDeploymentStrategy.
func NewRollingDeploymentStrategy(namespace string, client kclient.Interface, codec runtime.Codec, initialStrategy acceptingDeploymentStrategy) *RollingDeploymentStrategy {
	updaterClient := &rollingUpdaterClient{
		ControllerHasDesiredReplicasFn: func(rc *kapi.ReplicationController) wait.ConditionFunc {
			return kclient.ControllerHasDesiredReplicas(client, rc)
		},
		GetReplicationControllerFn: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		UpdateReplicationControllerFn: func(namespace string, rc *kapi.ReplicationController) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Update(rc)
		},
		// This guards against the RollingUpdater's built-in behavior to create
		// RCs when the supplied old RC is nil. We won't pass nil, but it doesn't
		// hurt to further guard against it since we would have no way to identify
		// or clean up orphaned RCs RollingUpdater might inadvertently create.
		CreateReplicationControllerFn: func(namespace string, rc *kapi.ReplicationController) (*kapi.ReplicationController, error) {
			return nil, fmt.Errorf("unexpected attempt to create Deployment: %#v", rc)
		},
		// We give the RollingUpdater a policy which should prevent it from
		// deleting the source deployment after the transition, but it doesn't
		// hurt to guard by removing its ability to delete.
		DeleteReplicationControllerFn: func(namespace, name string) error {
			return fmt.Errorf("unexpected attempt to delete Deployment %s/%s", namespace, name)
		},
	}
	return &RollingDeploymentStrategy{
		codec:           codec,
		initialStrategy: initialStrategy,
		client:          updaterClient,
		rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
			updater := kubectl.NewRollingUpdater(namespace, updaterClient)
			return updater.Update(config)
		},
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		getUpdateAcceptor: func(timeout time.Duration) kubectl.UpdateAcceptor {
			return stratsupport.NewAcceptNewlyObservedReadyPods(client, timeout, AcceptorInterval)
		},
	}
}
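The rollingUpdaterClient in Example 5 is a function table: each Fn field satisfies one method of the client interface the RollingUpdater expects, and the mutating operations are stubbed to fail loudly. The same idea in miniature, with hypothetical type and method names for illustration only:

import (
	"fmt"

	kapi "k8s.io/kubernetes/pkg/api"
)

// funcRCClient satisfies a hypothetical narrow client interface via function
// fields, mirroring the rollingUpdaterClient pattern above.
type funcRCClient struct {
	GetFn    func(namespace, name string) (*kapi.ReplicationController, error)
	DeleteFn func(namespace, name string) error
}

func (c *funcRCClient) Get(namespace, name string) (*kapi.ReplicationController, error) {
	return c.GetFn(namespace, name)
}

func (c *funcRCClient) Delete(namespace, name string) error {
	// Guard: unless a caller explicitly wires in DeleteFn, deletion fails
	// loudly instead of silently destroying a controller.
	if c.DeleteFn == nil {
		return fmt.Errorf("unexpected attempt to delete %s/%s", namespace, name)
	}
	return c.DeleteFn(namespace, name)
}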
Example 6: NewRollingDeploymentStrategy
// NewRollingDeploymentStrategy makes a new RollingDeploymentStrategy.
func NewRollingDeploymentStrategy(namespace string, client kclient.Interface, codec runtime.Codec, initialStrategy acceptingDeploymentStrategy) *RollingDeploymentStrategy {
	return &RollingDeploymentStrategy{
		codec:           codec,
		initialStrategy: initialStrategy,
		client:          client,
		apiRetryPeriod:  DefaultApiRetryPeriod,
		apiRetryTimeout: DefaultApiRetryTimeout,
		rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
			updater := kubectl.NewRollingUpdater(namespace, client)
			return updater.Update(config)
		},
		hookExecutor: stratsupport.NewHookExecutor(client, os.Stdout),
		getUpdateAcceptor: func(timeout time.Duration) strat.UpdateAcceptor {
			return stratsupport.NewAcceptNewlyObservedReadyPods(client, timeout, AcceptorInterval)
		},
	}
}
Example 7: RunRollingUpdate
func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
	if len(os.Args) > 1 && os.Args[1] == "rollingupdate" {
		printDeprecationWarning("rolling-update", "rollingupdate")
	}
	deploymentKey, filename, image, oldName, err := validateArguments(cmd, args)
	if err != nil {
		return err
	}
	period := cmdutil.GetFlagDuration(cmd, "update-period")
	interval := cmdutil.GetFlagDuration(cmd, "poll-interval")
	timeout := cmdutil.GetFlagDuration(cmd, "timeout")
	dryrun := cmdutil.GetFlagBool(cmd, "dry-run")
	cmdNamespace, enforceNamespace, err := f.DefaultNamespace()
	if err != nil {
		return err
	}
	client, err := f.Client()
	if err != nil {
		return err
	}
	updaterClient := kubectl.NewRollingUpdaterClient(client)
	var newRc *api.ReplicationController
	// fetch rc
	oldRc, err := client.ReplicationControllers(cmdNamespace).Get(oldName)
	if err != nil {
		if !errors.IsNotFound(err) || len(image) == 0 || len(args) > 1 {
			return err
		}
		// We're in the middle of a rename, look for an RC with a source annotation of oldName
		newRc, err := kubectl.FindSourceController(updaterClient, cmdNamespace, oldName)
		if err != nil {
			return err
		}
		return kubectl.Rename(kubectl.NewRollingUpdaterClient(client), newRc, oldName)
	}
	var keepOldName bool
	var replicasDefaulted bool
	mapper, typer := f.Object()
	if len(filename) != 0 {
		schema, err := f.Validator()
		if err != nil {
			return err
		}
		request := resource.NewBuilder(mapper, typer, f.ClientMapperForCommand()).
			Schema(schema).
			NamespaceParam(cmdNamespace).DefaultNamespace().
			FilenameParam(enforceNamespace, filename).
			Do()
		obj, err := request.Object()
		if err != nil {
			return err
		}
		var ok bool
		// Handle filename input from stdin. The resource builder always returns an api.List
		// when creating resource(s) from a stream.
		if list, ok := obj.(*api.List); ok {
			if len(list.Items) > 1 {
				return cmdutil.UsageError(cmd, "%s specifies multiple items", filename)
			}
			obj = list.Items[0]
		}
		newRc, ok = obj.(*api.ReplicationController)
		if !ok {
			if _, kind, err := typer.ObjectVersionAndKind(obj); err == nil {
				return cmdutil.UsageError(cmd, "%s contains a %s not a ReplicationController", filename, kind)
			}
			glog.V(4).Infof("Object %#v is not a ReplicationController", obj)
			return cmdutil.UsageError(cmd, "%s does not specify a valid ReplicationController", filename)
		}
		infos, err := request.Infos()
		if err != nil || len(infos) != 1 {
			glog.V(2).Infof("was not able to recover adequate information to discover if .spec.replicas was defaulted")
		} else {
			replicasDefaulted = isReplicasDefaulted(infos[0])
		}
	}
	// If the --image option is specified, we need to create a new rc with at least one different selector
	// than the old rc. This selector is the hash of the rc, which will differ because the new rc has a
	// different image.
	if len(image) != 0 {
		keepOldName = len(args) == 1
		newName := findNewName(args, oldRc)
		if newRc, err = kubectl.LoadExistingNextReplicationController(client, cmdNamespace, newName); err != nil {
			return err
		}
		if newRc != nil {
			fmt.Fprintf(out, "Found existing update in progress (%s), resuming.\n", newRc.Name)
		} else {
			newRc, err = kubectl.CreateNewControllerFromCurrentController(client, cmdNamespace, oldName, newName, image, deploymentKey)
			if err != nil {
				return err
			}
//......... part of the code omitted here .........
Example 8: RunRollingUpdate
//......... part of the code omitted here .........
			}
		}
		// Update the existing replication controller with pointers to the 'next' controller
		// and adding the <deploymentKey> label if necessary to distinguish it from the 'next' controller.
		oldHash, err := api.HashObject(oldRc, codec)
		if err != nil {
			return err
		}
		// If new image is same as old, the hash may not be distinct, so add a suffix.
		oldHash += "-orig"
		oldRc, err = kubectl.UpdateExistingReplicationController(client, oldRc, cmdNamespace, newRc.Name, deploymentKey, oldHash, out)
		if err != nil {
			return err
		}
	}
	if rollback {
		keepOldName = len(args) == 1
		newName := findNewName(args, oldRc)
		if newRc, err = kubectl.LoadExistingNextReplicationController(client, cmdNamespace, newName); err != nil {
			return err
		}
		if newRc == nil {
			return cmdutil.UsageError(cmd, "Could not find %s to rollback.\n", newName)
		}
	}
	if oldName == newRc.Name {
		return cmdutil.UsageError(cmd, "%s cannot have the same name as the existing ReplicationController %s",
			filename, oldName)
	}
	updater := kubectl.NewRollingUpdater(newRc.Namespace, client)
	// To successfully pull off a rolling update the new and old rc have to differ
	// by at least one selector. Every new pod should have the selector and every
	// old pod should not have the selector.
	var hasLabel bool
	for key, oldValue := range oldRc.Spec.Selector {
		if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue {
			hasLabel = true
			break
		}
	}
	if !hasLabel {
		return cmdutil.UsageError(cmd, "%s must specify a matching key with non-equal value in Selector for %s",
			filename, oldName)
	}
	// TODO: handle scales during rolling update
	if replicasDefaulted {
		newRc.Spec.Replicas = oldRc.Spec.Replicas
	}
	if dryrun {
		oldRcData := &bytes.Buffer{}
		newRcData := &bytes.Buffer{}
		if outputFormat == "" {
			oldRcData.WriteString(oldRc.Name)
			newRcData.WriteString(newRc.Name)
		} else {
			if err := f.PrintObject(cmd, mapper, oldRc, oldRcData); err != nil {
				return err
			}
			if err := f.PrintObject(cmd, mapper, newRc, newRcData); err != nil {
				return err
			}