This page collects typical usage examples of the Golang function kubectl.ScalerFor from k8s.io/kubernetes/pkg/kubectl. If you have been wondering what ScalerFor does, how to call it, or what real-world uses look like, the curated examples below should help.
The page shows 15 code examples of ScalerFor, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Golang code examples.
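Before the individual examples, it helps to see the pattern they all share: resolve a Scaler for a resource kind with ScalerFor, then call Scale with retry and wait parameters. The following is a minimal sketch of that pattern under 1.3-era internal APIs; the helper scaleOneRC and its variable names are illustrative, not taken from any example below.
// Minimal sketch of the common ScalerFor pattern (hypothetical helper).
// Assumes the packages used throughout this page: "k8s.io/kubernetes/pkg/api",
// "k8s.io/kubernetes/pkg/kubectl", the internal clientset, and "time".
func scaleOneRC(c clientset.Interface, ns, name string, replicas uint) error {
	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), c)
	if err != nil {
		return err
	}
	// How often to retry the size update, and how long to wait for the
	// observed replica count to converge on the requested one.
	retry := kubectl.NewRetryParams(1*time.Second, 1*time.Minute)
	waitForReplicas := kubectl.NewRetryParams(1*time.Second, 5*time.Minute)
	return scaler.Scale(ns, name, replicas, nil, retry, waitForReplicas)
}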
Example 1: NewDeployer
// NewDeployer makes a new Deployer from a kube client.
func NewDeployer(client kclient.Interface, oclient client.Interface, out, errOut io.Writer, until string) *Deployer {
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &Deployer{
		out:    out,
		errOut: errOut,
		until:  until,
		getDeployment: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		getDeployments: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
			return client.ReplicationControllers(namespace).List(kapi.ListOptions{LabelSelector: deployutil.ConfigSelector(configName)})
		},
		scaler: scaler,
		strategyFor: func(config *deployapi.DeploymentConfig) (strategy.DeploymentStrategy, error) {
			switch config.Spec.Strategy.Type {
			case deployapi.DeploymentStrategyTypeRecreate:
				return recreate.NewRecreateDeploymentStrategy(client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), out, errOut, until), nil
			case deployapi.DeploymentStrategyTypeRolling:
				recreate := recreate.NewRecreateDeploymentStrategy(client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), out, errOut, until)
				return rolling.NewRollingDeploymentStrategy(config.Namespace, client, oclient, client.Events(""), kapi.Codecs.UniversalDecoder(), recreate, out, errOut, until), nil
			default:
				return nil, fmt.Errorf("unsupported strategy type: %s", config.Spec.Strategy.Type)
			}
		},
	}
}
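A hedged usage sketch for this constructor follows. The two clients are assumed to be configured elsewhere, and Deploy(namespace, rcName) is assumed to be the Deployer's entry point (as in the OpenShift deployer this constructor appears to come from); treat both as assumptions rather than part of the example above.
// Hypothetical call site; kubeClient, originClient, namespace and rcName
// are placeholders, and Deploy(namespace, rcName) is an assumed method.
func runDeployment(kubeClient kclient.Interface, originClient client.Interface, namespace, rcName string) error {
	deployer := NewDeployer(kubeClient, originClient, os.Stdout, os.Stderr, "")
	return deployer.Deploy(namespace, rcName)
}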
Example 2: NewRecreateDeploymentStrategy
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, tagClient client.ImageStreamTagsNamespacer, events record.EventSink, decoder runtime.Decoder, out, errOut io.Writer, until string) *RecreateDeploymentStrategy {
	if out == nil {
		out = ioutil.Discard
	}
	if errOut == nil {
		errOut = ioutil.Discard
	}
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &RecreateDeploymentStrategy{
		out:         out,
		errOut:      errOut,
		events:      events,
		until:       until,
		rcClient:    client,
		eventClient: client,
		getUpdateAcceptor: func(timeout time.Duration, minReadySeconds int32) strat.UpdateAcceptor {
			return stratsupport.NewAcceptNewlyObservedReadyPods(out, client, timeout, AcceptorInterval, minReadySeconds)
		},
		scaler:       scaler,
		decoder:      decoder,
		hookExecutor: stratsupport.NewHookExecutor(client, tagClient, client, os.Stdout, decoder),
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
Example 3: NewRecreateDeploymentStrategy
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(oldClient kclient.Interface, tagClient client.ImageStreamTagsNamespacer, events record.EventSink, decoder runtime.Decoder, out, errOut io.Writer, until string) *RecreateDeploymentStrategy {
	if out == nil {
		out = ioutil.Discard
	}
	if errOut == nil {
		errOut = ioutil.Discard
	}
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), oldClient)
	// TODO internalclientset: get rid of oldClient after next rebase
	client := adapter.FromUnversionedClient(oldClient.(*kclient.Client))
	return &RecreateDeploymentStrategy{
		out:         out,
		errOut:      errOut,
		events:      events,
		until:       until,
		rcClient:    client.Core(),
		eventClient: client.Core(),
		getUpdateAcceptor: func(timeout time.Duration, minReadySeconds int32) strat.UpdateAcceptor {
			return stratsupport.NewAcceptAvailablePods(out, client.Core(), timeout, acceptorInterval, minReadySeconds)
		},
		scaler:       scaler,
		decoder:      decoder,
		hookExecutor: stratsupport.NewHookExecutor(client.Core(), tagClient, client.Core(), os.Stdout, decoder),
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
Example 4: Scaler
func (f *ring1Factory) Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
	mappingVersion := mapping.GroupVersionKind.GroupVersion()
	clientset, err := f.clientAccessFactory.ClientSetForVersion(&mappingVersion)
	if err != nil {
		return nil, err
	}
	return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset)
}
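A caller would typically feed this factory method a RESTMapping resolved for the resource being scaled. The sketch below assumes such a mapping is already in hand (how it is obtained is elided) and reuses the Scale signature seen elsewhere on this page; the helper name and parameters are illustrative.
// Hypothetical helper showing how the factory method above might be used;
// mapping is assumed to be a *meta.RESTMapping resolved for the resource.
func scaleViaFactory(f *ring1Factory, mapping *meta.RESTMapping, namespace, name string, replicas uint) error {
	scaler, err := f.Scaler(mapping)
	if err != nil {
		return err
	}
	retry := kubectl.NewRetryParams(1*time.Second, 2*time.Minute)
	return scaler.Scale(namespace, name, replicas, nil, retry, retry)
}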
Example 5: NewRecreateDeploymentStrategy
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, codec runtime.Codec) *RecreateDeploymentStrategy {
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &RecreateDeploymentStrategy{
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		scaler:       scaler,
		codec:        codec,
		hookExecutor: stratsupport.NewHookExecutor(client, os.Stdout, codec),
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
Example 6: NewRecreateDeploymentStrategy
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, tagClient client.ImageStreamTagsNamespacer, decoder runtime.Decoder) *RecreateDeploymentStrategy {
	scaler, _ := kubectl.ScalerFor(kapi.Kind("ReplicationController"), client)
	return &RecreateDeploymentStrategy{
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		getUpdateAcceptor: func(timeout time.Duration) strat.UpdateAcceptor {
			return stratsupport.NewAcceptNewlyObservedReadyPods(client, timeout, AcceptorInterval)
		},
		scaler:       scaler,
		decoder:      decoder,
		hookExecutor: stratsupport.NewHookExecutor(client, tagClient, os.Stdout, decoder),
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
Example 7: ScaleRC
// ScaleRC scales the given rc to the given replicas.
func ScaleRC(name, ns string, replicas int, restClient *client.Client) (*api.ReplicationController, error) {
	scaler, err := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(restClient))
	if err != nil {
		return nil, err
	}
	retry := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	waitForReplicas := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
	if err != nil {
		return nil, err
	}
	scaled, err := restClient.ReplicationControllers(ns).Get(name)
	if err != nil {
		return nil, err
	}
	return scaled, nil
}
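As a usage note, a call site for this helper might look like the sketch below; the controller name, namespace, and client are placeholders, while the signature is exactly the one defined above.
// Hypothetical caller: grow the "frontend" RC to 5 replicas and log the result.
func growFrontend(ns string, restClient *client.Client) error {
	rc, err := ScaleRC("frontend", ns, 5, restClient)
	if err != nil {
		return err
	}
	fmt.Printf("rc %q scaled to %d replicas\n", rc.Name, rc.Spec.Replicas)
	return nil
}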
Example 8: ScaleRC
// ScaleRC scales the given rc to the given replicas.
func ScaleRC(name, ns string, replicas int32, clientset clientset.Interface) (*api.ReplicationController, error) {
	scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
	if err != nil {
		return nil, err
	}
	retry := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	waitForReplicas := &kubectl.RetryParams{Interval: 50 * time.Millisecond, Timeout: DefaultTimeout}
	err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
	if err != nil {
		return nil, err
	}
	scaled, err := clientset.Core().ReplicationControllers(ns).Get(name)
	if err != nil {
		return nil, err
	}
	return scaled, nil
}
Example 9: NewRecreateDeploymentStrategy
// NewRecreateDeploymentStrategy makes a RecreateDeploymentStrategy backed by
// a real HookExecutor and client.
func NewRecreateDeploymentStrategy(client kclient.Interface, codec runtime.Codec) *RecreateDeploymentStrategy {
	scaler, _ := kubectl.ScalerFor("ReplicationController", client)
	return &RecreateDeploymentStrategy{
		getReplicationController: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		scaler: scaler,
		codec:  codec,
		hookExecutor: &stratsupport.HookExecutor{
			PodClient: &stratsupport.HookExecutorPodClientImpl{
				CreatePodFunc: func(namespace string, pod *kapi.Pod) (*kapi.Pod, error) {
					return client.Pods(namespace).Create(pod)
				},
				PodWatchFunc: func(namespace, name, resourceVersion string, stopChannel chan struct{}) func() *kapi.Pod {
					return stratsupport.NewPodWatch(client, namespace, name, resourceVersion, stopChannel)
				},
			},
		},
		retryTimeout: 120 * time.Second,
		retryPeriod:  1 * time.Second,
	}
}
Example 10: NewDeployer
// NewDeployer makes a new Deployer from a kube client.
func NewDeployer(client kclient.Interface) *Deployer {
	scaler, _ := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(client))
	return &Deployer{
		getDeployment: func(namespace, name string) (*kapi.ReplicationController, error) {
			return client.ReplicationControllers(namespace).Get(name)
		},
		getDeployments: func(namespace, configName string) (*kapi.ReplicationControllerList, error) {
			return client.ReplicationControllers(namespace).List(deployutil.ConfigSelector(configName))
		},
		scaler: scaler,
		strategyFor: func(config *deployapi.DeploymentConfig) (strategy.DeploymentStrategy, error) {
			switch config.Template.Strategy.Type {
			case deployapi.DeploymentStrategyTypeRecreate:
				return recreate.NewRecreateDeploymentStrategy(client, latest.Codec), nil
			case deployapi.DeploymentStrategyTypeRolling:
				recreate := recreate.NewRecreateDeploymentStrategy(client, latest.Codec)
				return rolling.NewRollingDeploymentStrategy(config.Namespace, client, latest.Codec, recreate), nil
			default:
				return nil, fmt.Errorf("unsupported strategy type: %s", config.Template.Strategy.Type)
			}
		},
	}
}
Example 11:
	})
	It("should scale a job up", func() {
		startParallelism := int32(1)
		endParallelism := int32(2)
		By("Creating a job")
		job := newTestJob("notTerminate", "scale-up", v1.RestartPolicyNever, startParallelism, completions)
		job, err := createJob(f.ClientSet, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())
		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())
		By("scale job up")
		scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())
		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})
	It("should scale a job down", func() {
		startParallelism := int32(2)
		endParallelism := int32(1)
		By("Creating a job")
Example 12:
	})
	It("should scale a job up", func() {
		startParallelism := 1
		endParallelism := 2
		By("Creating a job")
		job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())
		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())
		By("scale job up")
		scaler, err := kubectl.ScalerFor("Job", f.Client)
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())
		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})
	It("should scale a job down", func() {
		startParallelism := 2
		endParallelism := 1
		By("Creating a job")
Example 13: NewFactory
//......... portions of the code omitted here .........
				return false, err
			}
			return false, fmt.Errorf("cannot pause %v", gvk)
		}
	},
	ResumeObject: func(object runtime.Object) (bool, error) {
		c, err := clients.ClientForVersion(nil)
		if err != nil {
			return false, err
		}
		switch t := object.(type) {
		case *extensions.Deployment:
			if !t.Spec.Paused {
				return true, nil
			}
			t.Spec.Paused = false
			_, err := c.Extensions().Deployments(t.Namespace).Update(t)
			return false, err
		default:
			gvk, err := api.Scheme.ObjectKind(object)
			if err != nil {
				return false, err
			}
			return false, fmt.Errorf("cannot resume %v", gvk)
		}
	},
	Scaler: func(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
		mappingVersion := mapping.GroupVersionKind.GroupVersion()
		client, err := clients.ClientForVersion(&mappingVersion)
		if err != nil {
			return nil, err
		}
		return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), client)
	},
	Reaper: func(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
		mappingVersion := mapping.GroupVersionKind.GroupVersion()
		client, err := clients.ClientForVersion(&mappingVersion)
		if err != nil {
			return nil, err
		}
		return kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), client)
	},
	HistoryViewer: func(mapping *meta.RESTMapping) (kubectl.HistoryViewer, error) {
		mappingVersion := mapping.GroupVersionKind.GroupVersion()
		client, err := clients.ClientForVersion(&mappingVersion)
		if err != nil {
			return nil, err
		}
		clientset := clientset.FromUnversionedClient(client)
		return kubectl.HistoryViewerFor(mapping.GroupVersionKind.GroupKind(), clientset)
	},
	Rollbacker: func(mapping *meta.RESTMapping) (kubectl.Rollbacker, error) {
		mappingVersion := mapping.GroupVersionKind.GroupVersion()
		client, err := clients.ClientForVersion(&mappingVersion)
		if err != nil {
			return nil, err
		}
		return kubectl.RollbackerFor(mapping.GroupVersionKind.GroupKind(), client)
	},
	Validator: func(validate bool, cacheDir string) (validation.Schema, error) {
		if validate {
			client, err := clients.ClientForVersion(nil)
			if err != nil {
				return nil, err
			}
Example 14:
	})
	It("should scale a job up", func() {
		startParallelism := int32(1)
		endParallelism := int32(2)
		By("Creating a job")
		job := newTestV1Job("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
		job, err := createV1Job(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())
		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())
		By("scale job up")
		scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())
		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunningV1(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})
	It("should scale a job down", func() {
		startParallelism := int32(2)
		endParallelism := int32(1)
		By("Creating a job")
Example 15:
	})
	It("should scale a job up", func() {
		startParallelism := 1
		endParallelism := 2
		By("Creating a job")
		job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
		job, err := createJob(f.Client, f.Namespace.Name, job)
		Expect(err).NotTo(HaveOccurred())
		By("Ensuring active pods == startParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, startParallelism)
		Expect(err).NotTo(HaveOccurred())
		By("scale job up")
		scaler, err := kubectl.ScalerFor(extensions.Kind("Job"), f.Client)
		Expect(err).NotTo(HaveOccurred())
		waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
		waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
		err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
		Expect(err).NotTo(HaveOccurred())
		By("Ensuring active pods == endParallelism")
		err = waitForAllPodsRunning(f.Client, f.Namespace.Name, job.Name, endParallelism)
		Expect(err).NotTo(HaveOccurred())
	})
	It("should scale a job down", func() {
		startParallelism := 2
		endParallelism := 1
		By("Creating a job")