This article collects typical usage examples of the Golang function NewBackOff from k8s.io/kubernetes/pkg/util/flowcontrol. If you have been wondering what NewBackOff does, how it is used, and what real-world calls look like, the curated examples below should help.
The following presents 15 code examples of the NewBackOff function, sorted by popularity by default.
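Before diving into the examples, here is a minimal standalone sketch of the Backoff API they all rely on (the method names and signatures match the flowcontrol package at this vintage of Kubernetes; the key string and durations are invented for illustration). NewBackOff returns a *flowcontrol.Backoff that tracks exponential backoff per arbitrary string key: Next records a failure and doubles the delay up to the cap, Get reads the current delay, and Reset clears the entry after a success.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/flowcontrol"
)

func main() {
	// Exponential backoff starting at 1s, capped at 60s.
	backoff := flowcontrol.NewBackOff(1*time.Second, 60*time.Second)
	key := "http://1.2.3.4:100" // any string; the examples below key by URL, pod, or image

	for i := 0; i < 4; i++ {
		backoff.Next(key, backoff.Clock.Now()) // record a failure
		fmt.Println(backoff.Get(key))          // 1s, 2s, 4s, 8s
	}
	backoff.Reset(key)            // a success clears the entry
	fmt.Println(backoff.Get(key)) // back to 0s
}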
Example 1: TestURLBackoffFunctionality
// TestURLBackoffFunctionality generally tests the URLBackoff wrapper. We avoid duplicating tests from backoff and request.
func TestURLBackoffFunctionality(t *testing.T) {
myBackoff := &URLBackoff{
Backoff: flowcontrol.NewBackOff(1*time.Second, 60*time.Second),
}
// Now test that backoff increases, then recovers.
// 200 and 300 should both result in clearing the backoff.
// all others like 429 should result in increased backoff.
seconds := []int{0,
1, 2, 4, 8, 0,
1, 2}
returnCodes := []int{
429, 500, 501, 502, 300,
500, 501, 502,
}
if len(seconds) != len(returnCodes) {
t.Fatalf("responseCode to backoff arrays should be the same length... sanity check failed.")
}
for i, sec := range seconds {
backoffSec := myBackoff.CalculateBackoff(parse("http://1.2.3.4:100"))
if backoffSec < time.Duration(sec)*time.Second || backoffSec > time.Duration(sec+5)*time.Second {
t.Errorf("Backoff out of range %v: %v %v", i, sec, backoffSec)
}
myBackoff.UpdateBackoff(parse("http://1.2.3.4:100/responseCodeForFuncTest"), nil, returnCodes[i])
}
if myBackoff.CalculateBackoff(parse("http://1.2.3.4:100")) == 0 {
t.Errorf("The final return code %v should have resulted in a backoff ! ", returnCodes[7])
}
}
Example 2: readExpBackoffConfig
// readExpBackoffConfig handles the internal logic of determining what the
// backoff policy is. By default, if no information is available, NoBackoff is used.
// TODO: Generalize this; see #17727.
func readExpBackoffConfig() BackoffManager {
backoffBase := os.Getenv(envBackoffBase)
backoffDuration := os.Getenv(envBackoffDuration)
backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64)
backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64)
if errBase != nil || errDuration != nil {
return &NoBackoff{}
}
return &URLBackoff{
Backoff: flowcontrol.NewBackOff(
time.Duration(backoffBaseInt)*time.Second,
time.Duration(backoffDurationInt)*time.Second)}
}
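A hedged usage sketch for the function above: it reads two environment variables whose names are the envBackoffBase and envBackoffDuration constants (assumed here to be KUBE_CLIENT_BACKOFF_BASE and KUBE_CLIENT_BACKOFF_DURATION in this vintage of the client), both parsed as whole seconds; any missing or unparsable value falls back to NoBackoff. Something like the following, inside the same package with os and fmt imported:

func demoReadExpBackoffConfig() {
	// Assumed env var names; check the envBackoffBase/envBackoffDuration
	// constants in your tree if they differ.
	os.Setenv("KUBE_CLIENT_BACKOFF_BASE", "1")      // base: 1 second
	os.Setenv("KUBE_CLIENT_BACKOFF_DURATION", "60") // cap: 60 seconds
	fmt.Printf("%T\n", readExpBackoffConfig())      // a *URLBackoff

	os.Unsetenv("KUBE_CLIENT_BACKOFF_BASE")    // a missing value...
	fmt.Printf("%T\n", readExpBackoffConfig()) // ...falls back to a *NoBackoff
}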
Example 3: TestURLBackoffFunctionalityCollisions
func TestURLBackoffFunctionalityCollisions(t *testing.T) {
myBackoff := &URLBackoff{
Backoff: flowcontrol.NewBackOff(1*time.Second, 60*time.Second),
}
// Add some noise and make sure backoff for a clean URL is zero.
myBackoff.UpdateBackoff(parse("http://100.200.300.400:8080"), nil, 500)
myBackoff.UpdateBackoff(parse("http://1.2.3.4:8080"), nil, 500)
if myBackoff.CalculateBackoff(parse("http://1.2.3.4:100")) > 0 {
t.Errorf("URLs are colliding in the backoff map!")
}
}
Example 4: TestSyncPodWithInitContainers
func TestSyncPodWithInitContainers(t *testing.T) {
fakeRuntime, _, m, err := createTestRuntimeManager()
assert.NoError(t, err)
initContainers := []api.Container{
{
Name: "init1",
Image: "init",
ImagePullPolicy: api.PullIfNotPresent,
},
}
containers := []api.Container{
{
Name: "foo1",
Image: "busybox",
ImagePullPolicy: api.PullIfNotPresent,
},
{
Name: "foo2",
Image: "alpine",
ImagePullPolicy: api.PullIfNotPresent,
},
}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: containers,
InitContainers: initContainers,
},
}
_, _, err = makeAndSetFakePod(m, fakeRuntime, pod)
assert.NoError(t, err)
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
podStatus, err := m.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
assert.NoError(t, err)
result := m.SyncPod(pod, api.PodStatus{}, podStatus, []api.Secret{}, backOff)
assert.NoError(t, result.Error())
assert.Equal(t, 3, len(fakeRuntime.Containers))
expectedContainers := []string{"foo1_0", "foo2_0", "init1_0"}
if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
t.Errorf("expected %q, got %q", expectedContainers, actual)
}
}
Example 5: TestSyncPod
func TestSyncPod(t *testing.T) {
fakeRuntime, fakeImage, m, err := createTestRuntimeManager()
assert.NoError(t, err)
containers := []api.Container{
{
Name: "foo1",
Image: "busybox",
ImagePullPolicy: api.PullIfNotPresent,
},
{
Name: "foo2",
Image: "alpine",
ImagePullPolicy: api.PullIfNotPresent,
},
}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: containers,
},
}
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
result := m.SyncPod(pod, api.PodStatus{}, &kubecontainer.PodStatus{}, []api.Secret{}, backOff)
assert.NoError(t, result.Error())
assert.Equal(t, 2, len(fakeRuntime.Containers))
assert.Equal(t, 2, len(fakeImage.Images))
assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
for _, sandbox := range fakeRuntime.Sandboxes {
assert.Equal(t, runtimeApi.PodSandBoxState_READY, sandbox.GetState())
}
for _, c := range fakeRuntime.Containers {
assert.Equal(t, runtimeApi.ContainerState_RUNNING, c.GetState())
}
}
Example 6: NewFakeKubeRuntimeManager
func NewFakeKubeRuntimeManager(runtimeService internalApi.RuntimeService, imageService internalApi.ImageManagerService) (*kubeGenericRuntimeManager, error) {
networkPlugin, _ := network.InitNetworkPlugin(
[]network.NetworkPlugin{},
"",
nettest.NewFakeHost(nil),
componentconfig.HairpinNone,
"10.0.0.0/8",
)
return NewKubeGenericRuntimeManager(
&record.FakeRecorder{},
proberesults.NewManager(),
kubecontainer.NewRefManager(),
&containertest.FakeOS{},
networkPlugin,
&fakeRuntimeHelper{},
&fakeHTTP{},
flowcontrol.NewBackOff(time.Second, 300*time.Second),
false,
false,
runtimeService,
imageService,
)
}
Example 7: NewReplicaSetController
// NewReplicaSetController returns a new replica set controller
func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSetController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(federationClient))
recorder := broadcaster.NewRecorder(api.EventSource{Component: "federated-replicaset-controller"})
frsc := &ReplicaSetController{
fedClient: federationClient,
replicasetDeliverer: fedutil.NewDelayingDeliverer(),
clusterDeliverer: fedutil.NewDelayingDeliverer(),
replicasetWorkQueue: workqueue.New(),
replicaSetBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
defaultPlanner: planner.NewPlanner(&fed.FederatedReplicaSetPreferences{
Clusters: map[string]fed.ClusterReplicaSetPreferences{
"*": {Weight: 1},
},
}),
eventRecorder: recorder,
}
replicaSetFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
versionedOptions := fedutil.VersionizeV1ListOptions(options)
return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).List(versionedOptions)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
versionedOptions := fedutil.VersionizeV1ListOptions(options)
return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).Watch(versionedOptions)
},
},
&extensionsv1.ReplicaSet{},
controller.NoResyncPeriodFunc(),
fedutil.NewTriggerOnAllChanges(
func(obj runtime.Object) { frsc.deliverLocalReplicaSet(obj, replicaSetReviewDelay) },
),
)
}
clusterLifecycle := fedutil.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *fedv1.Cluster) {
frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterAvailableDelay)
},
ClusterUnavailable: func(cluster *fedv1.Cluster, _ []interface{}) {
frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay)
},
}
frsc.fedReplicaSetInformer = fedutil.NewFederatedInformer(federationClient, replicaSetFedInformerFactory, &clusterLifecycle)
podFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
versionedOptions := fedutil.VersionizeV1ListOptions(options)
return clientset.Core().Pods(apiv1.NamespaceAll).List(versionedOptions)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
versionedOptions := fedutil.VersionizeV1ListOptions(options)
return clientset.Core().Pods(apiv1.NamespaceAll).Watch(versionedOptions)
},
},
&apiv1.Pod{},
controller.NoResyncPeriodFunc(),
fedutil.NewTriggerOnAllChanges(
func(obj runtime.Object) {
frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, allReplicaSetReviewDelay)
},
),
)
}
frsc.fedPodInformer = fedutil.NewFederatedInformer(federationClient, podFedInformerFactory, &fedutil.ClusterLifecycleHandlerFuncs{})
frsc.replicaSetStore.Indexer, frsc.replicaSetController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
versionedOptions := fedutil.VersionizeV1ListOptions(options)
return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).List(versionedOptions)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
versionedOptions := fedutil.VersionizeV1ListOptions(options)
return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).Watch(versionedOptions)
},
},
&extensionsv1.ReplicaSet{},
controller.NoResyncPeriodFunc(),
fedutil.NewTriggerOnMetaAndSpecChanges(
func(obj runtime.Object) { frsc.deliverFedReplicaSetObj(obj, replicaSetReviewDelay) },
),
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
frsc.fedUpdater = fedutil.NewFederatedUpdater(frsc.fedReplicaSetInformer,
func(client kubeclientset.Interface, obj runtime.Object) error {
rs := obj.(*extensionsv1.ReplicaSet)
_, err := client.Extensions().ReplicaSets(rs.Namespace).Create(rs)
return err
},
func(client kubeclientset.Interface, obj runtime.Object) error {
rs := obj.(*extensionsv1.ReplicaSet)
_, err := client.Extensions().ReplicaSets(rs.Namespace).Update(rs)
//......... the rest of this code is omitted .........
Example 8: NewIngressController
// NewIngressController returns a new ingress controller
func NewIngressController(client federation_release_1_4.Interface) *IngressController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(api.EventSource{Component: "federated-ingress-controller"})
ic := &IngressController{
federatedApiClient: client,
ingressReviewDelay: time.Second * 10,
clusterAvailableDelay: time.Second * 20,
smallDelay: time.Second * 3,
updateTimeout: time.Second * 30,
ingressBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
eventRecorder: recorder,
}
// Build deliverers for triggering reconciliations.
ic.ingressDeliverer = util.NewDelayingDeliverer()
ic.clusterDeliverer = util.NewDelayingDeliverer()
// Start informer in federated API servers on ingresses that should be federated.
ic.ingressInformerStore, ic.ingressInformerController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return client.Extensions().Ingresses(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return client.Extensions().Ingresses(api.NamespaceAll).Watch(options)
},
},
&extensions_v1beta1.Ingress{},
controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(
func(obj pkg_runtime.Object) {
ic.deliverIngressObj(obj, 0, false)
},
))
// Federated informer on ingresses in members of federation.
ic.ingressFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federation_api.Cluster, targetClient kube_release_1_4.Interface) (cache.Store, framework.ControllerInterface) {
return framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return targetClient.Extensions().Ingresses(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return targetClient.Extensions().Ingresses(api.NamespaceAll).Watch(options)
},
},
&extensions_v1beta1.Ingress{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in a federated cluster changes. In most cases it
// would be just confirmation that some ingress operation succeeded.
util.NewTriggerOnAllChanges(
func(obj pkg_runtime.Object) {
ic.deliverIngressObj(obj, ic.ingressReviewDelay, false)
},
))
},
&util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federation_api.Cluster) {
// When a new cluster becomes available, process all the ingresses again.
ic.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(ic.clusterAvailableDelay))
},
},
)
// Federated updater along with Create/Update/Delete operations.
ic.federatedUpdater = util.NewFederatedUpdater(ic.ingressFederatedInformer,
func(client kube_release_1_4.Interface, obj pkg_runtime.Object) error {
ingress := obj.(*extensions_v1beta1.Ingress)
glog.V(4).Infof("Attempting to create Ingress: %v", ingress)
_, err := client.Extensions().Ingresses(ingress.Namespace).Create(ingress)
return err
},
func(client kube_release_1_4.Interface, obj pkg_runtime.Object) error {
ingress := obj.(*extensions_v1beta1.Ingress)
glog.V(4).Infof("Attempting to update Ingress: %v", ingress)
_, err := client.Extensions().Ingresses(ingress.Namespace).Update(ingress)
return err
},
func(client kube_release_1_4.Interface, obj pkg_runtime.Object) error {
ingress := obj.(*extensions_v1beta1.Ingress)
glog.V(4).Infof("Attempting to delete Ingress: %v", ingress)
err := client.Extensions().Ingresses(ingress.Namespace).Delete(ingress.Name, &api.DeleteOptions{})
return err
})
return ic
}
Example 9: NewIngressController
// NewIngressController returns a new ingress controller
func NewIngressController(client federationclientset.Interface) *IngressController {
glog.V(4).Infof("->NewIngressController V(4)")
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(v1.EventSource{Component: "federated-ingress-controller"})
ic := &IngressController{
federatedApiClient: client,
ingressReviewDelay: time.Second * 10,
configMapReviewDelay: time.Second * 10,
clusterAvailableDelay: time.Second * 20,
smallDelay: time.Second * 3,
updateTimeout: time.Second * 30,
ingressBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
eventRecorder: recorder,
configMapBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
}
// Build deliverers for triggering reconciliations.
ic.ingressDeliverer = util.NewDelayingDeliverer()
ic.clusterDeliverer = util.NewDelayingDeliverer()
ic.configMapDeliverer = util.NewDelayingDeliverer()
// Start informer in federated API servers on ingresses that should be federated.
ic.ingressInformerStore, ic.ingressInformerController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return client.Extensions().Ingresses(api.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return client.Extensions().Ingresses(api.NamespaceAll).Watch(options)
},
},
&extensionsv1beta1.Ingress{},
controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(
func(obj pkgruntime.Object) {
ic.deliverIngressObj(obj, 0, false)
},
))
// Federated informer on ingresses in members of federation.
ic.ingressFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
return targetClient.Extensions().Ingresses(api.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return targetClient.Extensions().Ingresses(api.NamespaceAll).Watch(options)
},
},
&extensionsv1beta1.Ingress{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in a federated cluster changes. In most cases it
// would be just confirmation that some ingress operation succeeded.
util.NewTriggerOnAllChanges(
func(obj pkgruntime.Object) {
ic.deliverIngressObj(obj, ic.ingressReviewDelay, false)
},
))
},
&util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federationapi.Cluster) {
// When a new cluster becomes available, process all the ingresses again, and configure its ingress controller's configmap with the correct UID
ic.clusterDeliverer.DeliverAfter(cluster.Name, cluster, ic.clusterAvailableDelay)
},
},
)
// Federated informer on configmaps for ingress controllers in members of the federation.
ic.configMapFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
glog.V(4).Infof("Returning new informer for cluster %q", cluster.Name)
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
if targetClient == nil {
glog.Errorf("Internal error: targetClient is nil")
}
return targetClient.Core().ConfigMaps(uidConfigMapNamespace).List(options) // we only want to list one by name - unfortunately Kubernetes doesn't have a selector for that.
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if targetClient == nil {
glog.Errorf("Internal error: targetClient is nil")
}
return targetClient.Core().ConfigMaps(uidConfigMapNamespace).Watch(options) // as above
},
},
&v1.ConfigMap{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever the ingress controller's configmap in a federated cluster changes. In most cases it
// would be just confirmation that the configmap for the ingress controller is correct.
util.NewTriggerOnAllChanges(
func(obj pkgruntime.Object) {
ic.deliverConfigMapObj(cluster.Name, obj, ic.configMapReviewDelay, false)
//......... the rest of this code is omitted .........
Example 10: Disable
// Disable makes the backoff trivial, i.e., sets it to zero. This might be used
// by tests which want to run 1000s of mock requests without slowing down.
func (b *URLBackoff) Disable() {
glog.V(4).Infof("Disabling backoff strategy")
b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second)
}
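A short usage note (hypothetical test snippet reusing the parse helper from Example 1): after Disable, both the base and the cap are zero, so no failure can accumulate a delay and CalculateBackoff always returns 0.

myBackoff := &URLBackoff{Backoff: flowcontrol.NewBackOff(1*time.Second, 60*time.Second)}
myBackoff.Disable()
myBackoff.UpdateBackoff(parse("http://1.2.3.4:100"), nil, 500)       // failures no longer add delay
fmt.Println(myBackoff.CalculateBackoff(parse("http://1.2.3.4:100"))) // always 0s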
Example 11: NewReplicaSetController
// NewReplicaSetController returns a new replica set controller
func NewReplicaSetController(federationClient fedclientset.Interface) *ReplicaSetController {
frsc := &ReplicaSetController{
fedClient: federationClient,
replicasetDeliverer: fedutil.NewDelayingDeliverer(),
clusterDeliverer: fedutil.NewDelayingDeliverer(),
replicasetWorkQueue: workqueue.New(),
replicaSetBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
defaultPlanner: planner.NewPlanner(&fed.FederatedReplicaSetPreferences{
Clusters: map[string]fed.ClusterReplicaSetPreferences{
"*": {Weight: 1},
},
}),
}
replicaSetFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, framework.ControllerInterface) {
return framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return clientset.Extensions().ReplicaSets(apiv1.NamespaceAll).Watch(options)
},
},
&extensionsv1.ReplicaSet{},
controller.NoResyncPeriodFunc(),
fedutil.NewTriggerOnAllChanges(
func(obj runtime.Object) { frsc.deliverLocalReplicaSet(obj, allReplicaSetReviewDealy) },
),
)
}
clusterLifecycle := fedutil.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *fedv1.Cluster) {
frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay)
},
ClusterUnavailable: func(cluster *fedv1.Cluster, _ []interface{}) {
frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay)
},
}
frsc.fedReplicaSetInformer = fedutil.NewFederatedInformer(federationClient, replicaSetFedInformerFactory, &clusterLifecycle)
podFedInformerFactory := func(cluster *fedv1.Cluster, clientset kubeclientset.Interface) (cache.Store, framework.ControllerInterface) {
return framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return clientset.Core().Pods(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return clientset.Core().Pods(apiv1.NamespaceAll).Watch(options)
},
},
&apiv1.Pod{},
controller.NoResyncPeriodFunc(),
fedutil.NewTriggerOnAllChanges(
func(obj runtime.Object) {
frsc.clusterDeliverer.DeliverAfter(allClustersKey, nil, clusterUnavailableDelay)
},
),
)
}
frsc.fedPodInformer = fedutil.NewFederatedInformer(federationClient, podFedInformerFactory, &fedutil.ClusterLifecycleHandlerFuncs{})
frsc.replicaSetStore.Store, frsc.replicaSetController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return frsc.fedClient.Extensions().ReplicaSets(apiv1.NamespaceAll).Watch(options)
},
},
&extensionsv1.ReplicaSet{},
controller.NoResyncPeriodFunc(),
fedutil.NewTriggerOnMetaAndSpecChanges(
func(obj runtime.Object) { frsc.deliverFedReplicaSetObj(obj, replicaSetReviewDelay) },
),
)
return frsc
}
Example 12: NewSecretController
// NewSecretController returns a new secret controller
func NewSecretController(client federationclientset.Interface) *SecretController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(api.EventSource{Component: "federated-secrets-controller"})
secretcontroller := &SecretController{
federatedApiClient: client,
secretReviewDelay: time.Second * 10,
clusterAvailableDelay: time.Second * 20,
smallDelay: time.Second * 3,
updateTimeout: time.Second * 30,
secretBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
eventRecorder: recorder,
}
// Build deliverers for triggering reconciliations.
secretcontroller.secretDeliverer = util.NewDelayingDeliverer()
secretcontroller.clusterDeliverer = util.NewDelayingDeliverer()
// Start informer in federated API servers on secrets that should be federated.
secretcontroller.secretInformerStore, secretcontroller.secretInformerController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
versionedOptions := util.VersionizeV1ListOptions(options)
return client.Core().Secrets(api_v1.NamespaceAll).List(versionedOptions)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
versionedOptions := util.VersionizeV1ListOptions(options)
return client.Core().Secrets(api_v1.NamespaceAll).Watch(versionedOptions)
},
},
&api_v1.Secret{},
controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { secretcontroller.deliverSecretObj(obj, 0, false) }))
// Federated informer on secrets in members of federation.
secretcontroller.secretFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
versionedOptions := util.VersionizeV1ListOptions(options)
return targetClient.Core().Secrets(api_v1.NamespaceAll).List(versionedOptions)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
versionedOptions := util.VersionizeV1ListOptions(options)
return targetClient.Core().Secrets(api_v1.NamespaceAll).Watch(versionedOptions)
},
},
&api_v1.Secret{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in a federated cluster changes. In most cases it
// would be just confirmation that some secret operation succeeded.
util.NewTriggerOnAllChanges(
func(obj pkg_runtime.Object) {
secretcontroller.deliverSecretObj(obj, secretcontroller.secretReviewDelay, false)
},
))
},
&util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federation_api.Cluster) {
// When a new cluster becomes available, process all the secrets again.
secretcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(secretcontroller.clusterAvailableDelay))
},
},
)
// Federated updater along with Create/Update/Delete operations.
secretcontroller.federatedUpdater = util.NewFederatedUpdater(secretcontroller.secretFederatedInformer,
func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
secret := obj.(*api_v1.Secret)
_, err := client.Core().Secrets(secret.Namespace).Create(secret)
return err
},
func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
secret := obj.(*api_v1.Secret)
_, err := client.Core().Secrets(secret.Namespace).Update(secret)
return err
},
func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
secret := obj.(*api_v1.Secret)
err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &api_v1.DeleteOptions{})
return err
})
secretcontroller.deletionHelper = deletionhelper.NewDeletionHelper(
secretcontroller.hasFinalizerFunc,
secretcontroller.removeFinalizerFunc,
secretcontroller.addFinalizerFunc,
// objNameFunc
func(obj pkg_runtime.Object) string {
secret := obj.(*api_v1.Secret)
return secret.Name
},
secretcontroller.updateTimeout,
secretcontroller.eventRecorder,
secretcontroller.secretFederatedInformer,
//......... the rest of this code is omitted .........
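All of these federation controllers wire flowcontrol.NewBackOff(5*time.Second, time.Minute) into the same delivery loop. Roughly — this is a paraphrased sketch, not the verbatim source, and the deliver helper's exact signature is an assumption — a failed reconciliation grows the per-object backoff and schedules a later redelivery, while a success resets it:

func (s *SecretController) deliverSecret(secret types.NamespacedName, delay time.Duration, failed bool) {
	key := secret.String()
	if failed {
		s.secretBackoff.Next(key, time.Now()) // grow this secret's backoff window
		delay = delay + s.secretBackoff.Get(key)
	} else {
		s.secretBackoff.Reset(key) // success: future deliveries use the base delay again
	}
	s.secretDeliverer.DeliverAfter(key, &secret, delay)
}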
Example 13: TestPuller
func TestPuller(t *testing.T) {
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "test_pod",
Namespace: "test-ns",
UID: "bar",
ResourceVersion: "42",
SelfLink: "/api/v1/pods/foo",
}}
cases := []struct {
containerImage string
policy api.PullPolicy
calledFunctions []string
inspectErr error
pullerErr error
expectedErr []error
}{
{ // pull missing image
containerImage: "missing_image",
policy: api.PullIfNotPresent,
calledFunctions: []string{"IsImagePresent", "PullImage"},
inspectErr: nil,
pullerErr: nil,
expectedErr: []error{nil}},
{ // image present, don't pull
containerImage: "present_image",
policy: api.PullIfNotPresent,
calledFunctions: []string{"IsImagePresent"},
inspectErr: nil,
pullerErr: nil,
expectedErr: []error{nil, nil, nil}},
// image present, pull it
{containerImage: "present_image",
policy: api.PullAlways,
calledFunctions: []string{"IsImagePresent", "PullImage"},
inspectErr: nil,
pullerErr: nil,
expectedErr: []error{nil, nil, nil}},
// missing image, error PullNever
{containerImage: "missing_image",
policy: api.PullNever,
calledFunctions: []string{"IsImagePresent"},
inspectErr: nil,
pullerErr: nil,
expectedErr: []error{ErrImageNeverPull, ErrImageNeverPull, ErrImageNeverPull}},
// missing image, unable to inspect
{containerImage: "missing_image",
policy: api.PullIfNotPresent,
calledFunctions: []string{"IsImagePresent"},
inspectErr: errors.New("unknown inspectError"),
pullerErr: nil,
expectedErr: []error{ErrImageInspect, ErrImageInspect, ErrImageInspect}},
// missing image, unable to fetch
{containerImage: "typo_image",
policy: api.PullIfNotPresent,
calledFunctions: []string{"IsImagePresent", "PullImage"},
inspectErr: nil,
pullerErr: errors.New("404"),
expectedErr: []error{ErrImagePull, ErrImagePull, ErrImagePullBackOff, ErrImagePull, ErrImagePullBackOff, ErrImagePullBackOff}},
}
for i, c := range cases {
container := &api.Container{
Name: "container_name",
Image: c.containerImage,
ImagePullPolicy: c.policy,
}
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
fakeClock := util.NewFakeClock(time.Now())
backOff.Clock = fakeClock
fakeRuntime := &ctest.FakeRuntime{}
fakeRecorder := &record.FakeRecorder{}
puller := newParallelImagePuller(fakeRecorder, fakeRuntime, backOff)
fakeRuntime.ImageList = []Image{{"present_image", nil, nil, 1}}
fakeRuntime.Err = c.pullerErr
fakeRuntime.InspectErr = c.inspectErr
for tick, expected := range c.expectedErr {
fakeClock.Step(time.Second)
err, _ := puller.pullImage(pod, container, nil)
fakeRuntime.AssertCalls(c.calledFunctions)
assert.Equal(t, expected, err, "in test %d tick=%d", i, tick)
}
}
}
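The fake-clock pattern in the test above is worth calling out: Backoff exposes its Clock field, so a test can substitute util.NewFakeClock and step time forward deterministically instead of sleeping. A minimal sketch (the key name is invented; IsInBackOffSince semantics are as in the flowcontrol package at this vintage):

backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
fakeClock := util.NewFakeClock(time.Now())
backOff.Clock = fakeClock

start := fakeClock.Now()
backOff.Next("pull:busybox", start) // record a failure; the window is now 1s

fakeClock.Step(500 * time.Millisecond)
fmt.Println(backOff.IsInBackOffSince("pull:busybox", start)) // true: 0.5s elapsed < 1s

fakeClock.Step(time.Second)
fmt.Println(backOff.IsInBackOffSince("pull:busybox", start)) // false: 1.5s elapsed >= 1s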
Example 14: NewDaemonSetController
// NewDaemonSetController returns a new daemonset controller
func NewDaemonSetController(client federationclientset.Interface) *DaemonSetController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(api_v1.EventSource{Component: "federated-daemonset-controller"})
daemonsetcontroller := &DaemonSetController{
federatedApiClient: client,
daemonsetReviewDelay: time.Second * 10,
clusterAvailableDelay: time.Second * 20,
smallDelay: time.Second * 3,
updateTimeout: time.Second * 30,
daemonsetBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
eventRecorder: recorder,
}
// Build deliverers for triggering reconciliations.
daemonsetcontroller.daemonsetDeliverer = util.NewDelayingDeliverer()
daemonsetcontroller.clusterDeliverer = util.NewDelayingDeliverer()
// Start informer in federated API servers on daemonsets that should be federated.
daemonsetcontroller.daemonsetInformerStore, daemonsetcontroller.daemonsetInformerController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) {
return client.Extensions().DaemonSets(api_v1.NamespaceAll).List(options)
},
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
return client.Extensions().DaemonSets(api_v1.NamespaceAll).Watch(options)
},
},
&extensionsv1.DaemonSet{},
controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { daemonsetcontroller.deliverDaemonSetObj(obj, 0, false) }))
// Federated informer on daemonsets in members of federation.
daemonsetcontroller.daemonsetFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) {
return targetClient.Extensions().DaemonSets(api_v1.NamespaceAll).List(options)
},
WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
return targetClient.Extensions().DaemonSets(api_v1.NamespaceAll).Watch(options)
},
},
&extensionsv1.DaemonSet{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in a federated cluster changes. In most cases it
// would be just confirmation that some daemonset operation succeeded.
util.NewTriggerOnAllChanges(
func(obj pkg_runtime.Object) {
daemonsetcontroller.deliverDaemonSetObj(obj, daemonsetcontroller.daemonsetReviewDelay, false)
},
))
},
&util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federation_api.Cluster) {
// When a new cluster becomes available, process all the daemonsets again.
daemonsetcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(daemonsetcontroller.clusterAvailableDelay))
},
},
)
// Federated updater along with Create/Update/Delete operations.
daemonsetcontroller.federatedUpdater = util.NewFederatedUpdater(daemonsetcontroller.daemonsetFederatedInformer,
func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
daemonset := obj.(*extensionsv1.DaemonSet)
glog.V(4).Infof("Attempting to create daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
_, err := client.Extensions().DaemonSets(daemonset.Namespace).Create(daemonset)
if err != nil {
glog.Errorf("Error creating daemonset %s/%s/: %v", daemonset.Namespace, daemonset.Name, err)
} else {
glog.V(4).Infof("Successfully created deamonset %s/%s", daemonset.Namespace, daemonset.Name)
}
return err
},
func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
daemonset := obj.(*extensionsv1.DaemonSet)
glog.V(4).Infof("Attempting to update daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
_, err := client.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset)
if err != nil {
glog.Errorf("Error updating daemonset %s/%s/: %v", daemonset.Namespace, daemonset.Name, err)
} else {
glog.V(4).Infof("Successfully updating deamonset %s/%s", daemonset.Namespace, daemonset.Name)
}
return err
},
func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
daemonset := obj.(*extensionsv1.DaemonSet)
glog.V(4).Infof("Attempting to delete daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &api_v1.DeleteOptions{})
if err != nil {
glog.Errorf("Error deleting daemonset %s/%s/: %v", daemonset.Namespace, daemonset.Name, err)
} else {
glog.V(4).Infof("Successfully deleting deamonset %s/%s", daemonset.Namespace, daemonset.Name)
}
return err
//......... the rest of this code is omitted .........
Example 15: NewConfigMapController
// NewConfigMapController returns a new configmap controller
func NewConfigMapController(client federationclientset.Interface) *ConfigMapController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-configmaps-controller"})
configmapcontroller := &ConfigMapController{
federatedApiClient: client,
configmapReviewDelay: time.Second * 10,
clusterAvailableDelay: time.Second * 20,
smallDelay: time.Second * 3,
updateTimeout: time.Second * 30,
configmapBackoff: flowcontrol.NewBackOff(5*time.Second, time.Minute),
eventRecorder: recorder,
}
// Build deliverers for triggering reconciliations.
configmapcontroller.configmapDeliverer = util.NewDelayingDeliverer()
configmapcontroller.clusterDeliverer = util.NewDelayingDeliverer()
// Start informer on federated API servers on configmaps that should be federated.
configmapcontroller.configmapInformerStore, configmapcontroller.configmapInformerController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return client.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return client.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
},
},
&apiv1.ConfigMap{},
controller.NoResyncPeriodFunc(),
util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { configmapcontroller.deliverConfigMapObj(obj, 0, false) }))
// Federated informer on configmaps in members of federation.
configmapcontroller.configmapFederatedInformer = util.NewFederatedInformer(
client,
func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
},
WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
},
},
&apiv1.ConfigMap{},
controller.NoResyncPeriodFunc(),
// Trigger reconciliation whenever something in a federated cluster changes. In most cases it
// would be just confirmation that some configmap operation succeeded.
util.NewTriggerOnAllChanges(
func(obj pkgruntime.Object) {
configmapcontroller.deliverConfigMapObj(obj, configmapcontroller.configmapReviewDelay, false)
},
))
},
&util.ClusterLifecycleHandlerFuncs{
ClusterAvailable: func(cluster *federationapi.Cluster) {
// When a new cluster becomes available, process all the configmaps again.
configmapcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(configmapcontroller.clusterAvailableDelay))
},
},
)
// Federated updater along with Create/Update/Delete operations.
configmapcontroller.federatedUpdater = util.NewFederatedUpdater(configmapcontroller.configmapFederatedInformer,
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configmap := obj.(*apiv1.ConfigMap)
_, err := client.Core().ConfigMaps(configmap.Namespace).Create(configmap)
return err
},
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configmap := obj.(*apiv1.ConfigMap)
_, err := client.Core().ConfigMaps(configmap.Namespace).Update(configmap)
return err
},
func(client kubeclientset.Interface, obj pkgruntime.Object) error {
configmap := obj.(*apiv1.ConfigMap)
err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &apiv1.DeleteOptions{})
return err
})
return configmapcontroller
}