This article collects typical usage examples of the Interface type from the Go package k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5. If you are unsure what Interface is for or how it is used, the curated examples below should help.
Eight code examples of the Interface type are shown below, ordered by popularity.
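Before the examples, it helps to see how an Interface value is usually obtained: the generated *Clientset returned by the clientset constructor satisfies Interface. The sketch below is illustrative only; the kubeconfig path is a placeholder and the import paths and constructor follow the standard generated-clientset pattern for the release-1.5 tree rather than code taken from the examples.

package main

import (
    "fmt"

    federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
)

func main() {
    // Build a rest config from a kubeconfig that points at the federation apiserver
    // (the path is a placeholder).
    config, err := clientcmd.BuildConfigFromFlags("", "/path/to/federation.kubeconfig")
    if err != nil {
        panic(err)
    }
    // The generated *Clientset satisfies federationclientset.Interface.
    var client federationclientset.Interface = federationclientset.NewForConfigOrDie(config)
    // List the clusters registered with the federation control plane -- the same call
    // the cluster informer in Example 5 below issues through this Interface.
    clusters, err := client.Federation().Clusters().List(v1.ListOptions{})
    if err != nil {
        panic(err)
    }
    for _, c := range clusters.Items {
        fmt.Println("cluster:", c.Name)
    }
}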
Example 1: persistFedServiceUpdate
func (cc *clusterClientCache) persistFedServiceUpdate(cachedService *cachedService, fedClient fedclientset.Interface) error {
    service := cachedService.lastState
    glog.V(5).Infof("Persist federation service status %s/%s", service.Namespace, service.Name)
    var err error
    for i := 0; i < clientRetryCount; i++ {
        _, err = fedClient.Core().Services(service.Namespace).Get(service.Name, metav1.GetOptions{})
        if errors.IsNotFound(err) {
            glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
                service.Namespace, service.Name, err)
            return nil
        }
        _, err = fedClient.Core().Services(service.Namespace).UpdateStatus(service)
        if err == nil {
            glog.V(2).Infof("Successfully updated service %s/%s to federation apiserver", service.Namespace, service.Name)
            return nil
        }
        if errors.IsNotFound(err) {
            glog.Infof("Not persisting update to service '%s/%s' that no longer exists: %v",
                service.Namespace, service.Name, err)
            return nil
        }
        if errors.IsConflict(err) {
            glog.V(4).Infof("Not persisting update to service '%s/%s' that has been changed since we received it: %v",
                service.Namespace, service.Name, err)
            return err
        }
        time.Sleep(cachedService.nextFedUpdateDelay())
    }
    return err
}
Example 2: NewNamespaceController
// NewNamespaceController returns a new namespace controller
func NewNamespaceController(client federationclientset.Interface) *NamespaceController {
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
    recorder := broadcaster.NewRecorder(api.EventSource{Component: "federated-namespace-controller"})
    nc := &NamespaceController{
        federatedApiClient:    client,
        namespaceReviewDelay:  time.Second * 10,
        clusterAvailableDelay: time.Second * 20,
        smallDelay:            time.Second * 3,
        updateTimeout:         time.Second * 30,
        namespaceBackoff:      flowcontrol.NewBackOff(5*time.Second, time.Minute),
        eventRecorder:         recorder,
    }
    // Build deliverers for triggering reconciliations.
    nc.namespaceDeliverer = util.NewDelayingDeliverer()
    nc.clusterDeliverer = util.NewDelayingDeliverer()
    // Start informer in federated API servers on namespaces that should be federated.
    nc.namespaceInformerStore, nc.namespaceInformerController = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
                return client.Core().Namespaces().List(options)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                return client.Core().Namespaces().Watch(options)
            },
        },
        &api_v1.Namespace{},
        controller.NoResyncPeriodFunc(),
        util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { nc.deliverNamespaceObj(obj, 0, false) }))
    // Federated informer on namespaces in members of federation.
    nc.namespaceFederatedInformer = util.NewFederatedInformer(
        client,
        func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
            return cache.NewInformer(
                &cache.ListWatch{
                    ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
                        return targetClient.Core().Namespaces().List(options)
                    },
                    WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                        return targetClient.Core().Namespaces().Watch(options)
                    },
                },
                &api_v1.Namespace{},
                controller.NoResyncPeriodFunc(),
                // Trigger reconciliation whenever something in a federated cluster is changed. In most cases it
                // would be just confirmation that some namespace operation succeeded.
                util.NewTriggerOnMetaAndSpecChanges(
                    func(obj pkg_runtime.Object) { nc.deliverNamespaceObj(obj, nc.namespaceReviewDelay, false) },
                ))
        },
        &util.ClusterLifecycleHandlerFuncs{
            ClusterAvailable: func(cluster *federation_api.Cluster) {
                // When a new cluster becomes available, process all the namespaces again.
                nc.clusterDeliverer.DeliverAfter(allClustersKey, nil, nc.clusterAvailableDelay)
            },
        },
    )
    // Federated updater along with Create/Update/Delete operations.
    nc.federatedUpdater = util.NewFederatedUpdater(nc.namespaceFederatedInformer,
        func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
            namespace := obj.(*api_v1.Namespace)
            _, err := client.Core().Namespaces().Create(namespace)
            return err
        },
        func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
            namespace := obj.(*api_v1.Namespace)
            _, err := client.Core().Namespaces().Update(namespace)
            return err
        },
        func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
            namespace := obj.(*api_v1.Namespace)
            err := client.Core().Namespaces().Delete(namespace.Name, &api.DeleteOptions{})
            return err
        })
    return nc
}
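For context, a controller constructed this way is normally wired up and started by the federation controller-manager. The short sketch below is an assumption-laden illustration: the namespace package import path, the hypothetical wrapper package name, and the Run(stopChan) entry point follow the upstream layout but are not shown in the example above.

package controllersetup

import (
    federationclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
    namespacecontroller "k8s.io/kubernetes/federation/pkg/federation-controller/namespace"
    "k8s.io/kubernetes/pkg/util/wait"
)

// startNamespaceController builds the controller against the federation client and
// runs it until the process exits (wait.NeverStop is a channel that never closes).
func startNamespaceController(client federationclientset.Interface) {
    nc := namespacecontroller.NewNamespaceController(client)
    go nc.Run(wait.NeverStop) // Run(stopChan) is assumed to match the upstream signature
}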
Example 3: NewIngressController
// NewIngressController returns a new ingress controller
func NewIngressController(client federationclientset.Interface) *IngressController {
    glog.V(4).Infof("->NewIngressController V(4)")
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
    recorder := broadcaster.NewRecorder(v1.EventSource{Component: "federated-ingress-controller"})
    ic := &IngressController{
        federatedApiClient:    client,
        ingressReviewDelay:    time.Second * 10,
        configMapReviewDelay:  time.Second * 10,
        clusterAvailableDelay: time.Second * 20,
        smallDelay:            time.Second * 3,
        updateTimeout:         time.Second * 30,
        ingressBackoff:        flowcontrol.NewBackOff(5*time.Second, time.Minute),
        eventRecorder:         recorder,
        configMapBackoff:      flowcontrol.NewBackOff(5*time.Second, time.Minute),
    }
    // Build deliverers for triggering reconciliations.
    ic.ingressDeliverer = util.NewDelayingDeliverer()
    ic.clusterDeliverer = util.NewDelayingDeliverer()
    ic.configMapDeliverer = util.NewDelayingDeliverer()
    // Start informer in federated API servers on ingresses that should be federated.
    ic.ingressInformerStore, ic.ingressInformerController = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
                return client.Extensions().Ingresses(api.NamespaceAll).List(options)
            },
            WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
                return client.Extensions().Ingresses(api.NamespaceAll).Watch(options)
            },
        },
        &extensionsv1beta1.Ingress{},
        controller.NoResyncPeriodFunc(),
        util.NewTriggerOnAllChanges(
            func(obj pkgruntime.Object) {
                ic.deliverIngressObj(obj, 0, false)
            },
        ))
    // Federated informer on ingresses in members of federation.
    ic.ingressFederatedInformer = util.NewFederatedInformer(
        client,
        func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
            return cache.NewInformer(
                &cache.ListWatch{
                    ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
                        return targetClient.Extensions().Ingresses(api.NamespaceAll).List(options)
                    },
                    WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
                        return targetClient.Extensions().Ingresses(api.NamespaceAll).Watch(options)
                    },
                },
                &extensionsv1beta1.Ingress{},
                controller.NoResyncPeriodFunc(),
                // Trigger reconciliation whenever something in a federated cluster is changed. In most cases it
                // would be just confirmation that some ingress operation succeeded.
                util.NewTriggerOnAllChanges(
                    func(obj pkgruntime.Object) {
                        ic.deliverIngressObj(obj, ic.ingressReviewDelay, false)
                    },
                ))
        },
        &util.ClusterLifecycleHandlerFuncs{
            ClusterAvailable: func(cluster *federationapi.Cluster) {
                // When a new cluster becomes available, process all the ingresses again,
                // and configure its ingress controller's configmap with the correct UID.
                ic.clusterDeliverer.DeliverAfter(cluster.Name, cluster, ic.clusterAvailableDelay)
            },
        },
    )
    // Federated informer on configmaps for ingress controllers in members of the federation.
    ic.configMapFederatedInformer = util.NewFederatedInformer(
        client,
        func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
            glog.V(4).Infof("Returning new informer for cluster %q", cluster.Name)
            return cache.NewInformer(
                &cache.ListWatch{
                    ListFunc: func(options v1.ListOptions) (pkgruntime.Object, error) {
                        if targetClient == nil {
                            glog.Errorf("Internal error: targetClient is nil")
                        }
                        // We only want to list one configmap by name; unfortunately Kubernetes doesn't have a selector for that.
                        return targetClient.Core().ConfigMaps(uidConfigMapNamespace).List(options)
                    },
                    WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
                        if targetClient == nil {
                            glog.Errorf("Internal error: targetClient is nil")
                        }
                        return targetClient.Core().ConfigMaps(uidConfigMapNamespace).Watch(options) // as above
                    },
                },
                &v1.ConfigMap{},
                controller.NoResyncPeriodFunc(),
                // Trigger reconciliation whenever the ingress controller's configmap in a federated cluster is changed.
                // In most cases it would be just confirmation that the configmap for the ingress controller is correct.
                util.NewTriggerOnAllChanges(
                    func(obj pkgruntime.Object) {
                        ic.deliverConfigMapObj(cluster.Name, obj, ic.configMapReviewDelay, false)
// ... (remainder of this example omitted) ...
Example 4: NewConfigMapController
// NewConfigMapController returns a new configmap controller
func NewConfigMapController(client federationclientset.Interface) *ConfigMapController {
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
    recorder := broadcaster.NewRecorder(apiv1.EventSource{Component: "federated-configmaps-controller"})
    configmapcontroller := &ConfigMapController{
        federatedApiClient:    client,
        configmapReviewDelay:  time.Second * 10,
        clusterAvailableDelay: time.Second * 20,
        smallDelay:            time.Second * 3,
        updateTimeout:         time.Second * 30,
        configmapBackoff:      flowcontrol.NewBackOff(5*time.Second, time.Minute),
        eventRecorder:         recorder,
    }
    // Build deliverers for triggering reconciliations.
    configmapcontroller.configmapDeliverer = util.NewDelayingDeliverer()
    configmapcontroller.clusterDeliverer = util.NewDelayingDeliverer()
    // Start informer on federated API servers on configmaps that should be federated.
    configmapcontroller.configmapInformerStore, configmapcontroller.configmapInformerController = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
                return client.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
            },
            WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
                return client.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
            },
        },
        &apiv1.ConfigMap{},
        controller.NoResyncPeriodFunc(),
        util.NewTriggerOnAllChanges(func(obj pkgruntime.Object) { configmapcontroller.deliverConfigMapObj(obj, 0, false) }))
    // Federated informer on configmaps in members of federation.
    configmapcontroller.configmapFederatedInformer = util.NewFederatedInformer(
        client,
        func(cluster *federationapi.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
            return cache.NewInformer(
                &cache.ListWatch{
                    ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
                        return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).List(options)
                    },
                    WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
                        return targetClient.Core().ConfigMaps(apiv1.NamespaceAll).Watch(options)
                    },
                },
                &apiv1.ConfigMap{},
                controller.NoResyncPeriodFunc(),
                // Trigger reconciliation whenever something in a federated cluster is changed. In most cases it
                // would be just confirmation that some configmap operation succeeded.
                util.NewTriggerOnAllChanges(
                    func(obj pkgruntime.Object) {
                        configmapcontroller.deliverConfigMapObj(obj, configmapcontroller.configmapReviewDelay, false)
                    },
                ))
        },
        &util.ClusterLifecycleHandlerFuncs{
            ClusterAvailable: func(cluster *federationapi.Cluster) {
                // When a new cluster becomes available, process all the configmaps again.
                configmapcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(configmapcontroller.clusterAvailableDelay))
            },
        },
    )
    // Federated updater along with Create/Update/Delete operations.
    configmapcontroller.federatedUpdater = util.NewFederatedUpdater(configmapcontroller.configmapFederatedInformer,
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            configmap := obj.(*apiv1.ConfigMap)
            _, err := client.Core().ConfigMaps(configmap.Namespace).Create(configmap)
            return err
        },
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            configmap := obj.(*apiv1.ConfigMap)
            _, err := client.Core().ConfigMaps(configmap.Namespace).Update(configmap)
            return err
        },
        func(client kubeclientset.Interface, obj pkgruntime.Object) error {
            configmap := obj.(*apiv1.ConfigMap)
            err := client.Core().ConfigMaps(configmap.Namespace).Delete(configmap.Name, &apiv1.DeleteOptions{})
            return err
        })
    return configmapcontroller
}
Example 5: NewFederatedInformer
// Builds a FederatedInformer for the given federation client and factory.
func NewFederatedInformer(
    federationClient federationclientset.Interface,
    targetInformerFactory TargetInformerFactory,
    clusterLifecycle *ClusterLifecycleHandlerFuncs) FederatedInformer {
    federatedInformer := &federatedInformerImpl{
        targetInformerFactory: targetInformerFactory,
        clientFactory: func(cluster *federationapi.Cluster) (kubeclientset.Interface, error) {
            clusterConfig, err := BuildClusterConfig(cluster)
            if err == nil && clusterConfig != nil {
                clientset := kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(clusterConfig, userAgentName))
                return clientset, nil
            }
            return nil, err
        },
        targetInformers: make(map[string]informer),
    }
    getClusterData := func(name string) []interface{} {
        data, err := federatedInformer.GetTargetStore().ListFromCluster(name)
        if err != nil {
            glog.Errorf("Failed to list %s content: %v", name, err)
            return make([]interface{}, 0)
        }
        return data
    }
    federatedInformer.clusterInformer.store, federatedInformer.clusterInformer.controller = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options apiv1.ListOptions) (pkgruntime.Object, error) {
                return federationClient.Federation().Clusters().List(options)
            },
            WatchFunc: func(options apiv1.ListOptions) (watch.Interface, error) {
                return federationClient.Federation().Clusters().Watch(options)
            },
        },
        &federationapi.Cluster{},
        clusterSyncPeriod,
        cache.ResourceEventHandlerFuncs{
            DeleteFunc: func(old interface{}) {
                oldCluster, ok := old.(*federationapi.Cluster)
                if ok {
                    var data []interface{}
                    if clusterLifecycle.ClusterUnavailable != nil {
                        data = getClusterData(oldCluster.Name)
                    }
                    federatedInformer.deleteCluster(oldCluster)
                    if clusterLifecycle.ClusterUnavailable != nil {
                        clusterLifecycle.ClusterUnavailable(oldCluster, data)
                    }
                }
            },
            AddFunc: func(cur interface{}) {
                curCluster, ok := cur.(*federationapi.Cluster)
                if ok && isClusterReady(curCluster) {
                    federatedInformer.addCluster(curCluster)
                    if clusterLifecycle.ClusterAvailable != nil {
                        clusterLifecycle.ClusterAvailable(curCluster)
                    }
                } else {
                    glog.Errorf("Cluster %v not added. Not of correct type, or cluster not ready.", cur)
                }
            },
            UpdateFunc: func(old, cur interface{}) {
                oldCluster, ok := old.(*federationapi.Cluster)
                if !ok {
                    glog.Errorf("Internal error: Cluster %v not updated. Old cluster not of correct type.", old)
                    return
                }
                curCluster, ok := cur.(*federationapi.Cluster)
                if !ok {
                    glog.Errorf("Internal error: Cluster %v not updated. New cluster not of correct type.", cur)
                    return
                }
                if isClusterReady(oldCluster) != isClusterReady(curCluster) || !reflect.DeepEqual(oldCluster.Spec, curCluster.Spec) || !reflect.DeepEqual(oldCluster.ObjectMeta.Annotations, curCluster.ObjectMeta.Annotations) {
                    var data []interface{}
                    if clusterLifecycle.ClusterUnavailable != nil {
                        data = getClusterData(oldCluster.Name)
                    }
                    federatedInformer.deleteCluster(oldCluster)
                    if clusterLifecycle.ClusterUnavailable != nil {
                        clusterLifecycle.ClusterUnavailable(oldCluster, data)
                    }
                    if isClusterReady(curCluster) {
                        federatedInformer.addCluster(curCluster)
                        if clusterLifecycle.ClusterAvailable != nil {
                            clusterLifecycle.ClusterAvailable(curCluster)
                        }
                    }
                } else {
                    glog.V(4).Infof("Cluster %v not updated to %v as ready status and specs are identical", oldCluster, curCluster)
                }
            },
        },
    )
    return federatedInformer
}
Example 6: NewDaemonSetController
// NewDaemonSetController returns a new daemonset controller
func NewDaemonSetController(client federationclientset.Interface) *DaemonSetController {
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
    recorder := broadcaster.NewRecorder(api_v1.EventSource{Component: "federated-daemonset-controller"})
    daemonsetcontroller := &DaemonSetController{
        federatedApiClient:    client,
        daemonsetReviewDelay:  time.Second * 10,
        clusterAvailableDelay: time.Second * 20,
        smallDelay:            time.Second * 3,
        updateTimeout:         time.Second * 30,
        daemonsetBackoff:      flowcontrol.NewBackOff(5*time.Second, time.Minute),
        eventRecorder:         recorder,
    }
    // Build deliverers for triggering reconciliations.
    daemonsetcontroller.daemonsetDeliverer = util.NewDelayingDeliverer()
    daemonsetcontroller.clusterDeliverer = util.NewDelayingDeliverer()
    // Start informer in federated API servers on daemonsets that should be federated.
    daemonsetcontroller.daemonsetInformerStore, daemonsetcontroller.daemonsetInformerController = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) {
                return client.Extensions().DaemonSets(api_v1.NamespaceAll).List(options)
            },
            WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
                return client.Extensions().DaemonSets(api_v1.NamespaceAll).Watch(options)
            },
        },
        &extensionsv1.DaemonSet{},
        controller.NoResyncPeriodFunc(),
        util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { daemonsetcontroller.deliverDaemonSetObj(obj, 0, false) }))
    // Federated informer on daemonsets in members of federation.
    daemonsetcontroller.daemonsetFederatedInformer = util.NewFederatedInformer(
        client,
        func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
            return cache.NewInformer(
                &cache.ListWatch{
                    ListFunc: func(options api_v1.ListOptions) (pkg_runtime.Object, error) {
                        return targetClient.Extensions().DaemonSets(api_v1.NamespaceAll).List(options)
                    },
                    WatchFunc: func(options api_v1.ListOptions) (watch.Interface, error) {
                        return targetClient.Extensions().DaemonSets(api_v1.NamespaceAll).Watch(options)
                    },
                },
                &extensionsv1.DaemonSet{},
                controller.NoResyncPeriodFunc(),
                // Trigger reconciliation whenever something in a federated cluster is changed. In most cases it
                // would be just confirmation that some daemonset operation succeeded.
                util.NewTriggerOnAllChanges(
                    func(obj pkg_runtime.Object) {
                        daemonsetcontroller.deliverDaemonSetObj(obj, daemonsetcontroller.daemonsetReviewDelay, false)
                    },
                ))
        },
        &util.ClusterLifecycleHandlerFuncs{
            ClusterAvailable: func(cluster *federation_api.Cluster) {
                // When a new cluster becomes available, process all the daemonsets again.
                daemonsetcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(daemonsetcontroller.clusterAvailableDelay))
            },
        },
    )
    // Federated updater along with Create/Update/Delete operations.
    daemonsetcontroller.federatedUpdater = util.NewFederatedUpdater(daemonsetcontroller.daemonsetFederatedInformer,
        func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
            daemonset := obj.(*extensionsv1.DaemonSet)
            glog.V(4).Infof("Attempting to create daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
            _, err := client.Extensions().DaemonSets(daemonset.Namespace).Create(daemonset)
            if err != nil {
                glog.Errorf("Error creating daemonset %s/%s: %v", daemonset.Namespace, daemonset.Name, err)
            } else {
                glog.V(4).Infof("Successfully created daemonset %s/%s", daemonset.Namespace, daemonset.Name)
            }
            return err
        },
        func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
            daemonset := obj.(*extensionsv1.DaemonSet)
            glog.V(4).Infof("Attempting to update daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
            _, err := client.Extensions().DaemonSets(daemonset.Namespace).Update(daemonset)
            if err != nil {
                glog.Errorf("Error updating daemonset %s/%s: %v", daemonset.Namespace, daemonset.Name, err)
            } else {
                glog.V(4).Infof("Successfully updated daemonset %s/%s", daemonset.Namespace, daemonset.Name)
            }
            return err
        },
        func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
            daemonset := obj.(*extensionsv1.DaemonSet)
            glog.V(4).Infof("Attempting to delete daemonset: %s/%s", daemonset.Namespace, daemonset.Name)
            err := client.Extensions().DaemonSets(daemonset.Namespace).Delete(daemonset.Name, &api_v1.DeleteOptions{})
            if err != nil {
                glog.Errorf("Error deleting daemonset %s/%s: %v", daemonset.Namespace, daemonset.Name, err)
            } else {
                glog.V(4).Infof("Successfully deleted daemonset %s/%s", daemonset.Namespace, daemonset.Name)
            }
            return err
// ... (remainder of this example omitted) ...
Example 7: syncService
// Whenever there is a change on a service, the federation service should be updated.
func (cc *clusterClientCache) syncService(key, clusterName string, clusterCache *clusterCache, serviceCache *serviceCache, fedClient fedclientset.Interface, sc *ServiceController) error {
    // obj holds the latest service info from the apiserver; return if there is no federation cache for the service.
    cachedService, ok := serviceCache.get(key)
    if !ok {
        // If the serviceCache entry does not exist, the service was not created by the federation, so skip it.
        return nil
    }
    serviceInterface, exists, err := clusterCache.serviceStore.Indexer.GetByKey(key)
    if err != nil {
        glog.Errorf("Did not successfully get %v from store: %v, will retry later", key, err)
        clusterCache.serviceQueue.Add(key)
        return err
    }
    var needUpdate, isDeletion bool
    if exists {
        service, ok := serviceInterface.(*v1.Service)
        if ok {
            glog.V(4).Infof("Found service for federation service %s/%s from cluster %s", service.Namespace, service.Name, clusterName)
            needUpdate = cc.processServiceUpdate(cachedService, service, clusterName)
        } else {
            _, ok := serviceInterface.(cache.DeletedFinalStateUnknown)
            if !ok {
                return fmt.Errorf("Object contained wasn't a service or a deleted key: %+v", serviceInterface)
            }
            glog.Infof("Found tombstone for %v", key)
            needUpdate = cc.processServiceDeletion(cachedService, clusterName)
            isDeletion = true
        }
    } else {
        glog.Infof("Cannot get service %v for cluster %s from serviceStore", key, clusterName)
        needUpdate = cc.processServiceDeletion(cachedService, clusterName)
        isDeletion = true
    }
    if needUpdate {
        for i := 0; i < clientRetryCount; i++ {
            err := sc.ensureDnsRecords(clusterName, cachedService)
            if err == nil {
                break
            }
            glog.V(4).Infof("Error ensuring DNS Records for service %s on cluster %s: %v", key, clusterName, err)
            time.Sleep(cachedService.nextDNSUpdateDelay())
            clusterCache.serviceQueue.Add(key)
            // Do not return here: we still want to persist to the federation apiserver even if ensuring DNS records fails.
        }
        err := cc.persistFedServiceUpdate(cachedService, fedClient)
        if err == nil {
            cachedService.appliedState = cachedService.lastState
            cachedService.resetFedUpdateDelay()
        } else {
            glog.Errorf("Failed to sync service: %+v, put back to service queue", err)
            clusterCache.serviceQueue.Add(key)
        }
    }
    if isDeletion {
        // cachedService is not reliable here, because deleting the cache is the last step of federation service deletion.
        _, err := fedClient.Core().Services(cachedService.lastState.Namespace).Get(cachedService.lastState.Name, metav1.GetOptions{})
        // Rebuild the cluster service if the federation service still exists.
        if err == nil || !errors.IsNotFound(err) {
            return sc.ensureClusterService(cachedService, clusterName, cachedService.appliedState, clusterCache.clientset)
        }
    }
    return nil
}
Example 8: NewSecretController
// NewSecretController returns a new secret controller
func NewSecretController(client federationclientset.Interface) *SecretController {
    broadcaster := record.NewBroadcaster()
    broadcaster.StartRecordingToSink(eventsink.NewFederatedEventSink(client))
    recorder := broadcaster.NewRecorder(api.EventSource{Component: "federated-secrets-controller"})
    secretcontroller := &SecretController{
        federatedApiClient:    client,
        secretReviewDelay:     time.Second * 10,
        clusterAvailableDelay: time.Second * 20,
        smallDelay:            time.Second * 3,
        updateTimeout:         time.Second * 30,
        secretBackoff:         flowcontrol.NewBackOff(5*time.Second, time.Minute),
        eventRecorder:         recorder,
    }
    // Build deliverers for triggering reconciliations.
    secretcontroller.secretDeliverer = util.NewDelayingDeliverer()
    secretcontroller.clusterDeliverer = util.NewDelayingDeliverer()
    // Start informer in federated API servers on secrets that should be federated.
    secretcontroller.secretInformerStore, secretcontroller.secretInformerController = cache.NewInformer(
        &cache.ListWatch{
            ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
                versionedOptions := util.VersionizeV1ListOptions(options)
                return client.Core().Secrets(api_v1.NamespaceAll).List(versionedOptions)
            },
            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                versionedOptions := util.VersionizeV1ListOptions(options)
                return client.Core().Secrets(api_v1.NamespaceAll).Watch(versionedOptions)
            },
        },
        &api_v1.Secret{},
        controller.NoResyncPeriodFunc(),
        util.NewTriggerOnAllChanges(func(obj pkg_runtime.Object) { secretcontroller.deliverSecretObj(obj, 0, false) }))
    // Federated informer on secrets in members of federation.
    secretcontroller.secretFederatedInformer = util.NewFederatedInformer(
        client,
        func(cluster *federation_api.Cluster, targetClient kubeclientset.Interface) (cache.Store, cache.ControllerInterface) {
            return cache.NewInformer(
                &cache.ListWatch{
                    ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
                        versionedOptions := util.VersionizeV1ListOptions(options)
                        return targetClient.Core().Secrets(api_v1.NamespaceAll).List(versionedOptions)
                    },
                    WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
                        versionedOptions := util.VersionizeV1ListOptions(options)
                        return targetClient.Core().Secrets(api_v1.NamespaceAll).Watch(versionedOptions)
                    },
                },
                &api_v1.Secret{},
                controller.NoResyncPeriodFunc(),
                // Trigger reconciliation whenever something in a federated cluster is changed. In most cases it
                // would be just confirmation that some secret operation succeeded.
                util.NewTriggerOnAllChanges(
                    func(obj pkg_runtime.Object) {
                        secretcontroller.deliverSecretObj(obj, secretcontroller.secretReviewDelay, false)
                    },
                ))
        },
        &util.ClusterLifecycleHandlerFuncs{
            ClusterAvailable: func(cluster *federation_api.Cluster) {
                // When a new cluster becomes available, process all the secrets again.
                secretcontroller.clusterDeliverer.DeliverAt(allClustersKey, nil, time.Now().Add(secretcontroller.clusterAvailableDelay))
            },
        },
    )
    // Federated updater along with Create/Update/Delete operations.
    secretcontroller.federatedUpdater = util.NewFederatedUpdater(secretcontroller.secretFederatedInformer,
        func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
            secret := obj.(*api_v1.Secret)
            _, err := client.Core().Secrets(secret.Namespace).Create(secret)
            return err
        },
        func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
            secret := obj.(*api_v1.Secret)
            _, err := client.Core().Secrets(secret.Namespace).Update(secret)
            return err
        },
        func(client kubeclientset.Interface, obj pkg_runtime.Object) error {
            secret := obj.(*api_v1.Secret)
            err := client.Core().Secrets(secret.Namespace).Delete(secret.Name, &api_v1.DeleteOptions{})
            return err
        })
    secretcontroller.deletionHelper = deletionhelper.NewDeletionHelper(
        secretcontroller.hasFinalizerFunc,
        secretcontroller.removeFinalizerFunc,
        secretcontroller.addFinalizerFunc,
        // objNameFunc
        func(obj pkg_runtime.Object) string {
            secret := obj.(*api_v1.Secret)
            return secret.Name
        },
        secretcontroller.updateTimeout,
        secretcontroller.eventRecorder,
        secretcontroller.secretFederatedInformer,
// ... (remainder of this example omitted) ...