This page collects and summarizes typical usage examples of the Go function ObjectMetaAndSpecEquivalent from the package k8s.io/kubernetes/federation/pkg/federation-controller/util. If you have been wondering what exactly ObjectMetaAndSpecEquivalent does and how to use it, the curated code examples below should help.
The 8 code examples of ObjectMetaAndSpecEquivalent shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Go examples.
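For orientation before the examples: as the call sites below suggest, ObjectMetaAndSpecEquivalent reports whether two Kubernetes API objects agree on their user-specified data, i.e. the relevant parts of ObjectMeta plus the entire Spec, while ignoring cluster-managed data such as Status. The function's own source is not reproduced on this page, so the following is only a minimal sketch of the idea; the reflection-based field lookup and the exact set of ObjectMeta fields compared are assumptions for illustration, not the library's actual code.

import (
	"reflect"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/runtime"
)

// objectMetaAndSpecEquivalentSketch is a hypothetical reimplementation: it
// compares the user-settable identity of two API objects (name, namespace,
// labels, annotations) and deep-compares their Spec fields, ignoring Status
// and other cluster-managed metadata.
func objectMetaAndSpecEquivalentSketch(a, b runtime.Object) bool {
	metaA := reflect.ValueOf(a).Elem().FieldByName("ObjectMeta").Interface().(v1.ObjectMeta)
	metaB := reflect.ValueOf(b).Elem().FieldByName("ObjectMeta").Interface().(v1.ObjectMeta)
	specA := reflect.ValueOf(a).Elem().FieldByName("Spec").Interface()
	specB := reflect.ValueOf(b).Elem().FieldByName("Spec").Interface()
	metaEqual := metaA.Name == metaB.Name &&
		metaA.Namespace == metaB.Namespace &&
		reflect.DeepEqual(metaA.Labels, metaB.Labels) &&
		reflect.DeepEqual(metaA.Annotations, metaB.Annotations)
	return metaEqual && reflect.DeepEqual(specA, specB)
}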
Example 1: MetaAndSpecCheckingFunction
func MetaAndSpecCheckingFunction(expected runtime.Object) CheckingFunction {
	return func(obj runtime.Object) error {
		if util.ObjectMetaAndSpecEquivalent(obj, expected) {
			return nil
		}
		return fmt.Errorf("Object different expected=%#v received=%#v", expected, obj)
	}
}
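The returned closure is meant to be handed to a polling helper that repeatedly fetches an object from a member cluster until the checker accepts it or a timeout expires (from the signature above, CheckingFunction is evidently func(runtime.Object) error). A hypothetical driver loop, assuming the fmt, time, and runtime imports; waitFor and its getter argument are illustrative, not part of the package:

// waitFor polls getter until check accepts the fetched object or the timeout
// elapses. Hypothetical sketch; the real e2e utilities differ in detail.
func waitFor(getter func() (runtime.Object, error), check CheckingFunction, timeout time.Duration) error {
	var lastErr error
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		obj, err := getter()
		if err != nil {
			lastErr = err
		} else if lastErr = check(obj); lastErr == nil {
			return nil // the object's meta and spec match the expected one
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("timed out waiting for equivalent object: %v", lastErr)
}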
Example 2: waitForDaemonSetUpdateOrFail
func waitForDaemonSetUpdateOrFail(clientset *kubeclientset.Clientset, namespace string, daemonset *v1beta1.DaemonSet, timeout time.Duration) {
	By(fmt.Sprintf("Fetching a federated daemonset shard of daemonset %q in namespace %q from cluster", daemonset.Name, namespace))
	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		clusterDaemonSet, err := clientset.Extensions().DaemonSets(namespace).Get(daemonset.Name)
		if err == nil { // We want it present, and the Get succeeded, so we're all good.
			if util.ObjectMetaAndSpecEquivalent(clusterDaemonSet, daemonset) {
				By(fmt.Sprintf("Success: shard of federated daemonset %q in namespace %q in cluster is updated", daemonset.Name, namespace))
				return true, nil
			} else {
				By(fmt.Sprintf("Expected equal daemonsets. expected: %+v\nactual: %+v", *daemonset, *clusterDaemonSet))
			}
			By(fmt.Sprintf("DaemonSet %q in namespace %q in cluster, waiting for daemonset being updated, trying again in %s (err=%v)", daemonset.Name, namespace, framework.Poll, err))
			return false, nil
		}
		By(fmt.Sprintf("DaemonSet %q in namespace %q in cluster, waiting for being updated, trying again in %s (err=%v)", daemonset.Name, namespace, framework.Poll, err))
		return false, nil
	})
	framework.ExpectNoError(err, "Failed to verify daemonset %q in namespace %q in cluster", daemonset.Name, namespace)
}
Example 3: waitForDaemonSetOrFail
func waitForDaemonSetOrFail(clientset *kubeclientset.Clientset, namespace string, daemonset *v1beta1.DaemonSet, present bool, timeout time.Duration) {
	By(fmt.Sprintf("Fetching a federated daemonset shard of daemonset %q in namespace %q from cluster", daemonset.Name, namespace))
	var clusterDaemonSet *v1beta1.DaemonSet
	err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
		var err error
		// Assign to the outer clusterDaemonSet (rather than shadowing it with :=)
		// so that the equivalence check after the poll can see the fetched object.
		clusterDaemonSet, err = clientset.Extensions().DaemonSets(namespace).Get(daemonset.Name)
		if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
			By(fmt.Sprintf("Success: shard of federated daemonset %q in namespace %q in cluster is absent", daemonset.Name, namespace))
			return true, nil // Success
		}
		if present && err == nil { // We want it present, and the Get succeeded, so we're all good.
			By(fmt.Sprintf("Success: shard of federated daemonset %q in namespace %q in cluster is present", daemonset.Name, namespace))
			return true, nil // Success
		}
		By(fmt.Sprintf("DaemonSet %q in namespace %q in cluster. Found: %v, waiting for Found: %v, trying again in %s (err=%v)", daemonset.Name, namespace, clusterDaemonSet != nil && err == nil, present, framework.Poll, err))
		return false, nil
	})
	framework.ExpectNoError(err, "Failed to verify daemonset %q in namespace %q in cluster: Present=%v", daemonset.Name, namespace, present)
	if present && clusterDaemonSet != nil {
		// .To(BeTrue()) makes the assertion actually fire; a bare Expect(...) is a no-op.
		Expect(util.ObjectMetaAndSpecEquivalent(clusterDaemonSet, daemonset)).To(BeTrue())
	}
}
Example 4: reconcileNamespace
func (nc *NamespaceController) reconcileNamespace(namespace string) {
	if !nc.isSynced() {
		nc.deliverNamespace(namespace, nc.clusterAvailableDelay, false)
		return
	}
	baseNamespaceObj, exist, err := nc.namespaceInformerStore.GetByKey(namespace)
	if err != nil {
		glog.Errorf("Failed to query main namespace store for %v: %v", namespace, err)
		nc.deliverNamespace(namespace, 0, true)
		return
	}
	if !exist {
		// Not federated namespace, ignoring.
		return
	}
	baseNamespace := baseNamespaceObj.(*api_v1.Namespace)
	if baseNamespace.DeletionTimestamp != nil {
		if err := nc.delete(baseNamespace); err != nil {
			glog.Errorf("Failed to delete %s: %v", namespace, err)
			nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "DeleteFailed",
				"Namespace delete failed: %v", err)
			nc.deliverNamespace(namespace, 0, true)
		}
		return
	}
	glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for namespace: %s",
		baseNamespace.Name)
	// Add the DeleteFromUnderlyingClusters finalizer before creating a namespace in
	// underlying clusters.
	// This ensures that the dependent namespaces are deleted in underlying
	// clusters when the federated namespace is deleted.
	updatedNamespaceObj, err := nc.deletionHelper.EnsureDeleteFromUnderlyingClustersFinalizer(baseNamespace)
	if err != nil {
		glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in namespace %s: %v",
			baseNamespace.Name, err)
		nc.deliverNamespace(namespace, 0, false)
		return
	}
	baseNamespace = updatedNamespaceObj.(*api_v1.Namespace)
	glog.V(3).Infof("Syncing namespace %s in underlying clusters", baseNamespace.Name)
	// Sync the namespace in all underlying clusters.
	clusters, err := nc.namespaceFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		nc.deliverNamespace(namespace, nc.clusterAvailableDelay, false)
		return
	}
	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterNamespaceObj, found, err := nc.namespaceFederatedInformer.GetTargetStore().GetByKey(cluster.Name, namespace)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v", namespace, cluster.Name, err)
			nc.deliverNamespace(namespace, 0, true)
			return
		}
		desiredNamespace := &api_v1.Namespace{
			ObjectMeta: util.CopyObjectMeta(baseNamespace.ObjectMeta),
			Spec:       baseNamespace.Spec,
		}
		glog.V(5).Infof("Desired namespace in underlying clusters: %+v", desiredNamespace)
		if !found {
			nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "CreateInCluster",
				"Creating namespace in cluster %s", cluster.Name)
			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredNamespace,
				ClusterName: cluster.Name,
			})
		} else {
			clusterNamespace := clusterNamespaceObj.(*api_v1.Namespace)
			// Update existing namespace, if needed.
			if !util.ObjectMetaAndSpecEquivalent(desiredNamespace, clusterNamespace) {
				nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "UpdateInCluster",
					"Updating namespace in cluster %s. Desired: %+v\n Actual: %+v\n", cluster.Name, desiredNamespace, clusterNamespace)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredNamespace,
					ClusterName: cluster.Name,
				})
			}
		}
	}
	if len(operations) == 0 {
		// Everything is in order
		return
	}
	glog.V(2).Infof("Updating namespace %s in underlying clusters. Operations: %d", baseNamespace.Name, len(operations))
	err = nc.federatedUpdater.UpdateWithOnError(operations, nc.updateTimeout, func(op util.FederatedOperation, operror error) {
		nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "UpdateInClusterFailed",
//......... the remainder of this code has been omitted .........
Example 5: reconcileIngress
//......... portions of this code have been omitted .........
				glog.V(4).Infof("No annotation %q exists on ingress %q in federation and waiting for ingress in cluster %s. Not queueing create operation for ingress until annotation exists", staticIPNameKeyWritable, ingress, firstClusterName)
			}
		} else {
			clusterIngress := clusterIngressObj.(*extensionsv1beta1.Ingress)
			glog.V(4).Infof("Found existing Ingress %s in cluster %s - checking if update is required (in either direction)", ingress, cluster.Name)
			clusterIPName, clusterIPNameExists := clusterIngress.ObjectMeta.Annotations[staticIPNameKeyReadonly]
			baseLBStatusExists := len(baseIngress.Status.LoadBalancer.Ingress) > 0
			clusterLBStatusExists := len(clusterIngress.Status.LoadBalancer.Ingress) > 0
			logStr := fmt.Sprintf("Cluster ingress %q has annotation %q=%q, loadbalancer status exists? [%v], federated ingress has annotation %q=%q, loadbalancer status exists? [%v]. %%s annotation and/or loadbalancer status from cluster ingress to federated ingress.", ingress, staticIPNameKeyReadonly, clusterIPName, clusterLBStatusExists, staticIPNameKeyWritable, baseIPName, baseLBStatusExists)
			if (!baseIPAnnotationExists && clusterIPNameExists) || (!baseLBStatusExists && clusterLBStatusExists) { // copy the IP name from the readonly annotation on the cluster ingress, to the writable annotation on the federated ingress
				glog.V(4).Infof(logStr, "Transferring")
				if !baseIPAnnotationExists && clusterIPNameExists {
					ic.updateAnnotationOnIngress(baseIngress, staticIPNameKeyWritable, clusterIPName)
					return
				}
				if !baseLBStatusExists && clusterLBStatusExists {
					lbstatusObj, lbErr := conversion.NewCloner().DeepCopy(&clusterIngress.Status.LoadBalancer)
					lbstatus, ok := lbstatusObj.(*v1.LoadBalancerStatus)
					if lbErr != nil || !ok {
						glog.Errorf("Internal error: Failed to clone LoadBalancerStatus of %q in cluster %q while attempting to update master loadbalancer ingress status, will try again later. error: %v, Object to be cloned: %v", ingress, cluster.Name, lbErr, lbstatusObj)
						ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
						return
					}
					baseIngress.Status.LoadBalancer = *lbstatus
					glog.V(4).Infof("Attempting to update base federated ingress status: %v", baseIngress)
					if updatedFedIngress, err := ic.federatedApiClient.Extensions().Ingresses(baseIngress.Namespace).UpdateStatus(baseIngress); err != nil {
						glog.Errorf("Failed to update federated ingress status of %q (loadbalancer status), will try again later: %v", ingress, err)
						ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
						return
					} else {
						glog.V(4).Infof("Successfully updated federated ingress status of %q (added loadbalancer status), after update: %q", ingress, updatedFedIngress)
						ic.deliverIngress(ingress, ic.smallDelay, false)
						return
					}
				}
			} else {
				glog.V(4).Infof(logStr, "Not transferring")
			}
			// Update existing cluster ingress, if needed.
			if util.ObjectMetaAndSpecEquivalent(baseIngress, clusterIngress) {
				glog.V(4).Infof("Ingress %q in cluster %q does not need an update: cluster ingress is equivalent to federated ingress", ingress, cluster.Name)
			} else {
				glog.V(4).Infof("Ingress %s in cluster %s needs an update: cluster ingress %v is not equivalent to federated ingress %v", ingress, cluster.Name, clusterIngress, desiredIngress)
				objMeta, err := conversion.NewCloner().DeepCopy(clusterIngress.ObjectMeta)
				if err != nil {
					glog.Errorf("Error deep copying ObjectMeta: %v", err)
					ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
				}
				desiredIngress.ObjectMeta, ok = objMeta.(v1.ObjectMeta)
				if !ok {
					glog.Errorf("Internal error: Failed to cast to v1.ObjectMeta: %v", objMeta)
					ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
				}
				// Merge any annotations and labels on the federated ingress onto the underlying cluster ingress,
				// overwriting duplicates.
				if desiredIngress.ObjectMeta.Annotations == nil {
					desiredIngress.ObjectMeta.Annotations = make(map[string]string)
				}
				for key, val := range baseIngress.ObjectMeta.Annotations {
					desiredIngress.ObjectMeta.Annotations[key] = val
				}
				if desiredIngress.ObjectMeta.Labels == nil {
					desiredIngress.ObjectMeta.Labels = make(map[string]string)
				}
				for key, val := range baseIngress.ObjectMeta.Labels {
					desiredIngress.ObjectMeta.Labels[key] = val
				}
				ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "UpdateInCluster",
					"Updating ingress in cluster %s", cluster.Name)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredIngress,
					ClusterName: cluster.Name,
				})
				// TODO: Transfer any readonly (target-proxy, url-map etc) annotations from the master cluster to the federation, if this is the master cluster.
				// This is only for consistency, so that the federation ingress metadata matches the underlying clusters. It's not actually required.
			}
		}
	}
	if len(operations) == 0 {
		// Everything is in order
		glog.V(4).Infof("Ingress %q is up-to-date in all clusters - no propagation to clusters required.", ingress)
		return
	}
	glog.V(4).Infof("Calling federatedUpdater.Update() - operations: %v", operations)
	err = ic.federatedIngressUpdater.UpdateWithOnError(operations, ic.updateTimeout, func(op util.FederatedOperation, operror error) {
		ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "FailedClusterUpdate",
			"Ingress update in cluster %s failed: %v", op.ClusterName, operror)
	})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", ingress, err)
		ic.deliverIngress(ingress, ic.ingressReviewDelay, true)
		return
	}
	// Schedule another periodic reconciliation, only to account for possible bugs in watch processing.
	ic.deliverIngress(ingress, ic.ingressReviewDelay, false)
}
Example 6: reconcileReplicaSet
//......... portions of this code have been omitted .........
	if err != nil {
		return statusError, err
	}
	// collect current status and do schedule
	allPods, err := frsc.fedPodInformer.GetTargetStore().List()
	if err != nil {
		return statusError, err
	}
	podStatus, err := podanalyzer.AnalysePods(frs.Spec.Selector, allPods, time.Now())
	current := make(map[string]int64)
	estimatedCapacity := make(map[string]int64)
	for _, cluster := range clusters {
		lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			return statusError, err
		}
		if exists {
			lrs := lrsObj.(*extensionsv1.ReplicaSet)
			current[cluster.Name] = int64(podStatus[cluster.Name].RunningAndReady) // include pending as well?
			unschedulable := int64(podStatus[cluster.Name].Unschedulable)
			if unschedulable > 0 {
				estimatedCapacity[cluster.Name] = int64(*lrs.Spec.Replicas) - unschedulable
			}
		}
	}
	scheduleResult := frsc.schedule(frs, clusters, current, estimatedCapacity)
	glog.V(4).Infof("Start syncing local replicaset %s: %v", key, scheduleResult)
	fedStatus := extensionsv1.ReplicaSetStatus{ObservedGeneration: frs.Generation}
	operations := make([]fedutil.FederatedOperation, 0)
	for clusterName, replicas := range scheduleResult {
		lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(clusterName, key)
		if err != nil {
			return statusError, err
		}
		lrs := &extensionsv1.ReplicaSet{
			ObjectMeta: fedutil.CopyObjectMeta(frs.ObjectMeta),
			Spec:       frs.Spec,
		}
		specReplicas := int32(replicas)
		lrs.Spec.Replicas = &specReplicas
		if !exists {
			if replicas > 0 {
				frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "CreateInCluster",
					"Creating replicaset in cluster %s", clusterName)
				operations = append(operations, fedutil.FederatedOperation{
					Type:        fedutil.OperationTypeAdd,
					Obj:         lrs,
					ClusterName: clusterName,
				})
			}
		} else {
			currentLrs := lrsObj.(*extensionsv1.ReplicaSet)
			// Update existing replica set, if needed.
			if !fedutil.ObjectMetaAndSpecEquivalent(lrs, currentLrs) {
				frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "UpdateInCluster",
					"Updating replicaset in cluster %s", clusterName)
				operations = append(operations, fedutil.FederatedOperation{
					Type:        fedutil.OperationTypeUpdate,
					Obj:         lrs,
					ClusterName: clusterName,
				})
			}
			fedStatus.Replicas += currentLrs.Status.Replicas
			fedStatus.FullyLabeledReplicas += currentLrs.Status.FullyLabeledReplicas
			// leave the replicaset even if the replicas dropped to 0
		}
	}
	if fedStatus.Replicas != frs.Status.Replicas || fedStatus.FullyLabeledReplicas != frs.Status.FullyLabeledReplicas {
		frs.Status = fedStatus
		_, err = frsc.fedClient.Extensions().ReplicaSets(frs.Namespace).UpdateStatus(frs)
		if err != nil {
			return statusError, err
		}
	}
	if len(operations) == 0 {
		// Everything is in order
		return statusAllOk, nil
	}
	err = frsc.fedUpdater.UpdateWithOnError(operations, updateTimeout, func(op fedutil.FederatedOperation, operror error) {
		frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "FailedUpdateInCluster",
			"Replicaset update in cluster %s failed: %v", op.ClusterName, operror)
	})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", key, err)
		return statusError, err
	}
	// Some operations were made, reconcile after a while.
	return statusNeedRecheck, nil
}
Example 7: reconcileDeployment
//......... portions of this code have been omitted .........
	if err != nil {
		return statusError, err
	}
	podStatus, err := podanalyzer.AnalysePods(fd.Spec.Selector, allPods, time.Now())
	current := make(map[string]int64)
	estimatedCapacity := make(map[string]int64)
	for _, cluster := range clusters {
		ldObj, exists, err := fdc.fedDeploymentInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			return statusError, err
		}
		if exists {
			ld := ldObj.(*extensionsv1.Deployment)
			current[cluster.Name] = int64(podStatus[cluster.Name].RunningAndReady) // include pending as well?
			unschedulable := int64(podStatus[cluster.Name].Unschedulable)
			if unschedulable > 0 {
				estimatedCapacity[cluster.Name] = int64(*ld.Spec.Replicas) - unschedulable
			}
		}
	}
	scheduleResult := fdc.schedule(fd, clusters, current, estimatedCapacity)
	glog.V(4).Infof("Start syncing local deployment %s: %v", key, scheduleResult)
	fedStatus := extensionsv1.DeploymentStatus{ObservedGeneration: fd.Generation}
	operations := make([]fedutil.FederatedOperation, 0)
	for clusterName, replicas := range scheduleResult {
		ldObj, exists, err := fdc.fedDeploymentInformer.GetTargetStore().GetByKey(clusterName, key)
		if err != nil {
			return statusError, err
		}
		// The object can be modified.
		ld := &extensionsv1.Deployment{
			ObjectMeta: fedutil.DeepCopyRelevantObjectMeta(fd.ObjectMeta),
			Spec:       fedutil.DeepCopyApiTypeOrPanic(fd.Spec).(extensionsv1.DeploymentSpec),
		}
		specReplicas := int32(replicas)
		ld.Spec.Replicas = &specReplicas
		if !exists {
			if replicas > 0 {
				fdc.eventRecorder.Eventf(fd, api.EventTypeNormal, "CreateInCluster",
					"Creating deployment in cluster %s", clusterName)
				operations = append(operations, fedutil.FederatedOperation{
					Type:        fedutil.OperationTypeAdd,
					Obj:         ld,
					ClusterName: clusterName,
				})
			}
		} else {
			// TODO: Update only one deployment at a time if update strategy is rolling update.
			currentLd := ldObj.(*extensionsv1.Deployment)
			// Update existing deployment, if needed.
			if !fedutil.ObjectMetaAndSpecEquivalent(ld, currentLd) {
				fdc.eventRecorder.Eventf(fd, api.EventTypeNormal, "UpdateInCluster",
					"Updating deployment in cluster %s", clusterName)
				operations = append(operations, fedutil.FederatedOperation{
					Type:        fedutil.OperationTypeUpdate,
					Obj:         ld,
					ClusterName: clusterName,
				})
				glog.Infof("Updating %s in %s", currentLd.Name, clusterName)
			}
			fedStatus.Replicas += currentLd.Status.Replicas
			fedStatus.AvailableReplicas += currentLd.Status.AvailableReplicas
			fedStatus.UnavailableReplicas += currentLd.Status.UnavailableReplicas
		}
	}
	if fedStatus.Replicas != fd.Status.Replicas ||
		fedStatus.AvailableReplicas != fd.Status.AvailableReplicas ||
		fedStatus.UnavailableReplicas != fd.Status.UnavailableReplicas {
		fd.Status = fedStatus
		_, err = fdc.fedClient.Extensions().Deployments(fd.Namespace).UpdateStatus(fd)
		if err != nil {
			return statusError, err
		}
	}
	if len(operations) == 0 {
		// Everything is in order
		return statusAllOk, nil
	}
	err = fdc.fedUpdater.UpdateWithOnError(operations, updateTimeout, func(op fedutil.FederatedOperation, operror error) {
		fdc.eventRecorder.Eventf(fd, api.EventTypeNormal, "FailedUpdateInCluster",
			"Deployment update in cluster %s failed: %v", op.ClusterName, operror)
	})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", key, err)
		return statusError, err
	}
	// Some operations were made, reconcile after a while.
	return statusNeedRecheck, nil
}
Example 8: reconcileNamespace
func (nc *NamespaceController) reconcileNamespace(namespace string) {
	if !nc.isSynced() {
		nc.deliverNamespace(namespace, nc.clusterAvailableDelay, false)
		return
	}
	baseNamespaceObj, exist, err := nc.namespaceInformerStore.GetByKey(namespace)
	if err != nil {
		glog.Errorf("Failed to query main namespace store for %v: %v", namespace, err)
		nc.deliverNamespace(namespace, 0, true)
		return
	}
	if !exist {
		// Not federated namespace, ignoring.
		return
	}
	baseNamespace := baseNamespaceObj.(*api_v1.Namespace)
	if baseNamespace.DeletionTimestamp != nil {
		if err := nc.delete(baseNamespace); err != nil {
			glog.Errorf("Failed to delete %s: %v", namespace, err)
			nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "DeleteFailed",
				"Namespace delete failed: %v", err)
			nc.deliverNamespace(namespace, 0, true)
		}
		return
	}
	clusters, err := nc.namespaceFederatedInformer.GetReadyClusters()
	if err != nil {
		glog.Errorf("Failed to get cluster list: %v", err)
		nc.deliverNamespace(namespace, nc.clusterAvailableDelay, false)
		return
	}
	operations := make([]util.FederatedOperation, 0)
	for _, cluster := range clusters {
		clusterNamespaceObj, found, err := nc.namespaceFederatedInformer.GetTargetStore().GetByKey(cluster.Name, namespace)
		if err != nil {
			glog.Errorf("Failed to get %s from %s: %v", namespace, cluster.Name, err)
			nc.deliverNamespace(namespace, 0, true)
			return
		}
		desiredNamespace := &api_v1.Namespace{
			ObjectMeta: util.CopyObjectMeta(baseNamespace.ObjectMeta),
			Spec:       baseNamespace.Spec,
		}
		if !found {
			nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "CreateInCluster",
				"Creating namespace in cluster %s", cluster.Name)
			operations = append(operations, util.FederatedOperation{
				Type:        util.OperationTypeAdd,
				Obj:         desiredNamespace,
				ClusterName: cluster.Name,
			})
		} else {
			clusterNamespace := clusterNamespaceObj.(*api_v1.Namespace)
			// Update existing namespace, if needed.
			if !util.ObjectMetaAndSpecEquivalent(desiredNamespace, clusterNamespace) {
				nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "UpdateInCluster",
					"Updating namespace in cluster %s", cluster.Name)
				operations = append(operations, util.FederatedOperation{
					Type:        util.OperationTypeUpdate,
					Obj:         desiredNamespace,
					ClusterName: cluster.Name,
				})
			}
		}
	}
	if len(operations) == 0 {
		// Everything is in order
		return
	}
	err = nc.federatedUpdater.UpdateWithOnError(operations, nc.updateTimeout, func(op util.FederatedOperation, operror error) {
		nc.eventRecorder.Eventf(baseNamespace, api.EventTypeNormal, "UpdateInClusterFailed",
			"Namespace update in cluster %s failed: %v", op.ClusterName, operror)
	})
	if err != nil {
		glog.Errorf("Failed to execute updates for %s: %v", namespace, err)
		nc.deliverNamespace(namespace, 0, true)
		return
	}
	// Everything is in order, but let's be doubly sure
	nc.deliverNamespace(namespace, nc.namespaceReviewDelay, false)
}