This article collects typical usage examples of the Golang method k8s.io/kubernetes/pkg/types.NamespacedName.String. If you are wondering what NamespacedName.String does, how to use it, and what it looks like in practice, the curated code samples below should help. You can also explore the enclosing type k8s.io/kubernetes/pkg/types.NamespacedName in more detail.
The following shows 14 code examples of NamespacedName.String, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
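For orientation: NamespacedName is a small struct with Namespace and Name fields, and its String method joins them as "namespace/name", the canonical key format that the caches and delivery queues in the examples below rely on. Here is a minimal sketch; note that current Kubernetes releases expose the type via k8s.io/apimachinery/pkg/types, while the examples below predate that split and use the older in-tree path.

package main

import (
	"fmt"

	// Current releases expose this type via k8s.io/apimachinery/pkg/types;
	// the examples below use the older in-tree path k8s.io/kubernetes/pkg/types.
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	name := types.NamespacedName{Namespace: "kube-system", Name: "kube-dns"}
	// String() renders the canonical "namespace/name" store key.
	fmt.Println(name.String()) // Output: kube-system/kube-dns
}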
Example 1: deliverSecret
// Adds backoff to delay if this delivery is related to some failure. Resets backoff if there was no failure.
func (secretcontroller *SecretController) deliverSecret(secret types.NamespacedName, delay time.Duration, failed bool) {
key := secret.String()
if failed {
secretcontroller.secretBackoff.Next(key, time.Now())
delay = delay + secretcontroller.secretBackoff.Get(key)
} else {
secretcontroller.secretBackoff.Reset(key)
}
secretcontroller.secretDeliverer.DeliverAfter(key, &secret, delay)
}
Example 2: deliverConfigMap
// Adds backoff to delay if this delivery is related to some failure. Resets backoff if there was no failure.
func (configmapcontroller *ConfigMapController) deliverConfigMap(configmap types.NamespacedName, delay time.Duration, failed bool) {
key := configmap.String()
if failed {
configmapcontroller.configmapBackoff.Next(key, time.Now())
delay = delay + configmapcontroller.configmapBackoff.Get(key)
} else {
configmapcontroller.configmapBackoff.Reset(key)
}
configmapcontroller.configmapDeliverer.DeliverAfter(key, &configmap, delay)
}
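Examples 1 and 2 are the same pattern instantiated for two resource types: derive a stable string key via String(), grow a per-key delay on each failure, and reset it on success. The following self-contained sketch illustrates that keyed-backoff idea; keyedBackoff is a hypothetical stand-in for the controllers' backoff dependency, not the actual Kubernetes flowcontrol API.

package main

import (
	"fmt"
	"time"
)

// keyedBackoff is a hypothetical, minimal stand-in for the per-key backoff
// the controllers above depend on: each failure doubles the delay for that
// key (up to a cap), and a success resets it.
type keyedBackoff struct {
	initial, max time.Duration
	delays       map[string]time.Duration
}

func newKeyedBackoff(initial, max time.Duration) *keyedBackoff {
	return &keyedBackoff{initial: initial, max: max, delays: map[string]time.Duration{}}
}

// Next grows the delay for key; call it after a failed delivery.
func (b *keyedBackoff) Next(key string) {
	d, ok := b.delays[key]
	if !ok {
		d = b.initial / 2
	}
	if d = d * 2; d > b.max {
		d = b.max
	}
	b.delays[key] = d
}

// Get returns the current extra delay for key (zero if none recorded).
func (b *keyedBackoff) Get(key string) time.Duration { return b.delays[key] }

// Reset clears the penalty for key; call it after a successful delivery.
func (b *keyedBackoff) Reset(key string) { delete(b.delays, key) }

func main() {
	backoff := newKeyedBackoff(time.Second, time.Minute)
	key := "default/my-secret" // the shape NamespacedName.String() produces
	for i := 0; i < 3; i++ {
		backoff.Next(key) // simulate three failed deliveries
		fmt.Println("extra delay:", backoff.Get(key))
	}
	backoff.Reset(key)
	fmt.Println("after reset:", backoff.Get(key))
}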
Example 3: deliverIngress
func (ic *IngressController) deliverIngress(ingress types.NamespacedName, delay time.Duration, failed bool) {
glog.V(4).Infof("Delivering ingress: %s with delay: %v error: %v", ingress, delay, failed)
key := ingress.String()
if failed {
ic.ingressBackoff.Next(key, time.Now())
delay = delay + ic.ingressBackoff.Get(key)
} else {
ic.ingressBackoff.Reset(key)
}
ic.ingressDeliverer.DeliverAfter(key, ingress, delay)
}
Example 4: processDelta
// Returns an error if processing the delta failed, along with a time.Duration
// indicating when processing should be retried: zero means do not retry;
// otherwise, retry after that Duration.
func (s *ServiceController) processDelta(delta *cache.Delta) (error, time.Duration) {
var (
namespacedName types.NamespacedName
cachedService *cachedService
)
deltaService, ok := delta.Object.(*api.Service)
if ok {
namespacedName.Namespace = deltaService.Namespace
namespacedName.Name = deltaService.Name
cachedService = s.cache.getOrCreate(namespacedName.String())
} else {
// If the DeltaFIFO saw a key in our cache that it didn't know about, it
// can send a deletion with an unknown state. Grab the service from our
// cache for deleting.
key, ok := delta.Object.(cache.DeletedFinalStateUnknown)
if !ok {
return fmt.Errorf("delta contained object that wasn't a service or a deleted key: %+v", delta), doNotRetry
}
cachedService, ok = s.cache.get(key.Key)
if !ok {
return fmt.Errorf("service %s not in cache even though the watcher thought it was. Ignoring the deletion.", key), doNotRetry
}
deltaService = cachedService.lastState
delta.Object = deltaService
namespacedName = types.NamespacedName{Namespace: deltaService.Namespace, Name: deltaService.Name}
}
glog.V(2).Infof("Got new %s delta for service: %v", delta.Type, namespacedName)
// Ensure that no other goroutine will interfere with our processing of the
// service.
cachedService.mu.Lock()
defer cachedService.mu.Unlock()
// Get the most recent state of the service from the API directly rather than
// trusting the body of the delta. This avoids update re-ordering problems.
// TODO: Handle sync delta types differently rather than doing a get on every
// service every time we sync?
service, err := s.kubeClient.Core().Services(namespacedName.Namespace).Get(namespacedName.Name)
if err != nil && !errors.IsNotFound(err) {
glog.Warningf("Failed to get most recent state of service %v from API (will retry): %v", namespacedName, err)
return err, cachedService.nextRetryDelay()
}
if errors.IsNotFound(err) {
glog.V(2).Infof("Service %v not found, ensuring load balancer is deleted", namespacedName)
s.eventRecorder.Event(deltaService, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
err := s.balancer.EnsureLoadBalancerDeleted(deltaService)
if err != nil {
message := "Error deleting load balancer (will retry): " + err.Error()
s.eventRecorder.Event(deltaService, api.EventTypeWarning, "DeletingLoadBalancerFailed", message)
return err, cachedService.nextRetryDelay()
}
s.eventRecorder.Event(deltaService, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
s.cache.delete(namespacedName.String())
cachedService.resetRetryDelay()
return nil, doNotRetry
}
// Update the cached service (used above for populating synthetic deletes)
cachedService.lastState = service
err, retry := s.createLoadBalancerIfNeeded(namespacedName, service, cachedService.appliedState)
if err != nil {
message := "Error creating load balancer"
if retry {
message += " (will retry): "
} else {
message += " (will not retry): "
}
message += err.Error()
s.eventRecorder.Event(service, api.EventTypeWarning, "CreatingLoadBalancerFailed", message)
return err, cachedService.nextRetryDelay()
}
// Always update the cache upon success.
// NOTE: Since we update the cached service if and only if we successfully
// processed it, a cached service being nil implies that it hasn't yet
// been successfully processed.
cachedService.appliedState = service
s.cache.set(namespacedName.String(), cachedService)
cachedService.resetRetryDelay()
return nil, doNotRetry
}
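The (error, time.Duration) return pair in Example 4 leaves the retry policy to the caller: zero means drop the item, anything else schedules a retry after that delay. A hypothetical worker loop consuming that contract could look like this; processOnce stands in for processDelta and is not part of the original code.

package main

import (
	"errors"
	"fmt"
	"time"
)

// doNotRetry mirrors the zero-Duration "do not retry" convention above.
const doNotRetry = 0 * time.Second

// processOnce is a hypothetical stand-in for processDelta's contract: it
// returns an error plus the delay after which the caller should retry.
func processOnce(attempt int) (error, time.Duration) {
	if attempt < 2 {
		return errors.New("transient failure"), time.Duration(attempt+1) * 100 * time.Millisecond
	}
	return nil, doNotRetry
}

func main() {
	for attempt := 0; ; attempt++ {
		err, retryAfter := processOnce(attempt)
		if err == nil {
			fmt.Println("processed successfully")
			return
		}
		if retryAfter == doNotRetry {
			fmt.Println("giving up:", err)
			return
		}
		fmt.Printf("attempt %d failed (%v); retrying in %v\n", attempt, err, retryAfter)
		time.Sleep(retryAfter)
	}
}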
Example 5: ensureLoadBalancer
func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBalancerName string, listeners []*elb.Listener, subnetIDs []string, securityGroupIDs []string, internalELB, proxyProtocol bool) (*elb.LoadBalancerDescription, error) {
loadBalancer, err := s.describeLoadBalancer(loadBalancerName)
if err != nil {
return nil, err
}
dirty := false
if loadBalancer == nil {
createRequest := &elb.CreateLoadBalancerInput{}
createRequest.LoadBalancerName = aws.String(loadBalancerName)
createRequest.Listeners = listeners
if internalELB {
createRequest.Scheme = aws.String("internal")
}
// We are supposed to specify one subnet per AZ.
// TODO: What happens if we have more than one subnet per AZ?
createRequest.Subnets = stringPointerArray(subnetIDs)
createRequest.SecurityGroups = stringPointerArray(securityGroupIDs)
createRequest.Tags = []*elb.Tag{
{Key: aws.String(TagNameKubernetesCluster), Value: aws.String(s.getClusterName())},
{Key: aws.String(TagNameKubernetesService), Value: aws.String(namespacedName.String())},
}
glog.Infof("Creating load balancer for %v with name: ", namespacedName, loadBalancerName)
_, err := s.elb.CreateLoadBalancer(createRequest)
if err != nil {
return nil, err
}
if proxyProtocol {
err = s.createProxyProtocolPolicy(loadBalancerName)
if err != nil {
return nil, err
}
for _, listener := range listeners {
glog.V(2).Infof("Adjusting AWS loadbalancer proxy protocol on node port %d. Setting to true", *listener.InstancePort)
err := s.setBackendPolicies(loadBalancerName, *listener.InstancePort, []*string{aws.String(ProxyProtocolPolicyName)})
if err != nil {
return nil, err
}
}
}
dirty = true
} else {
// TODO: Sync internal vs non-internal
{
// Sync subnets
expected := sets.NewString(subnetIDs...)
actual := stringSetFromPointers(loadBalancer.Subnets)
additions := expected.Difference(actual)
removals := actual.Difference(expected)
if removals.Len() != 0 {
request := &elb.DetachLoadBalancerFromSubnetsInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.Subnets = stringSetToPointers(removals)
glog.V(2).Info("Detaching load balancer from removed subnets")
_, err := s.elb.DetachLoadBalancerFromSubnets(request)
if err != nil {
return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %v", err)
}
dirty = true
}
if additions.Len() != 0 {
request := &elb.AttachLoadBalancerToSubnetsInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.Subnets = stringSetToPointers(additions)
glog.V(2).Info("Attaching load balancer to added subnets")
_, err := s.elb.AttachLoadBalancerToSubnets(request)
if err != nil {
return nil, fmt.Errorf("error attaching AWS loadbalancer to subnets: %v", err)
}
dirty = true
}
}
{
// Sync security groups
expected := sets.NewString(securityGroupIDs...)
actual := stringSetFromPointers(loadBalancer.SecurityGroups)
if !expected.Equal(actual) {
// This call just replaces the security groups, unlike e.g. subnets (!)
request := &elb.ApplySecurityGroupsToLoadBalancerInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.SecurityGroups = stringPointerArray(securityGroupIDs)
glog.V(2).Info("Applying updated security groups to load balancer")
_, err := s.elb.ApplySecurityGroupsToLoadBalancer(request)
if err != nil {
//......... the rest of this code is omitted .........
Example 6: processDelta
// Returns an error if processing the delta failed, along with a boolean
// indicator of whether the processing should be retried.
func (s *ServiceController) processDelta(delta *cache.Delta) (error, bool) {
service, ok := delta.Object.(*api.Service)
var namespacedName types.NamespacedName
var cachedService *cachedService
if !ok {
// If the DeltaFIFO saw a key in our cache that it didn't know about, it
// can send a deletion with an unknown state. Grab the service from our
// cache for deleting.
key, ok := delta.Object.(cache.DeletedFinalStateUnknown)
if !ok {
return fmt.Errorf("Delta contained object that wasn't a service or a deleted key: %+v", delta), notRetryable
}
cachedService, ok = s.cache.get(key.Key)
if !ok {
return fmt.Errorf("Service %s not in cache even though the watcher thought it was. Ignoring the deletion.", key), notRetryable
}
service = cachedService.lastState
delta.Object = cachedService.lastState
namespacedName = types.NamespacedName{Namespace: service.Namespace, Name: service.Name}
} else {
namespacedName.Namespace = service.Namespace
namespacedName.Name = service.Name
cachedService = s.cache.getOrCreate(namespacedName.String())
}
glog.V(2).Infof("Got new %s delta for service: %+v", delta.Type, service)
// Ensure that no other goroutine will interfere with our processing of the
// service.
cachedService.mu.Lock()
defer cachedService.mu.Unlock()
// Update the cached service (used above for populating synthetic deletes)
cachedService.lastState = service
// TODO: Handle added, updated, and sync differently?
switch delta.Type {
case cache.Added:
fallthrough
case cache.Updated:
fallthrough
case cache.Sync:
err, retry := s.createLoadBalancerIfNeeded(namespacedName, service, cachedService.appliedState)
if err != nil {
message := "Error creating load balancer"
if retry {
message += " (will retry): "
} else {
message += " (will not retry): "
}
message += err.Error()
s.eventRecorder.Event(service, "CreatingLoadBalancerFailed", message)
return err, retry
}
// Always update the cache upon success.
// NOTE: Since we update the cached service if and only if we successfully
// processed it, a cached service being nil implies that it hasn't yet
// been successfully processed.
cachedService.appliedState = service
s.cache.set(namespacedName.String(), cachedService)
case cache.Deleted:
s.eventRecorder.Event(service, "DeletingLoadBalancer", "Deleting load balancer")
err := s.balancer.EnsureTCPLoadBalancerDeleted(s.loadBalancerName(service), s.zone.Region)
if err != nil {
message := "Error deleting load balancer (will retry): " + err.Error()
s.eventRecorder.Event(service, "DeletingLoadBalancerFailed", message)
return err, retryable
}
s.eventRecorder.Event(service, "DeletedLoadBalancer", "Deleted load balancer")
s.cache.delete(namespacedName.String())
default:
glog.Errorf("Unexpected delta type: %v", delta.Type)
}
return nil, notRetryable
}
Example 7: reconcileIngress
func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
glog.V(4).Infof("Reconciling ingress %q for all clusters", ingress)
if !ic.isSynced() {
ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
return
}
key := ingress.String()
baseIngressObjFromStore, exist, err := ic.ingressInformerStore.GetByKey(key)
if err != nil {
glog.Errorf("Failed to query main ingress store for %v: %v", ingress, err)
ic.deliverIngress(ingress, 0, true)
return
}
if !exist {
// Not federated ingress, ignoring.
glog.V(4).Infof("Ingress %q is not federated. Ignoring.", ingress)
return
}
baseIngressObj, err := conversion.NewCloner().DeepCopy(baseIngressObjFromStore)
baseIngress, ok := baseIngressObj.(*extensionsv1beta1.Ingress)
if err != nil || !ok {
glog.Errorf("Internal Error %v: Object retrieved from ingressInformerStore with key %q is not of correct type *extensionsv1beta1.Ingress: %v", err, key, baseIngressObj)
// Without this early return, baseIngress would be dereferenced as nil below.
ic.deliverIngress(ingress, 0, true)
return
}
glog.V(4).Infof("Base (federated) ingress: %v", baseIngress)
if baseIngress.DeletionTimestamp != nil {
if err := ic.delete(baseIngress); err != nil {
glog.Errorf("Failed to delete %s: %v", ingress, err)
ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "DeleteFailed",
"Ingress delete failed: %v", err)
ic.deliverIngress(ingress, 0, true)
}
return
}
glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for ingress: %s",
baseIngress.Name)
// Add the required finalizers before creating a ingress in underlying clusters.
updatedIngressObj, err := ic.deletionHelper.EnsureFinalizers(baseIngress)
if err != nil {
glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in ingress %s: %v",
baseIngress.Name, err)
ic.deliverIngress(ingress, 0, true)
return
}
baseIngress = updatedIngressObj.(*extensionsv1beta1.Ingress)
glog.V(3).Infof("Syncing ingress %s in underlying clusters", baseIngress.Name)
clusters, err := ic.ingressFederatedInformer.GetReadyClusters()
if err != nil {
glog.Errorf("Failed to get cluster list: %v", err)
ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
return
} else {
glog.V(4).Infof("Found %d ready clusters across which to reconcile ingress %q", len(clusters), ingress)
}
operations := make([]util.FederatedOperation, 0)
for _, cluster := range clusters {
baseIPName, baseIPAnnotationExists := baseIngress.ObjectMeta.Annotations[staticIPNameKeyWritable]
firstClusterName, firstClusterExists := baseIngress.ObjectMeta.Annotations[firstClusterAnnotation]
clusterIngressObj, clusterIngressFound, err := ic.ingressFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
if err != nil {
glog.Errorf("Failed to get cached ingress %s for cluster %s, will retry: %v", ingress, cluster.Name, err)
ic.deliverIngress(ingress, 0, true)
return
}
desiredIngress := &extensionsv1beta1.Ingress{}
objMeta, err := conversion.NewCloner().DeepCopy(baseIngress.ObjectMeta)
if err != nil {
glog.Errorf("Error deep copying ObjectMeta: %v", err)
}
objSpec, err := conversion.NewCloner().DeepCopy(baseIngress.Spec)
if err != nil {
glog.Errorf("Error deep copying Spec: %v", err)
}
desiredIngress.ObjectMeta, ok = objMeta.(v1.ObjectMeta)
if !ok {
glog.Errorf("Internal error: Failed to cast to v1.ObjectMeta: %v", objMeta)
}
desiredIngress.Spec, ok = objSpec.(extensionsv1beta1.IngressSpec)
if !ok {
glog.Errorf("Internal error: Failed to cast to extensionsv1beta1.IngressSpec: %v", objSpec)
}
glog.V(4).Infof("Desired Ingress: %v", desiredIngress)
if !clusterIngressFound {
glog.V(4).Infof("No existing Ingress %s in cluster %s - checking if appropriate to queue a create operation", ingress, cluster.Name)
// We can't supply server-created fields when creating a new object.
desiredIngress.ObjectMeta = util.DeepCopyRelevantObjectMeta(baseIngress.ObjectMeta)
ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "CreateInCluster",
"Creating ingress in cluster %s", cluster.Name)
// We always first create an ingress in the first available cluster. Once that ingress
// has been created and allocated a global IP (visible via an annotation),
// we record that annotation on the federated ingress, and create all other cluster
//......... the rest of this code is omitted .........
Example 8: reconcileConfigMapForCluster
/*
reconcileConfigMapForCluster ensures that the configmap for the ingress controller in the cluster has objectmeta.data.UID
consistent with all the other clusters in the federation. If clusterName == allClustersKey, then all available clusters'
configmaps are reconciled.
*/
func (ic *IngressController) reconcileConfigMapForCluster(clusterName string) {
glog.V(4).Infof("Reconciling ConfigMap for cluster(s) %q", clusterName)
if !ic.isSynced() {
ic.configMapDeliverer.DeliverAfter(clusterName, nil, ic.clusterAvailableDelay)
return
}
ingressList := ic.ingressInformerStore.List()
if len(ingressList) <= 0 {
glog.V(4).Infof("No federated ingresses, ignore reconcile config map.")
return
}
if clusterName == allClustersKey {
clusters, err := ic.configMapFederatedInformer.GetReadyClusters()
if err != nil {
glog.Errorf("Failed to get ready clusters. redelivering %q: %v", clusterName, err)
ic.configMapDeliverer.DeliverAfter(clusterName, nil, ic.clusterAvailableDelay)
return
}
for _, cluster := range clusters {
glog.V(4).Infof("Delivering ConfigMap for cluster(s) %q", clusterName)
ic.configMapDeliverer.DeliverAt(cluster.Name, nil, time.Now())
}
return
} else {
cluster, found, err := ic.configMapFederatedInformer.GetReadyCluster(clusterName)
if err != nil || !found {
glog.Errorf("Internal error: Cluster %q queued for configmap reconciliation, but not found. Will try again later: error = %v", clusterName, err)
ic.configMapDeliverer.DeliverAfter(clusterName, nil, ic.clusterAvailableDelay)
return
}
uidConfigMapNamespacedName := types.NamespacedName{Name: uidConfigMapName, Namespace: uidConfigMapNamespace}
configMapObj, found, err := ic.configMapFederatedInformer.GetTargetStore().GetByKey(cluster.Name, uidConfigMapNamespacedName.String())
if !found || err != nil {
glog.Errorf("Failed to get ConfigMap %q for cluster %q. Will try again later: %v", uidConfigMapNamespacedName, cluster.Name, err)
ic.configMapDeliverer.DeliverAfter(clusterName, nil, ic.configMapReviewDelay)
return
}
glog.V(4).Infof("Successfully got ConfigMap %q for cluster %q.", uidConfigMapNamespacedName, clusterName)
configMap, ok := configMapObj.(*v1.ConfigMap)
if !ok {
glog.Errorf("Internal error: The object in the ConfigMap cache for cluster %q configmap %q is not a *ConfigMap", cluster.Name, uidConfigMapNamespacedName)
return
}
ic.reconcileConfigMap(cluster, configMap)
return
}
}
Example 9: reconcileConfigMap
func (configmapcontroller *ConfigMapController) reconcileConfigMap(configmap types.NamespacedName) {
if !configmapcontroller.isSynced() {
glog.V(4).Infof("Configmap controller not synced")
configmapcontroller.deliverConfigMap(configmap, configmapcontroller.clusterAvailableDelay, false)
return
}
key := configmap.String()
baseConfigMapObj, exist, err := configmapcontroller.configmapInformerStore.GetByKey(key)
if err != nil {
glog.Errorf("Failed to query main configmap store for %v: %v", key, err)
configmapcontroller.deliverConfigMap(configmap, 0, true)
return
}
if !exist {
// Not federated configmap, ignoring.
glog.V(8).Infof("Skipping not federated config map: %s", key)
return
}
baseConfigMap := baseConfigMapObj.(*apiv1.ConfigMap)
clusters, err := configmapcontroller.configmapFederatedInformer.GetReadyClusters()
if err != nil {
glog.Errorf("Failed to get cluster list: %v, retrying shortly", err)
configmapcontroller.deliverConfigMap(configmap, configmapcontroller.clusterAvailableDelay, false)
return
}
operations := make([]util.FederatedOperation, 0)
for _, cluster := range clusters {
clusterConfigMapObj, found, err := configmapcontroller.configmapFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
if err != nil {
glog.Errorf("Failed to get %s from %s: %v, retrying shortly", key, cluster.Name, err)
configmapcontroller.deliverConfigMap(configmap, 0, true)
return
}
// Do not modify data.
desiredConfigMap := &apiv1.ConfigMap{
ObjectMeta: util.DeepCopyRelevantObjectMeta(baseConfigMap.ObjectMeta),
Data: baseConfigMap.Data,
}
if !found {
configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "CreateInCluster",
"Creating configmap in cluster %s", cluster.Name)
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeAdd,
Obj: desiredConfigMap,
ClusterName: cluster.Name,
})
} else {
clusterConfigMap := clusterConfigMapObj.(*apiv1.ConfigMap)
// Update existing configmap, if needed.
if !util.ConfigMapEquivalent(desiredConfigMap, clusterConfigMap) {
configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "UpdateInCluster",
"Updating configmap in cluster %s", cluster.Name)
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeUpdate,
Obj: desiredConfigMap,
ClusterName: cluster.Name,
})
}
}
}
if len(operations) == 0 {
// Everything is in order
glog.V(8).Infof("No operations needed for %s", key)
return
}
err = configmapcontroller.federatedUpdater.UpdateWithOnError(operations, configmapcontroller.updateTimeout,
func(op util.FederatedOperation, operror error) {
configmapcontroller.eventRecorder.Eventf(baseConfigMap, api.EventTypeNormal, "UpdateInClusterFailed",
"ConfigMap update in cluster %s failed: %v", op.ClusterName, operror)
})
if err != nil {
glog.Errorf("Failed to execute updates for %s: %v, retrying shortly", key, err)
configmapcontroller.deliverConfigMap(configmap, 0, true)
return
}
}
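Example 9 (and the secret variants in Examples 12 and 14 below) follows the same reconcile shape: build a desired object from the federated copy, compare it per cluster against what is cached, and emit Add or Update operations only where they differ. Here is a self-contained sketch of that diff step; Operation and the equality check are simplified stand-ins for util.FederatedOperation and util.ConfigMapEquivalent.

package main

import "fmt"

// Operation is a simplified stand-in for util.FederatedOperation.
type Operation struct {
	Type    string // "add" or "update"
	Cluster string
	Data    map[string]string
}

// planOperations compares the desired data against each cluster's current
// data and returns only the operations needed to converge them.
func planOperations(desired map[string]string, current map[string]map[string]string, clusters []string) []Operation {
	ops := []Operation{}
	for _, cluster := range clusters {
		existing, found := current[cluster]
		switch {
		case !found:
			ops = append(ops, Operation{Type: "add", Cluster: cluster, Data: desired})
		case !equal(existing, desired): // stand-in for util.ConfigMapEquivalent
			ops = append(ops, Operation{Type: "update", Cluster: cluster, Data: desired})
		}
	}
	return ops
}

func equal(a, b map[string]string) bool {
	if len(a) != len(b) {
		return false
	}
	for k, v := range a {
		if b[k] != v {
			return false
		}
	}
	return true
}

func main() {
	desired := map[string]string{"uid": "1234"}
	current := map[string]map[string]string{
		"cluster-a": {"uid": "1234"}, // already in sync: no operation
		"cluster-b": {"uid": "9999"}, // drifted: update
	}
	// cluster-c has no cached copy, so it gets an add operation.
	fmt.Println(planOperations(desired, current, []string{"cluster-a", "cluster-b", "cluster-c"}))
}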
Example 10: pairScalesWithScaleRefs
// pairScalesWithScaleRefs takes some subresource references, a map of new scales for those subresource references,
// and annotations from an existing object. It merges the scales and references found in the existing annotations
with the new data (preferring the new scale when it is present and non-zero, and falling back to the old scale otherwise),
// and returns a slice of RecordedScaleReferences suitable for using as the new annotation value.
func pairScalesWithScaleRefs(serviceName types.NamespacedName, annotations map[string]string, rawScaleRefs map[unidlingapi.CrossGroupObjectReference]struct{}, scales map[unidlingapi.CrossGroupObjectReference]int32) ([]unidlingapi.RecordedScaleReference, error) {
oldTargetsRaw, hasOldTargets := annotations[unidlingapi.UnidleTargetAnnotation]
scaleRefs := make([]unidlingapi.RecordedScaleReference, 0, len(rawScaleRefs))
// initialize the list of new annotations
for rawScaleRef := range rawScaleRefs {
scaleRefs = append(scaleRefs, unidlingapi.RecordedScaleReference{
CrossGroupObjectReference: rawScaleRef,
Replicas: 0,
})
}
// if the new preserved scale would be 0, see if we have an old scale that we can use instead
if hasOldTargets {
var oldTargets []unidlingapi.RecordedScaleReference
oldTargetsSet := make(map[unidlingapi.CrossGroupObjectReference]int)
if err := json.Unmarshal([]byte(oldTargetsRaw), &oldTargets); err != nil {
return nil, fmt.Errorf("unable to extract existing scale information from endpoints %s: %v", serviceName.String(), err)
}
for i, target := range oldTargets {
oldTargetsSet[target.CrossGroupObjectReference] = i
}
// figure out which new targets were already present...
for _, newScaleRef := range scaleRefs {
if oldTargetInd, ok := oldTargetsSet[newScaleRef.CrossGroupObjectReference]; ok {
if newScale, ok := scales[newScaleRef.CrossGroupObjectReference]; !ok || newScale == 0 {
scales[newScaleRef.CrossGroupObjectReference] = oldTargets[oldTargetInd].Replicas
}
delete(oldTargetsSet, newScaleRef.CrossGroupObjectReference)
}
}
// ...and add in any existing targets not already on the new list to the new list
for _, ind := range oldTargetsSet {
scaleRefs = append(scaleRefs, oldTargets[ind])
}
}
for i := range scaleRefs {
scaleRef := &scaleRefs[i]
newScale, ok := scales[scaleRef.CrossGroupObjectReference]
if !ok || newScale == 0 {
newScale = 1
if scaleRef.Replicas != 0 {
newScale = scaleRef.Replicas
}
}
scaleRef.Replicas = newScale
}
return scaleRefs, nil
}
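The merge rule in Example 10 (a non-zero new scale wins, otherwise the previously recorded replica count, otherwise a default of 1) is easiest to see in isolation. Below is a self-contained sketch with simplified types; ScaleRef is a hypothetical stand-in for unidlingapi.RecordedScaleReference, and the real code round-trips these records through a JSON-encoded annotation.

package main

import "fmt"

// ScaleRef is a simplified, hypothetical stand-in for
// unidlingapi.RecordedScaleReference: a target plus its preserved replica count.
type ScaleRef struct {
	Name     string
	Replicas int32
}

// mergeScales applies Example 10's precedence: a non-zero new scale wins,
// otherwise the previously recorded replica count, otherwise a default of 1.
func mergeScales(refs []ScaleRef, newScales map[string]int32) []ScaleRef {
	for i := range refs {
		scale, ok := newScales[refs[i].Name]
		if !ok || scale == 0 {
			scale = 1
			if refs[i].Replicas != 0 {
				scale = refs[i].Replicas
			}
		}
		refs[i].Replicas = scale
	}
	return refs
}

func main() {
	refs := []ScaleRef{{Name: "frontend", Replicas: 3}, {Name: "worker", Replicas: 0}}
	merged := mergeScales(refs, map[string]int32{"frontend": 0, "worker": 5})
	fmt.Println(merged) // frontend keeps its recorded 3; worker takes the new 5
}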
Example 11: reconcileIngress
func (ic *IngressController) reconcileIngress(ingress types.NamespacedName) {
glog.V(4).Infof("Reconciling ingress %q", ingress)
if !ic.isSynced() {
ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
return
}
key := ingress.String()
baseIngressObj, exist, err := ic.ingressInformerStore.GetByKey(key)
if err != nil {
glog.Errorf("Failed to query main ingress store for %v: %v", ingress, err)
ic.deliverIngress(ingress, 0, true)
return
}
if !exist {
// Not federated ingress, ignoring.
glog.V(4).Infof("Ingress %q is not federated. Ignoring.", ingress)
return
}
baseIngress := baseIngressObj.(*extensions_v1beta1.Ingress)
clusters, err := ic.ingressFederatedInformer.GetReadyClusters()
if err != nil {
glog.Errorf("Failed to get cluster list: %v", err)
ic.deliverIngress(ingress, ic.clusterAvailableDelay, false)
return
}
operations := make([]util.FederatedOperation, 0)
for clusterIndex, cluster := range clusters {
_, baseIPExists := baseIngress.ObjectMeta.Annotations[staticIPAnnotationKey]
clusterIngressObj, found, err := ic.ingressFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
if err != nil {
glog.Errorf("Failed to get %s from %s: %v", ingress, cluster.Name, err)
ic.deliverIngress(ingress, 0, true)
return
}
desiredIngress := &extensions_v1beta1.Ingress{
ObjectMeta: baseIngress.ObjectMeta,
Spec: baseIngress.Spec,
}
if !found {
// We can't supply server-created fields when creating a new object.
desiredIngress.ObjectMeta.ResourceVersion = ""
desiredIngress.ObjectMeta.UID = ""
ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "CreateInCluster",
"Creating ingress in cluster %s", cluster.Name)
// We always first create an ingress in the first available cluster. Once that ingress
// has been created and allocated a global IP (visible via an annotation),
// we record that annotation on the federated ingress, and create all other cluster
// ingresses with that same global IP.
// Note: If the first cluster becomes (e.g. temporarily) unavailable, the second cluster will be allocated
// index 0, but eventually all ingresses will share the single global IP recorded in the annotation
// of the federated ingress.
if baseIPExists || (clusterIndex == 0) {
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeAdd,
Obj: desiredIngress,
ClusterName: cluster.Name,
})
}
} else {
clusterIngress := clusterIngressObj.(*extensions_v1beta1.Ingress)
glog.V(4).Infof("Found existing Ingress %s in cluster %s - checking if update is required", ingress, cluster.Name)
clusterIPName, clusterIPExists := clusterIngress.ObjectMeta.Annotations[staticIPAnnotationKey]
if !baseIPExists && clusterIPExists {
// Add annotation to federated ingress via API.
original, err := ic.federatedApiClient.Extensions().Ingresses(baseIngress.Namespace).Get(baseIngress.Name)
if err == nil {
original.ObjectMeta.Annotations[staticIPAnnotationKey] = clusterIPName
if _, err = ic.federatedApiClient.Extensions().Ingresses(baseIngress.Namespace).Update(original); err != nil {
glog.Errorf("Failed to add static IP annotation to federated ingress %q: %v", ingress, err)
}
} else {
glog.Errorf("Failed to get federated ingress %q: %v", ingress, err)
}
}
// Update existing ingress, if needed.
if !util.ObjectMetaIsEquivalent(desiredIngress.ObjectMeta, clusterIngress.ObjectMeta) ||
!reflect.DeepEqual(desiredIngress.Spec, clusterIngress.Spec) {
// TODO: In some cases Ingress controllers in the clusters add annotations, so we ideally need to exclude those from
// the equivalence comparison to cut down on unnecessary updates.
glog.V(4).Infof("Ingress %s in cluster %s needs an update: cluster ingress %v is not equivalent to federated ingress %v", ingress, cluster.Name, clusterIngress, desiredIngress)
// We need to use server-created fields from the cluster, not the desired object when updating.
desiredIngress.ObjectMeta.ResourceVersion = clusterIngress.ObjectMeta.ResourceVersion
desiredIngress.ObjectMeta.UID = clusterIngress.ObjectMeta.UID
// Merge any annotations on the federated ingress onto the underlying cluster ingress,
// overwriting duplicates.
// TODO: We should probably use a PATCH operation for this instead.
for key, val := range baseIngress.ObjectMeta.Annotations {
desiredIngress.ObjectMeta.Annotations[key] = val
}
ic.eventRecorder.Eventf(baseIngress, api.EventTypeNormal, "UpdateInCluster",
"Updating ingress in cluster %s", cluster.Name)
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeUpdate,
//......... the rest of this code is omitted .........
Example 12: reconcileSecret
func (secretcontroller *SecretController) reconcileSecret(secret types.NamespacedName) {
if !secretcontroller.isSynced() {
secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
return
}
key := secret.String()
baseSecretObjFromStore, exist, err := secretcontroller.secretInformerStore.GetByKey(key)
if err != nil {
glog.Errorf("Failed to query main secret store for %v: %v", key, err)
secretcontroller.deliverSecret(secret, 0, true)
return
}
if !exist {
// Not federated secret, ignoring.
return
}
// Create a copy before modifying the obj to prevent race condition with
// other readers of obj from store.
baseSecretObj, err := api.Scheme.DeepCopy(baseSecretObjFromStore)
baseSecret, ok := baseSecretObj.(*apiv1.Secret)
if err != nil || !ok {
glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err)
secretcontroller.deliverSecret(secret, 0, true)
return
}
if baseSecret.DeletionTimestamp != nil {
if err := secretcontroller.delete(baseSecret); err != nil {
glog.Errorf("Failed to delete %s: %v", secret, err)
secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "DeleteFailed",
"Secret delete failed: %v", err)
secretcontroller.deliverSecret(secret, 0, true)
}
return
}
glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for secret: %s",
baseSecret.Name)
// Add the required finalizers before creating a secret in underlying clusters.
updatedSecretObj, err := secretcontroller.deletionHelper.EnsureFinalizers(baseSecret)
if err != nil {
glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in secret %s: %v",
baseSecret.Name, err)
secretcontroller.deliverSecret(secret, 0, false)
return
}
baseSecret = updatedSecretObj.(*apiv1.Secret)
glog.V(3).Infof("Syncing secret %s in underlying clusters", baseSecret.Name)
clusters, err := secretcontroller.secretFederatedInformer.GetReadyClusters()
if err != nil {
glog.Errorf("Failed to get cluster list: %v", err)
secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
return
}
operations := make([]util.FederatedOperation, 0)
for _, cluster := range clusters {
clusterSecretObj, found, err := secretcontroller.secretFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
if err != nil {
glog.Errorf("Failed to get %s from %s: %v", key, cluster.Name, err)
secretcontroller.deliverSecret(secret, 0, true)
return
}
// The data should not be modified.
desiredSecret := &apiv1.Secret{
ObjectMeta: util.DeepCopyRelevantObjectMeta(baseSecret.ObjectMeta),
Data: baseSecret.Data,
Type: baseSecret.Type,
}
if !found {
secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "CreateInCluster",
"Creating secret in cluster %s", cluster.Name)
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeAdd,
Obj: desiredSecret,
ClusterName: cluster.Name,
})
} else {
clusterSecret := clusterSecretObj.(*apiv1.Secret)
// Update existing secret, if needed.
if !util.SecretEquivalent(*desiredSecret, *clusterSecret) {
secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInCluster",
"Updating secret in cluster %s", cluster.Name)
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeUpdate,
Obj: desiredSecret,
ClusterName: cluster.Name,
})
}
}
}
//......... the rest of this code is omitted .........
Example 13: ensureLoadBalancer
func (s *AWSCloud) ensureLoadBalancer(namespacedName types.NamespacedName, loadBalancerName string, listeners []*elb.Listener, subnetIDs []string, securityGroupIDs []string, internalELB bool) (*elb.LoadBalancerDescription, error) {
loadBalancer, err := s.describeLoadBalancer(loadBalancerName)
if err != nil {
return nil, err
}
dirty := false
if loadBalancer == nil {
createRequest := &elb.CreateLoadBalancerInput{}
createRequest.LoadBalancerName = aws.String(loadBalancerName)
createRequest.Listeners = listeners
if internalELB {
createRequest.Scheme = aws.String("internal")
}
// We are supposed to specify one subnet per AZ.
// TODO: What happens if we have more than one subnet per AZ?
createRequest.Subnets = stringPointerArray(subnetIDs)
createRequest.SecurityGroups = stringPointerArray(securityGroupIDs)
createRequest.Tags = []*elb.Tag{
{Key: aws.String(TagNameKubernetesCluster), Value: aws.String(s.getClusterName())},
{Key: aws.String(TagNameKubernetesService), Value: aws.String(namespacedName.String())},
}
glog.Infof("Creating load balancer for %v with name: ", namespacedName, loadBalancerName)
_, err := s.elb.CreateLoadBalancer(createRequest)
if err != nil {
return nil, err
}
dirty = true
} else {
// TODO: Sync internal vs non-internal
{
// Sync subnets
expected := sets.NewString(subnetIDs...)
actual := stringSetFromPointers(loadBalancer.Subnets)
additions := expected.Difference(actual)
removals := actual.Difference(expected)
if removals.Len() != 0 {
request := &elb.DetachLoadBalancerFromSubnetsInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.Subnets = stringSetToPointers(removals)
glog.V(2).Info("Detaching load balancer from removed subnets")
_, err := s.elb.DetachLoadBalancerFromSubnets(request)
if err != nil {
return nil, fmt.Errorf("error detaching AWS loadbalancer from subnets: %v", err)
}
dirty = true
}
if additions.Len() != 0 {
request := &elb.AttachLoadBalancerToSubnetsInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.Subnets = stringSetToPointers(additions)
glog.V(2).Info("Attaching load balancer to added subnets")
_, err := s.elb.AttachLoadBalancerToSubnets(request)
if err != nil {
return nil, fmt.Errorf("error attaching AWS loadbalancer to subnets: %v", err)
}
dirty = true
}
}
{
// Sync security groups
expected := sets.NewString(securityGroupIDs...)
actual := stringSetFromPointers(loadBalancer.SecurityGroups)
if !expected.Equal(actual) {
// This call just replaces the security groups, unlike e.g. subnets (!)
request := &elb.ApplySecurityGroupsToLoadBalancerInput{}
request.LoadBalancerName = aws.String(loadBalancerName)
request.SecurityGroups = stringPointerArray(securityGroupIDs)
glog.V(2).Info("Applying updated security groups to load balancer")
_, err := s.elb.ApplySecurityGroupsToLoadBalancer(request)
if err != nil {
return nil, fmt.Errorf("error applying AWS loadbalancer security groups: %v", err)
}
dirty = true
}
}
{
// Sync listeners
listenerDescriptions := loadBalancer.ListenerDescriptions
foundSet := make(map[int]bool)
removals := []*int64{}
for _, listenerDescription := range listenerDescriptions {
actual := listenerDescription.Listener
if actual == nil {
glog.Warning("Ignoring empty listener in AWS loadbalancer: ", loadBalancerName)
//......... the rest of this code is omitted .........
Example 14: reconcileSecret
func (secretcontroller *SecretController) reconcileSecret(secret types.NamespacedName) {
if !secretcontroller.isSynced() {
secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
return
}
key := secret.String()
baseSecretObj, exist, err := secretcontroller.secretInformerStore.GetByKey(key)
if err != nil {
glog.Errorf("Failed to query main secret store for %v: %v", key, err)
secretcontroller.deliverSecret(secret, 0, true)
return
}
if !exist {
// Not federated secret, ignoring.
return
}
baseSecret := baseSecretObj.(*api_v1.Secret)
clusters, err := secretcontroller.secretFederatedInformer.GetReadyClusters()
if err != nil {
glog.Errorf("Failed to get cluster list: %v", err)
secretcontroller.deliverSecret(secret, secretcontroller.clusterAvailableDelay, false)
return
}
operations := make([]util.FederatedOperation, 0)
for _, cluster := range clusters {
clusterSecretObj, found, err := secretcontroller.secretFederatedInformer.GetTargetStore().GetByKey(cluster.Name, key)
if err != nil {
glog.Errorf("Failed to get %s from %s: %v", key, cluster.Name, err)
secretcontroller.deliverSecret(secret, 0, true)
return
}
desiredSecret := &api_v1.Secret{
ObjectMeta: util.CopyObjectMeta(baseSecret.ObjectMeta),
Data: baseSecret.Data,
Type: baseSecret.Type,
}
if !found {
secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "CreateInCluster",
"Creating secret in cluster %s", cluster.Name)
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeAdd,
Obj: desiredSecret,
ClusterName: cluster.Name,
})
} else {
clusterSecret := clusterSecretObj.(*api_v1.Secret)
// Update existing secret, if needed.
if !util.SecretEquivalent(*desiredSecret, *clusterSecret) {
secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInCluster",
"Updating secret in cluster %s", cluster.Name)
operations = append(operations, util.FederatedOperation{
Type: util.OperationTypeUpdate,
Obj: desiredSecret,
ClusterName: cluster.Name,
})
}
}
}
if len(operations) == 0 {
// Everything is in order
return
}
err = secretcontroller.federatedUpdater.UpdateWithOnError(operations, secretcontroller.updateTimeout,
func(op util.FederatedOperation, operror error) {
secretcontroller.eventRecorder.Eventf(baseSecret, api.EventTypeNormal, "UpdateInClusterFailed",
"Secret update in cluster %s failed: %v", op.ClusterName, operror)
})
if err != nil {
glog.Errorf("Failed to execute updates for %s: %v", key, err)
secretcontroller.deliverSecret(secret, 0, true)
return
}
// Everything is in order, but let's be doubly sure
secretcontroller.deliverSecret(secret, secretcontroller.secretReviewDelay, false)
}