Golang v1beta1.ReplicaSetStatus Class Code Examples

This article collects typical usage examples of the k8s/io/kubernetes/pkg/apis/extensions/v1beta1.ReplicaSetStatus class in Golang. If you are wondering what ReplicaSetStatus is for and how to use it, the curated class examples below should help.


Five code examples of the ReplicaSetStatus class are shown below, sorted by popularity by default.

Example 1: SetCondition

// SetCondition adds/replaces the given condition in the replica set status. If the condition that we
// are about to add already exists and has the same status and reason then we are not going to update.
func SetCondition(status *extensions.ReplicaSetStatus, condition extensions.ReplicaSetCondition) {
	currentCond := GetCondition(*status, condition.Type)
	if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
		return
	}
	newConditions := filterOutCondition(status.Conditions, condition.Type)
	status.Conditions = append(newConditions, condition)
}
Author: nak3; Project: kubernetes; Lines: 10; Source: replica_set_utils.go
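
SetCondition relies on two helpers defined in the same file but not shown above: GetCondition and filterOutCondition. A minimal sketch of what they plausibly look like, reconstructed from how SetCondition uses them (the exact bodies in replica_set_utils.go may differ):

// GetCondition returns the condition with the provided type from the
// replica set status, or nil if no such condition exists.
func GetCondition(status extensions.ReplicaSetStatus, condType extensions.ReplicaSetConditionType) *extensions.ReplicaSetCondition {
	for i := range status.Conditions {
		if status.Conditions[i].Type == condType {
			return &status.Conditions[i]
		}
	}
	return nil
}

// filterOutCondition returns a new slice of conditions with all conditions
// of the provided type removed.
func filterOutCondition(conditions []extensions.ReplicaSetCondition, condType extensions.ReplicaSetConditionType) []extensions.ReplicaSetCondition {
	var newConditions []extensions.ReplicaSetCondition
	for _, c := range conditions {
		if c.Type == condType {
			continue
		}
		newConditions = append(newConditions, c)
	}
	return newConditions
}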

Example 2: updateReplicaSetStatus

// updateReplicaSetStatus attempts to update the Status.Replicas of the given ReplicaSet, with a single GET/PUT retry.
func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs extensions.ReplicaSet, newStatus extensions.ReplicaSetStatus) (updateErr error) {
	// This is the steady state. It happens when the ReplicaSet doesn't have any expectations, since
	// we do a periodic relist every 30s. If the generations differ but the replicas are
	// the same, a caller might've resized to the same replica count.
	if rs.Status.Replicas == newStatus.Replicas &&
		rs.Status.FullyLabeledReplicas == newStatus.FullyLabeledReplicas &&
		rs.Status.ReadyReplicas == newStatus.ReadyReplicas &&
		rs.Status.AvailableReplicas == newStatus.AvailableReplicas &&
		rs.Generation == rs.Status.ObservedGeneration &&
		reflect.DeepEqual(rs.Status.Conditions, newStatus.Conditions) {
		return nil
	}

	// deep copy to avoid mutation now.
	// TODO: this method needs some work; probably retry on conflict, though I suspect this is stomping status to something it shouldn't
	copyObj, err := api.Scheme.DeepCopy(rs)
	if err != nil {
		return err
	}
	rs = copyObj.(extensions.ReplicaSet)

	// Save the generation number we acted on, otherwise we might wrongfully indicate
	// that we've seen a spec update when we retry.
	// TODO: This can clobber an update if we allow multiple agents to write to the
	// same status.
	newStatus.ObservedGeneration = rs.Generation

	var getErr error
	for i, rs := 0, &rs; ; i++ {
		glog.V(4).Infof(fmt.Sprintf("Updating replica count for ReplicaSet: %s/%s, ", rs.Namespace, rs.Name) +
			fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, *(rs.Spec.Replicas)) +
			fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) +
			fmt.Sprintf("readyReplicas %d->%d, ", rs.Status.ReadyReplicas, newStatus.ReadyReplicas) +
			fmt.Sprintf("availableReplicas %d->%d, ", rs.Status.AvailableReplicas, newStatus.AvailableReplicas) +
			fmt.Sprintf("sequence No: %v->%v", rs.Status.ObservedGeneration, newStatus.ObservedGeneration))

		rs.Status = newStatus
		_, updateErr = c.UpdateStatus(rs)
		if updateErr == nil || i >= statusUpdateRetries {
			return updateErr
		}
		// Update the ReplicaSet with the latest resource version for the next poll
		if rs, getErr = c.Get(rs.Name); getErr != nil {
			// If the GET fails we can't trust status.Replicas anymore. This error
			// is bound to be more interesting than the update failure.
			return getErr
		}
	}
}
Author: nak3; Project: kubernetes; Lines: 50; Source: replica_set_utils.go
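
For context, a hedged sketch of how a controller's sync path might drive updateReplicaSetStatus; syncStatus and its pod counting are hypothetical, and only the final call matches the function above:

// syncStatus is a hypothetical caller: it recomputes the status from the
// pods the controller currently owns, then lets updateReplicaSetStatus
// perform the conditional write with its single GET/PUT retry loop.
func syncStatus(c unversionedextensions.ReplicaSetInterface, rs extensions.ReplicaSet, ownedPods []*api.Pod) error {
	newStatus := rs.Status
	newStatus.Replicas = int32(len(ownedPods))
	// FullyLabeledReplicas, ReadyReplicas and AvailableReplicas would be
	// counted from pod labels and pod conditions here (omitted in this sketch).
	return updateReplicaSetStatus(c, rs, newStatus)
}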

Example 3: RemoveCondition

// RemoveCondition removes the condition with the provided type from the replica set status.
func RemoveCondition(status *extensions.ReplicaSetStatus, condType extensions.ReplicaSetConditionType) {
	status.Conditions = filterOutCondition(status.Conditions, condType)
}
Author: nak3; Project: kubernetes; Lines: 4; Source: replica_set_utils.go
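
A hedged usage sketch combining Example 1 and Example 3: record a ReplicaFailure condition when pod management fails, and clear it on success. The helper name and the "FailedCreate" reason are illustrative; only SetCondition, RemoveCondition, and the extensions.ReplicaSetReplicaFailure condition type come from the source.

// reportReplicaFailure (hypothetical) flips the ReplicaFailure condition
// based on the outcome of the last reconcile attempt.
func reportReplicaFailure(status *extensions.ReplicaSetStatus, manageErr error) {
	if manageErr == nil {
		RemoveCondition(status, extensions.ReplicaSetReplicaFailure)
		return
	}
	SetCondition(status, extensions.ReplicaSetCondition{
		Type:    extensions.ReplicaSetReplicaFailure,
		Status:  api.ConditionTrue,
		Reason:  "FailedCreate", // illustrative reason
		Message: manageErr.Error(),
		// LastTransitionTime would normally be set here as well.
	})
}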

Example 4: reconcileReplicaSet

func (frsc *ReplicaSetController) reconcileReplicaSet(key string) (reconciliationStatus, error) {
	if !frsc.isSynced() {
		return statusNotSynced, nil
	}

	glog.V(4).Infof("Start reconcile replicaset %q", key)
	startTime := time.Now()
	defer glog.V(4).Infof("Finished reconcile replicaset %q (%v)", key, time.Now().Sub(startTime))

	objFromStore, exists, err := frsc.replicaSetStore.Indexer.GetByKey(key)
	if err != nil {
		return statusError, err
	}
	if !exists {
		// don't delete local replicasets for now; do not reconcile them anymore.
		return statusAllOk, nil
	}
	obj, err := conversion.NewCloner().DeepCopy(objFromStore)
	frs, ok := obj.(*extensionsv1.ReplicaSet)
	if err != nil || !ok {
		glog.Errorf("Error in retrieving obj from store: %v, %v", ok, err)
		frsc.deliverReplicaSetByKey(key, 0, true)
		return statusError, err
	}
	if frs.DeletionTimestamp != nil {
		if err := frsc.delete(frs); err != nil {
			glog.Errorf("Failed to delete %s: %v", frs, err)
			frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "DeleteFailed",
				"ReplicaSet delete failed: %v", err)
			frsc.deliverReplicaSetByKey(key, 0, true)
			return statusError, err
		}
		return statusAllOk, nil
	}

	glog.V(3).Infof("Ensuring delete object from underlying clusters finalizer for replicaset: %s",
		frs.Name)
	// Add the required finalizers before creating a replicaset in underlying clusters.
	updatedRsObj, err := frsc.deletionHelper.EnsureFinalizers(frs)
	if err != nil {
		glog.Errorf("Failed to ensure delete object from underlying clusters finalizer in replicaset %s: %v",
			frs.Name, err)
		frsc.deliverReplicaSetByKey(key, 0, false)
		return statusError, err
	}
	frs = updatedRsObj.(*extensionsv1.ReplicaSet)

	glog.V(3).Infof("Syncing replicaset %s in underlying clusters", frs.Name)

	clusters, err := frsc.fedReplicaSetInformer.GetReadyClusters()
	if err != nil {
		return statusError, err
	}

	// collect current status and do schedule
	allPods, err := frsc.fedPodInformer.GetTargetStore().List()
	if err != nil {
		return statusError, err
	}
	podStatus, err := podanalyzer.AnalysePods(frs.Spec.Selector, allPods, time.Now())
	if err != nil {
		return statusError, err
	}
	current := make(map[string]int64)
	estimatedCapacity := make(map[string]int64)
	for _, cluster := range clusters {
		lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			return statusError, err
		}
		if exists {
			lrs := lrsObj.(*extensionsv1.ReplicaSet)
			current[cluster.Name] = int64(podStatus[cluster.Name].RunningAndReady) // include pending as well?
			unschedulable := int64(podStatus[cluster.Name].Unschedulable)
			if unschedulable > 0 {
				estimatedCapacity[cluster.Name] = int64(*lrs.Spec.Replicas) - unschedulable
			}
		}
	}

	scheduleResult := frsc.schedule(frs, clusters, current, estimatedCapacity)

	glog.V(4).Infof("Start syncing local replicaset %s: %v", key, scheduleResult)

	fedStatus := extensionsv1.ReplicaSetStatus{ObservedGeneration: frs.Generation}
	operations := make([]fedutil.FederatedOperation, 0)
	for clusterName, replicas := range scheduleResult {

		lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(clusterName, key)
		if err != nil {
			return statusError, err
		}

		lrs := &extensionsv1.ReplicaSet{
			ObjectMeta: fedutil.CopyObjectMeta(frs.ObjectMeta),
			Spec:       frs.Spec,
		}
		specReplicas := int32(replicas)
		lrs.Spec.Replicas = &specReplicas

		if !exists {
			if replicas > 0 {
				frsc.eventRecorder.Eventf(frs, api.EventTypeNormal, "CreateInCluster",
//......... remaining code omitted here .........
Author: eljefedelrodeodeljefe; Project: kubernetes; Lines: 101; Source: replicasetcontroller.go
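
frsc.schedule returns a map from cluster name to the number of replicas that cluster should run. As a self-contained illustration of that result shape, here is a naive even-spread scheduler; the real federation scheduler additionally weighs current placement, preferences, and the estimatedCapacity computed above:

package main

import "fmt"

// naiveSchedule spreads total replicas evenly across clusters, handing the
// leftovers to the first clusters in order.
func naiveSchedule(total int64, clusters []string) map[string]int64 {
	result := make(map[string]int64, len(clusters))
	if len(clusters) == 0 {
		return result
	}
	per := total / int64(len(clusters))
	extra := total % int64(len(clusters))
	for i, name := range clusters {
		result[name] = per
		if int64(i) < extra {
			result[name]++
		}
	}
	return result
}

func main() {
	fmt.Println(naiveSchedule(10, []string{"us-east", "us-west", "eu-central"}))
	// Output: map[eu-central:3 us-east:4 us-west:3]
}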

Example 5: reconcileReplicaSet

func (frsc *ReplicaSetController) reconcileReplicaSet(key string) error {
	if !frsc.isSynced() {
		frsc.deliverReplicaSetByKey(key, clusterAvailableDelay, false)
		return nil
	}

	glog.Infof("Start reconcile replicaset %q", key)
	startTime := time.Now()
	defer glog.Infof("Finished reconcile replicaset %q (%v)", key, time.Now().Sub(startTime))

	obj, exists, err := frsc.replicaSetStore.Store.GetByKey(key)
	if err != nil {
		return err
	}
	if !exists {
		// don't delete local replicasets for now
		return nil
	}
	frs := obj.(*extensionsv1.ReplicaSet)

	clusters, err := frsc.fedReplicaSetInformer.GetReadyClusters()
	if err != nil {
		return err
	}

	// collect current status and do schedule
	allPods, err := frsc.fedPodInformer.GetTargetStore().List()
	if err != nil {
		return err
	}
	podStatus, err := AnalysePods(frs, allPods, time.Now())
	if err != nil {
		return err
	}
	current := make(map[string]int64)
	estimatedCapacity := make(map[string]int64)
	for _, cluster := range clusters {
		lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(cluster.Name, key)
		if err != nil {
			return err
		}
		if exists {
			lrs := lrsObj.(*extensionsv1.ReplicaSet)
			current[cluster.Name] = int64(podStatus[cluster.Name].RunningAndReady) // include pending as well?
			unschedulable := int64(podStatus[cluster.Name].Unschedulable)
			if unschedulable > 0 {
				estimatedCapacity[cluster.Name] = int64(*lrs.Spec.Replicas) - unschedulable
			}
		}
	}

	scheduleResult := frsc.schedule(frs, clusters, current, estimatedCapacity)

	glog.Infof("Start syncing local replicaset %v", scheduleResult)

	fedStatus := extensionsv1.ReplicaSetStatus{ObservedGeneration: frs.Generation}
	for clusterName, replicas := range scheduleResult {
		// TODO: updater or parallelizer doesn't help, as results are needed for updating fed rs status
		clusterClient, err := frsc.fedReplicaSetInformer.GetClientsetForCluster(clusterName)
		if err != nil {
			return err
		}
		lrsObj, exists, err := frsc.fedReplicaSetInformer.GetTargetStore().GetByKey(clusterName, key)
		if err != nil {
			return err
		} else if !exists {
			if replicas > 0 {
				lrs := &extensionsv1.ReplicaSet{
					ObjectMeta: apiv1.ObjectMeta{
						Name:        frs.Name,
						Namespace:   frs.Namespace,
						Labels:      frs.Labels,
						Annotations: frs.Annotations,
					},
					Spec: frs.Spec,
				}
				specReplicas := int32(replicas)
				lrs.Spec.Replicas = &specReplicas
				lrs, err = clusterClient.Extensions().ReplicaSets(frs.Namespace).Create(lrs)
				if err != nil {
					return err
				}
				fedStatus.Replicas += lrs.Status.Replicas
				fedStatus.FullyLabeledReplicas += lrs.Status.FullyLabeledReplicas
			}
		} else {
			lrs := lrsObj.(*extensionsv1.ReplicaSet)
			lrsExpectedSpec := frs.Spec
			specReplicas := int32(replicas)
			lrsExpectedSpec.Replicas = &specReplicas
			if !reflect.DeepEqual(lrs.Spec, lrsExpectedSpec) {
				lrs.Spec = lrsExpectedSpec
				lrs, err = clusterClient.Extensions().ReplicaSets(frs.Namespace).Update(lrs)
				if err != nil {
					return err
				}
			}
			fedStatus.Replicas += lrs.Status.Replicas
			fedStatus.FullyLabeledReplicas += lrs.Status.FullyLabeledReplicas
			// leave the replicaset even if the replicas dropped to 0
		}
	}
	if fedStatus.Replicas != frs.Status.Replicas || fedStatus.FullyLabeledReplicas != frs.Status.FullyLabeledReplicas {
//......... remaining code omitted here .........
Author: vmware; Project: kubernetes; Lines: 101; Source: replicasetcontroller.go
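
Both reconcile variants rely on AnalysePods (podanalyzer.AnalysePods in Example 4), which buckets each cluster's pods into counters. A sketch of the per-cluster result shape as implied by the two fields read above; the Total field is an assumption beyond what the examples actually use:

// PodAnalysisResult, as implied by the callers above. AnalysePods returns
// a map[clusterName]PodAnalysisResult plus an error, letting the scheduler
// compare what is actually running in a cluster against what was requested.
type PodAnalysisResult struct {
	// Total number of pods matching the selector in the cluster (assumed).
	Total int
	// Pods that are running and have passed readiness checks; feeds the
	// "current" map used for scheduling.
	RunningAndReady int
	// Pods that cannot be scheduled in the cluster; used to derive
	// estimatedCapacity = spec replicas - unschedulable.
	Unschedulable int
}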


Note: The k8s/io/kubernetes/pkg/apis/extensions/v1beta1.ReplicaSetStatus class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Refer to each project's License before redistributing or using the code; do not reproduce without permission.