This article collects typical usage examples of the Golang function ResourceNames from the package k8s.io/kubernetes/pkg/quota. If you are wondering what ResourceNames does, or how and where to use it, the curated examples below should help.
The following shows 15 code examples of the ResourceNames function, sorted by popularity by default.
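Before the examples, it helps to know what ResourceNames actually does: it collects the keys of an api.ResourceList into a []api.ResourceName, which the examples below then feed into set helpers such as ToSet and Mask. Here is a minimal standalone sketch, assuming the pre-1.6 package layout (k8s.io/kubernetes/pkg/quota, k8s.io/kubernetes/pkg/api) used throughout these examples:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/quota"
)

func main() {
    // A hard-limit list as it might appear in a ResourceQuota spec.
    hard := api.ResourceList{
        api.ResourceCPU:  resource.MustParse("4"),
        api.ResourcePods: resource.MustParse("10"),
    }
    // ResourceNames returns just the resource names (the map keys).
    names := quota.ResourceNames(hard)
    fmt.Println(names) // e.g. [cpu pods] (map iteration order is not guaranteed)

    // The examples below usually convert the slice into a sets.String for
    // difference/equality checks.
    fmt.Println(quota.ToSet(names).List()) // sorted: [cpu pods]
}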
Example 1: PodConstraintsFunc
// PodConstraintsFunc verifies that all required resources are present on the pod
func PodConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
    pod, ok := object.(*api.Pod)
    if !ok {
        return fmt.Errorf("unexpected input object %v", object)
    }
    // TODO: fix this when we have pod level cgroups
    // since we do not yet support pod-level requests/limits, we need to ensure each
    // container makes an explicit request or limit for a quota-tracked resource
    requiredSet := quota.ToSet(required)
    missingSet := sets.NewString()
    for i := range pod.Spec.Containers {
        requests := pod.Spec.Containers[i].Resources.Requests
        limits := pod.Spec.Containers[i].Resources.Limits
        containerUsage := podUsageHelper(requests, limits)
        containerSet := quota.ToSet(quota.ResourceNames(containerUsage))
        if !containerSet.Equal(requiredSet) {
            difference := requiredSet.Difference(containerSet)
            missingSet.Insert(difference.List()...)
        }
    }
    if len(missingSet) == 0 {
        return nil
    }
    return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
Example 2: replenishQuota
// replenishQuota is a replenishment function invoked by a controller to notify that a quota should be recalculated
func (c *ClusterQuotaReconcilationController) replenishQuota(groupKind unversioned.GroupKind, namespace string, object runtime.Object) {
    // check if the quota controller can evaluate this kind, if not, ignore it altogether...
    evaluators := c.registry.Evaluators()
    evaluator, found := evaluators[groupKind]
    if !found {
        return
    }
    quotaNames, _ := c.clusterQuotaMapper.GetClusterQuotasFor(namespace)
    // only queue those quotas that are tracking a resource associated with this kind.
    matchedResources := evaluator.MatchesResources()
    for _, quotaName := range quotaNames {
        quota, err := c.clusterQuotaLister.Get(quotaName)
        if err != nil {
            // replenishment will be delayed, but we'll get back around to it later if it matters
            continue
        }
        resourceQuotaResources := utilquota.ResourceNames(quota.Status.Total.Hard)
        if len(utilquota.Intersection(matchedResources, resourceQuotaResources)) > 0 {
            // TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota.
            c.forceCalculation(quotaName, namespace)
        }
    }
}
Example 3: replenishQuota
// replenishQuota is a replenishment function invoked by a controller to notify that a quota should be recalculated
func (rq *ResourceQuotaController) replenishQuota(groupKind unversioned.GroupKind, namespace string, object runtime.Object) {
    // check if the quota controller can evaluate this kind, if not, ignore it altogether...
    evaluators := rq.registry.Evaluators()
    evaluator, found := evaluators[groupKind]
    if !found {
        return
    }
    // check if this namespace even has a quota...
    indexKey := &api.ResourceQuota{}
    indexKey.Namespace = namespace
    resourceQuotas, err := rq.rqIndexer.Index("namespace", indexKey)
    if err != nil {
        glog.Errorf("quota controller could not find ResourceQuota associated with namespace: %s, could take up to %v before a quota replenishes", namespace, rq.resyncPeriod())
    }
    if len(resourceQuotas) == 0 {
        return
    }
    // only queue those quotas that are tracking a resource associated with this kind.
    matchedResources := evaluator.MatchesResources()
    for i := range resourceQuotas {
        resourceQuota := resourceQuotas[i].(*api.ResourceQuota)
        resourceQuotaResources := quota.ResourceNames(resourceQuota.Status.Hard)
        if len(quota.Intersection(matchedResources, resourceQuotaResources)) > 0 {
            // TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota.
            rq.enqueueResourceQuota(resourceQuota)
        }
    }
}
Example 4: syncResourceQuota
// syncResourceQuota runs a complete sync of resource quota status across all known kinds
func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota v1.ResourceQuota) (err error) {
    // quota is dirty if any part of spec hard limits differs from the status hard limits
    dirty := !api.Semantic.DeepEqual(v1ResourceQuota.Spec.Hard, v1ResourceQuota.Status.Hard)
    resourceQuota := api.ResourceQuota{}
    if err := v1.Convert_v1_ResourceQuota_To_api_ResourceQuota(&v1ResourceQuota, &resourceQuota, nil); err != nil {
        return err
    }
    // dirty tracks if the usage status differs from the previous sync,
    // if so, we send a new usage with latest status
    // if this is our first sync, it will be dirty by default, since we need to track usage
    dirty = dirty || (resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil)
    used := api.ResourceList{}
    if resourceQuota.Status.Used != nil {
        used = quota.Add(api.ResourceList{}, resourceQuota.Status.Used)
    }
    hardLimits := quota.Add(api.ResourceList{}, resourceQuota.Spec.Hard)
    newUsage, err := quota.CalculateUsage(resourceQuota.Namespace, resourceQuota.Spec.Scopes, hardLimits, rq.registry)
    if err != nil {
        return err
    }
    for key, value := range newUsage {
        used[key] = value
    }
    // ensure set of used values match those that have hard constraints
    hardResources := quota.ResourceNames(hardLimits)
    used = quota.Mask(used, hardResources)
    // Create a usage object that is based on the quota resource version that will handle updates
    // by default, we preserve the past usage observation, and set hard to the current spec
    usage := api.ResourceQuota{
        ObjectMeta: metav1.ObjectMeta{
            Name:            resourceQuota.Name,
            Namespace:       resourceQuota.Namespace,
            ResourceVersion: resourceQuota.ResourceVersion,
            Labels:          resourceQuota.Labels,
            Annotations:     resourceQuota.Annotations},
        Status: api.ResourceQuotaStatus{
            Hard: hardLimits,
            Used: used,
        },
    }
    dirty = dirty || !quota.Equals(usage.Status.Used, resourceQuota.Status.Used)
    // there was a change observed by this controller that requires we update quota
    if dirty {
        v1Usage := &v1.ResourceQuota{}
        if err := v1.Convert_api_ResourceQuota_To_v1_ResourceQuota(&usage, v1Usage, nil); err != nil {
            return err
        }
        _, err = rq.kubeClient.Core().ResourceQuotas(usage.Namespace).UpdateStatus(v1Usage)
        return err
    }
    return nil
}
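The quota.Mask call above is what keeps the published usage aligned with the hard constraints: usage is only reported for resources the quota actually limits. A minimal standalone sketch of that behavior, with made-up quantities not drawn from any of these examples:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/quota"
)

func main() {
    used := api.ResourceList{
        api.ResourcePods: resource.MustParse("3"),
        api.ResourceCPU:  resource.MustParse("2"),
    }
    // The quota only declares a hard limit for pods.
    hard := api.ResourceList{api.ResourcePods: resource.MustParse("10")}
    // Mask drops every entry whose name is not in the given list, so the
    // observed cpu usage is not published on this quota's status.
    masked := quota.Mask(used, quota.ResourceNames(hard))
    for name, q := range masked {
        fmt.Printf("%s=%s\n", name, q.String()) // pods=3
    }
}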
Example 5: enforcePodContainerConstraints
// enforcePodContainerConstraints checks for required resources that are not set on this container and
// adds them to missingSet.
func enforcePodContainerConstraints(container *api.Container, requiredSet, missingSet sets.String) {
    requests := container.Resources.Requests
    limits := container.Resources.Limits
    containerUsage := podUsageHelper(requests, limits)
    containerSet := quota.ToSet(quota.ResourceNames(containerUsage))
    if !containerSet.Equal(requiredSet) {
        difference := requiredSet.Difference(containerSet)
        missingSet.Insert(difference.List()...)
    }
}
Example 6: isLimitSynced
func isLimitSynced(received, expected kapi.ResourceList) bool {
    resourceNames := quota.ResourceNames(expected)
    masked := quota.Mask(received, resourceNames)
    if len(masked) != len(expected) {
        return false
    }
    if le, _ := quota.LessThanOrEqual(masked, expected); !le {
        return false
    }
    if le, _ := quota.LessThanOrEqual(expected, masked); !le {
        return false
    }
    return true
}
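isLimitSynced has no doc comment, so the trick is worth spelling out: masking received down to expected's resource names, then checking LessThanOrEqual in both directions, amounts to an equality test that ignores extra keys on received. A standalone sketch (the quantities are assumptions for illustration; kapi is aliased to k8s.io/kubernetes/pkg/api as in the original source):

package main

import (
    "fmt"

    kapi "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/quota"
)

// isLimitSynced reproduced from Example 6 so the sketch runs standalone.
func isLimitSynced(received, expected kapi.ResourceList) bool {
    resourceNames := quota.ResourceNames(expected)
    masked := quota.Mask(received, resourceNames)
    if len(masked) != len(expected) {
        return false
    }
    if le, _ := quota.LessThanOrEqual(masked, expected); !le {
        return false
    }
    if le, _ := quota.LessThanOrEqual(expected, masked); !le {
        return false
    }
    return true
}

func main() {
    expected := kapi.ResourceList{kapi.ResourceCPU: resource.MustParse("2")}
    // The extra memory entry is masked away, and cpu matches exactly.
    received := kapi.ResourceList{
        kapi.ResourceCPU:    resource.MustParse("2"),
        kapi.ResourceMemory: resource.MustParse("1Gi"),
    }
    fmt.Println(isLimitSynced(received, expected)) // true
}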
Example 7: Matches
// Matches returns true if the quota matches the specified item.
func Matches(resourceQuota *api.ResourceQuota, item runtime.Object, matchFunc MatchingResourceNamesFunc, scopeFunc MatchesScopeFunc) (bool, error) {
    if resourceQuota == nil {
        return false, fmt.Errorf("expected non-nil quota")
    }
    // verify the quota matches on at least one resource
    matchResource := len(matchFunc(quota.ResourceNames(resourceQuota.Status.Hard))) > 0
    // by default, a quota with no scopes matches all items
    matchScope := true
    for _, scope := range resourceQuota.Spec.Scopes {
        innerMatch, err := scopeFunc(scope, item)
        if err != nil {
            return false, err
        }
        matchScope = matchScope && innerMatch
    }
    return matchResource && matchScope, nil
}
Example 8: PersistentVolumeClaimConstraintsFunc
// PersistentVolumeClaimConstraintsFunc verifies that all required resources are present on the claim
// In addition, it validates that the resources are valid (i.e. requests < limits)
func PersistentVolumeClaimConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
    pvc, ok := object.(*api.PersistentVolumeClaim)
    if !ok {
        return fmt.Errorf("unexpected input object %v", object)
    }
    requiredSet := quota.ToSet(required)
    missingSet := sets.NewString()
    pvcUsage := PersistentVolumeClaimUsageFunc(pvc)
    pvcSet := quota.ToSet(quota.ResourceNames(pvcUsage))
    if diff := requiredSet.Difference(pvcSet); len(diff) > 0 {
        missingSet.Insert(diff.List()...)
    }
    if len(missingSet) == 0 {
        return nil
    }
    return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
Example 9: ServiceConstraintsFunc
// ServiceConstraintsFunc verifies that all required resources are captured in service usage.
func ServiceConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
    service, ok := object.(*api.Service)
    if !ok {
        return fmt.Errorf("unexpected input object %v", object)
    }
    requiredSet := quota.ToSet(required)
    missingSet := sets.NewString()
    serviceUsage := ServiceUsageFunc(service)
    serviceSet := quota.ToSet(quota.ResourceNames(serviceUsage))
    if diff := requiredSet.Difference(serviceSet); len(diff) > 0 {
        missingSet.Insert(diff.List()...)
    }
    if len(missingSet) == 0 {
        return nil
    }
    return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
Example 10: Constraints
// Constraints verifies that all required resources are present on the item
func (p *serviceEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
    service, ok := item.(*api.Service)
    if !ok {
        return fmt.Errorf("unexpected input object %v", item)
    }
    requiredSet := quota.ToSet(required)
    missingSet := sets.NewString()
    serviceUsage, err := p.Usage(service)
    if err != nil {
        return err
    }
    serviceSet := quota.ToSet(quota.ResourceNames(serviceUsage))
    if diff := requiredSet.Difference(serviceSet); len(diff) > 0 {
        missingSet.Insert(diff.List()...)
    }
    if len(missingSet) == 0 {
        return nil
    }
    return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
Example 11: PodConstraintsFunc
// PodConstraintsFunc verifies that all required resources are present on the pod
// In addition, it validates that the resources are valid (i.e. requests < limits)
func PodConstraintsFunc(required []api.ResourceName, object runtime.Object) error {
    pod, ok := object.(*api.Pod)
    if !ok {
        return fmt.Errorf("unexpected input object %v", object)
    }
    // Pod level resources are often set during admission control
    // As a consequence, we want to verify that resources are valid prior
    // to ever charging quota prematurely in case they are not.
    allErrs := field.ErrorList{}
    fldPath := field.NewPath("spec").Child("containers")
    for i, ctr := range pod.Spec.Containers {
        idxPath := fldPath.Index(i)
        allErrs = append(allErrs, validation.ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"))...)
    }
    if len(allErrs) > 0 {
        return allErrs.ToAggregate()
    }
    // TODO: fix this when we have pod level cgroups
    // since we do not yet support pod-level requests/limits, we need to ensure each
    // container makes an explicit request or limit for a quota-tracked resource
    requiredSet := quota.ToSet(required)
    missingSet := sets.NewString()
    for i := range pod.Spec.Containers {
        requests := pod.Spec.Containers[i].Resources.Requests
        limits := pod.Spec.Containers[i].Resources.Limits
        containerUsage := podUsageHelper(requests, limits)
        containerSet := quota.ToSet(quota.ResourceNames(containerUsage))
        if !containerSet.Equal(requiredSet) {
            difference := requiredSet.Difference(containerSet)
            missingSet.Insert(difference.List()...)
        }
    }
    if len(missingSet) == 0 {
        return nil
    }
    return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
Example 12: Constraints
// Constraints verifies that all required resources are present on the item.
func (p *pvcEvaluator) Constraints(required []api.ResourceName, item runtime.Object) error {
    pvc, ok := item.(*api.PersistentVolumeClaim)
    if !ok {
        return fmt.Errorf("unexpected input object %v", item)
    }
    // these are the items that we will be handling based on the object's actual storage class
    pvcRequiredSet := append([]api.ResourceName{}, pvcResources...)
    if storageClassRef := util.GetClaimStorageClass(pvc); len(storageClassRef) > 0 {
        pvcRequiredSet = append(pvcRequiredSet, ResourceByStorageClass(storageClassRef, api.ResourcePersistentVolumeClaims))
        pvcRequiredSet = append(pvcRequiredSet, ResourceByStorageClass(storageClassRef, api.ResourceRequestsStorage))
    }
    // in effect, this will remove things from the required set that are not tied to this PVC's storage class
    // for example, if a quota has bronze and gold storage class items defined, we should not error a bronze pvc for not being gold,
    // but we should error a bronze pvc if it doesn't specify a storage request size...
    requiredResources := quota.Intersection(required, pvcRequiredSet)
    requiredSet := quota.ToSet(requiredResources)
    // usage for this pvc will only include global pvc items + this storage class specific items
    pvcUsage, err := p.Usage(item)
    if err != nil {
        return err
    }
    // determine what required resources were not tracked by usage.
    missingSet := sets.NewString()
    pvcSet := quota.ToSet(quota.ResourceNames(pvcUsage))
    if diff := requiredSet.Difference(pvcSet); len(diff) > 0 {
        missingSet.Insert(diff.List()...)
    }
    if len(missingSet) == 0 {
        return nil
    }
    return fmt.Errorf("must specify %s", strings.Join(missingSet.List(), ","))
}
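The quota.Intersection call is what keeps a bronze-class claim from being rejected for missing gold-class entries. A minimal sketch of that narrowing; the storage-class resource names below are hypothetical stand-ins for what ResourceByStorageClass would produce:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/quota"
)

func main() {
    // Everything the quota declares hard limits for (the storage-class
    // names here are hypothetical).
    required := []api.ResourceName{
        api.ResourcePersistentVolumeClaims,
        "gold.storageclass.storage.k8s.io/persistentvolumeclaims",
        "bronze.storageclass.storage.k8s.io/persistentvolumeclaims",
    }
    // What a bronze-class PVC is expected to account for.
    pvcRequired := []api.ResourceName{
        api.ResourcePersistentVolumeClaims,
        "bronze.storageclass.storage.k8s.io/persistentvolumeclaims",
    }
    // Only resources present in both lists are enforced against the claim,
    // so the gold-class entry drops out.
    fmt.Println(quota.Intersection(required, pvcRequired))
}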
Example 13: admitBlobWrite
// admitBlobWrite checks whether the blob does not exceed image quota, if set. Returns
// ErrAccessDenied error if the quota is exceeded.
func admitBlobWrite(ctx context.Context, repo *repository) error {
    rqs, err := repo.quotaClient.ResourceQuotas(repo.namespace).List(kapi.ListOptions{})
    if err != nil {
        if kerrors.IsForbidden(err) {
            context.GetLogger(ctx).Warnf("Cannot list resourcequotas because of outdated cluster roles: %v", err)
            return nil
        }
        context.GetLogger(ctx).Errorf("Failed to list resourcequotas: %v", err)
        return err
    }
    usage := kapi.ResourceList{
        // we are about to tag a single image to an image stream
        imageapi.ResourceImages: *resource.NewQuantity(1, resource.DecimalSI),
    }
    resources := quota.ResourceNames(usage)
    for _, rq := range rqs.Items {
        newUsage := quota.Add(usage, rq.Status.Used)
        newUsage = quota.Mask(newUsage, resources)
        requested := quota.Mask(rq.Spec.Hard, resources)
        allowed, exceeded := quota.LessThanOrEqual(newUsage, requested)
        if !allowed {
            details := make([]string, len(exceeded))
            by := quota.Subtract(newUsage, requested)
            for i, r := range exceeded {
                details[i] = fmt.Sprintf("%s limited to %s by %s", r, requested[r], by[r])
            }
            context.GetLogger(ctx).Error("Refusing to write blob exceeding quota: " + strings.Join(details, ", "))
            return distribution.ErrAccessDenied
        }
    }
    return nil
}
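The exceeded-by reporting at the end combines quota.LessThanOrEqual (which returns the list of resources that did not fit) with quota.Subtract (which gives the overshoot per resource). A standalone sketch with made-up quantities, using a generic "images" resource name in place of imageapi.ResourceImages:

package main

import (
    "fmt"

    kapi "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/quota"
)

func main() {
    // Admitting one more image would push usage to 11 against a hard limit of 10.
    newUsage := kapi.ResourceList{"images": resource.MustParse("11")}
    requested := kapi.ResourceList{"images": resource.MustParse("10")}
    allowed, exceeded := quota.LessThanOrEqual(newUsage, requested)
    if !allowed {
        // Subtract reports by how much each exceeded resource overshoots.
        by := quota.Subtract(newUsage, requested)
        for _, r := range exceeded {
            limit, over := requested[r], by[r]
            fmt.Printf("%s limited to %s by %s\n", r, limit.String(), over.String())
        }
    }
}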
Example 14: syncResourceQuota
// syncResourceQuota runs a complete sync of resource quota status across all known kinds
func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota api.ResourceQuota) (err error) {
    // quota is dirty if any part of spec hard limits differs from the status hard limits
    dirty := !api.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard)
    // dirty tracks if the usage status differs from the previous sync,
    // if so, we send a new usage with latest status
    // if this is our first sync, it will be dirty by default, since we need to track usage
    dirty = dirty || (resourceQuota.Status.Hard == nil || resourceQuota.Status.Used == nil)
    // Create a usage object that is based on the quota resource version that will handle updates
    // by default, we preserve the past usage observation, and set hard to the current spec
    previousUsed := api.ResourceList{}
    if resourceQuota.Status.Used != nil {
        previousUsed = quota.Add(api.ResourceList{}, resourceQuota.Status.Used)
    }
    usage := api.ResourceQuota{
        ObjectMeta: api.ObjectMeta{
            Name:            resourceQuota.Name,
            Namespace:       resourceQuota.Namespace,
            ResourceVersion: resourceQuota.ResourceVersion,
            Labels:          resourceQuota.Labels,
            Annotations:     resourceQuota.Annotations},
        Status: api.ResourceQuotaStatus{
            Hard: quota.Add(api.ResourceList{}, resourceQuota.Spec.Hard),
            Used: previousUsed,
        },
    }
    // find the intersection between the hard resources on the quota
    // and the resources this controller can track to know what we can
    // look to measure updated usage stats for
    hardResources := quota.ResourceNames(usage.Status.Hard)
    potentialResources := []api.ResourceName{}
    evaluators := rq.registry.Evaluators()
    for _, evaluator := range evaluators {
        potentialResources = append(potentialResources, evaluator.MatchesResources()...)
    }
    matchedResources := quota.Intersection(hardResources, potentialResources)
    // sum the observed usage from each evaluator
    newUsage := api.ResourceList{}
    usageStatsOptions := quota.UsageStatsOptions{Namespace: resourceQuota.Namespace, Scopes: resourceQuota.Spec.Scopes}
    for _, evaluator := range evaluators {
        stats, err := evaluator.UsageStats(usageStatsOptions)
        if err != nil {
            return err
        }
        newUsage = quota.Add(newUsage, stats.Used)
    }
    // mask the observed usage to only the set of resources tracked by this quota
    // merge our observed usage with the quota usage status
    // if the new usage is different than the last usage, we will need to do an update
    newUsage = quota.Mask(newUsage, matchedResources)
    for key, value := range newUsage {
        usage.Status.Used[key] = value
    }
    dirty = dirty || !quota.Equals(usage.Status.Used, resourceQuota.Status.Used)
    // there was a change observed by this controller that requires we update quota
    if dirty {
        _, err = rq.kubeClient.Core().ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
        return err
    }
    return nil
}
Example 15: TestImageStreamEvaluatorUsageStats
func TestImageStreamEvaluatorUsageStats(t *testing.T) {
    for _, tc := range []struct {
        name            string
        iss             []imageapi.ImageStream
        namespace       string
        expectedISCount int64
    }{
        {
            name:            "no image stream",
            iss:             []imageapi.ImageStream{},
            namespace:       "test",
            expectedISCount: 0,
        },
        {
            name: "one image stream",
            iss: []imageapi.ImageStream{
                {
                    ObjectMeta: kapi.ObjectMeta{
                        Namespace: "test",
                        Name:      "onetag",
                    },
                },
            },
            namespace:       "test",
            expectedISCount: 1,
        },
        {
            name: "two image streams",
            iss: []imageapi.ImageStream{
                {
                    ObjectMeta: kapi.ObjectMeta{
                        Namespace: "test",
                        Name:      "is1",
                    },
                },
                {
                    ObjectMeta: kapi.ObjectMeta{
                        Namespace: "test",
                        Name:      "is2",
                    },
                },
            },
            namespace:       "test",
            expectedISCount: 2,
        },
        {
            name: "two image streams in different namespaces",
            iss: []imageapi.ImageStream{
                {
                    ObjectMeta: kapi.ObjectMeta{
                        Namespace: "test",
                        Name:      "is1",
                    },
                },
                {
                    ObjectMeta: kapi.ObjectMeta{
                        Namespace: "other",
                        Name:      "is2",
                    },
                },
            },
            namespace:       "test",
            expectedISCount: 1,
        },
    } {
        fakeClient := &testclient.Fake{}
        fakeClient.AddReactor("list", "imagestreams", imagetest.GetFakeImageStreamListHandler(t, tc.iss...))
        evaluator := NewImageStreamEvaluator(fakeClient)
        stats, err := evaluator.UsageStats(kquota.UsageStatsOptions{Namespace: tc.namespace})
        if err != nil {
            t.Errorf("[%s]: could not get usage stats for namespace %q: %v", tc.name, tc.namespace, err)
            continue
        }
        expectedUsage := imagetest.ExpectedResourceListFor(tc.expectedISCount)
        expectedResources := kquota.ResourceNames(expectedUsage)
        if len(stats.Used) != len(expectedResources) {
            t.Errorf("[%s]: got unexpected number of computed resources: %d != %d", tc.name, len(stats.Used), len(expectedResources))
        }
        masked := kquota.Mask(stats.Used, expectedResources)
        if len(masked) != len(expectedResources) {
            for k := range stats.Used {
                if _, exists := masked[k]; !exists {
                    t.Errorf("[%s]: got unexpected resource %q from Usage() method", tc.name, k)
                }
            }
            for _, k := range expectedResources {
                if _, exists := masked[k]; !exists {
                    t.Errorf("[%s]: expected resource %q not computed", tc.name, k)
                }
            }
        }
//......... the rest of this example is omitted .........