This article collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/client/cache.NewIndexer. If you have been wondering what NewIndexer does, how to call it, and what it looks like in real code, the curated examples below should help.
The following presents 15 code examples of the NewIndexer function, ordered by popularity.
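Before the examples, here is a minimal, self-contained sketch of the core pattern they all share: build an Indexer keyed by namespace/name with a secondary index on namespace, add an object, then query it back through the index. It assumes the same pre-1.5 vendored import paths (k8s.io/kubernetes/pkg/api and k8s.io/kubernetes/pkg/client/cache) that the examples below use; the pod object is purely illustrative.
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/cache"
)

func main() {
	// Key objects by "namespace/name" and maintain a secondary index on namespace.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "web", Namespace: "default"}}
	if err := indexer.Add(pod); err != nil {
		fmt.Println(err)
		return
	}
	// ByIndex returns every stored object whose namespace index key matches.
	objs, err := indexer.ByIndex(cache.NamespaceIndex, "default")
	fmt.Println(len(objs), err) // 1 <nil>
}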
Example 1: TestAPIGroupMissing
func TestAPIGroupMissing(t *testing.T) {
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
handler := &apiGroupHandler{
lister: listers.NewAPIServiceLister(indexer),
groupName: "foo",
}
server := httptest.NewServer(handler)
defer server.Close()
resp, err := http.Get(server.URL + "/apis/groupName/foo")
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != http.StatusNotFound {
t.Fatalf("expected %v, got %v", resp.StatusCode, http.StatusNotFound)
}
// foo still has no api services for it (like it was deleted), it should 404
resp, err = http.Get(server.URL + "/apis/groupName/")
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != http.StatusNotFound {
t.Fatalf("expected %v, got %v", resp.StatusCode, http.StatusNotFound)
}
}
Example 2: NewConfigFactory
// Initializes the factory.
func NewConfigFactory(client *client.Client, schedulerName string, hardPodAffinitySymmetricWeight int, failureDomains string) *ConfigFactory {
stopEverything := make(chan struct{})
schedulerCache := schedulercache.New(30*time.Second, stopEverything)
c := &ConfigFactory{
Client: client,
PodQueue: cache.NewFIFO(cache.MetaNamespaceKeyFunc),
ScheduledPodLister: &cache.StoreToPodLister{},
// Only nodes in the "Ready" condition with status == "True" are schedulable
NodeLister: &cache.StoreToNodeLister{},
PVLister: &cache.StoreToPVFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
PVCLister: &cache.StoreToPVCFetcher{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
ServiceLister: &cache.StoreToServiceLister{Indexer: cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
ControllerLister: &cache.StoreToReplicationControllerLister{Indexer: cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
ReplicaSetLister: &cache.StoreToReplicaSetLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)},
schedulerCache: schedulerCache,
StopEverything: stopEverything,
SchedulerName: schedulerName,
HardPodAffinitySymmetricWeight: hardPodAffinitySymmetricWeight,
FailureDomains: failureDomains,
}
c.PodLister = schedulerCache
// On add/delete to the scheduled pods, remove from the assumed pods.
// We construct this here instead of in CreateFromKeys because
// ScheduledPodLister is something we provide to plug in functions that
// they may need to call.
c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = cache.NewIndexerInformer(
c.createAssignedNonTerminatedPodLW(),
&api.Pod{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: c.addPodToCache,
UpdateFunc: c.updatePodInCache,
DeleteFunc: c.deletePodFromCache,
},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
c.NodeLister.Store, c.nodePopulator = cache.NewInformer(
c.createNodeLW(),
&api.Node{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: c.addNodeToCache,
UpdateFunc: c.updateNodeInCache,
DeleteFunc: c.deleteNodeFromCache,
},
)
return c
}
Example 3: newFakeDisruptionController
func newFakeDisruptionController() (*DisruptionController, *pdbStates) {
ps := &pdbStates{}
dc := &DisruptionController{
pdbLister: cache.StoreToPodDisruptionBudgetLister{Store: cache.NewStore(controller.KeyFunc)},
podLister: cache.StoreToPodLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{})},
rcLister: cache.StoreToReplicationControllerLister{Indexer: cache.NewIndexer(controller.KeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})},
rsLister: cache.StoreToReplicaSetLister{Store: cache.NewStore(controller.KeyFunc)},
dLister: cache.StoreToDeploymentLister{Store: cache.NewStore(controller.KeyFunc)},
getUpdater: func() updater { return ps.Set },
}
return dc, ps
}
Example 4: TestAdmissionIgnoresSubresources
func TestAdmissionIgnoresSubresources(t *testing.T) {
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
handler := createResourceQuota(&testclient.Fake{}, indexer)
quota := &api.ResourceQuota{}
quota.Name = "quota"
quota.Namespace = "test"
quota.Status = api.ResourceQuotaStatus{
Hard: api.ResourceList{},
Used: api.ResourceList{},
}
quota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
quota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
indexer.Add(quota)
newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
err := handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, newPod.Name, "pods", "", admission.Create, nil))
if err == nil {
t.Errorf("Expected an error because the pod exceeded allowed quota")
}
err = handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, newPod.Name, "pods", "subresource", admission.Create, nil))
if err != nil {
t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
}
}
Example 5: NewGroupCache
func NewGroupCache(groupRegistry groupregistry.Registry) *GroupCache {
allNamespaceContext := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll)
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{byUserIndexName: ByUserIndexKeys})
reflector := cache.NewReflector(
&cache.ListWatch{
ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
opts := &unversioned.ListOptions{
LabelSelector: unversioned.LabelSelector{Selector: options.LabelSelector},
FieldSelector: unversioned.FieldSelector{Selector: options.FieldSelector},
ResourceVersion: options.ResourceVersion,
}
return groupRegistry.ListGroups(allNamespaceContext, opts)
},
WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
opts := &unversioned.ListOptions{
LabelSelector: unversioned.LabelSelector{Selector: options.LabelSelector},
FieldSelector: unversioned.FieldSelector{Selector: options.FieldSelector},
ResourceVersion: options.ResourceVersion,
}
return groupRegistry.WatchGroups(allNamespaceContext, opts)
},
},
&userapi.Group{},
indexer,
// TODO this was chosen via copy/paste. If or when we choose to standardize these in some way, be sure to update this.
2*time.Minute,
)
return &GroupCache{
indexer: indexer,
reflector: reflector,
}
}
Example 6: NewCachedServiceAccessorAndStore
func NewCachedServiceAccessorAndStore() (ServiceAccessor, cache.Store) {
store := cache.NewIndexer(cache.MetaNamespaceKeyFunc, map[string]cache.IndexFunc{
"clusterIP": indexServiceByClusterIP, // for reverse lookups
"namespace": cache.MetaNamespaceIndexFunc,
})
return &cachedServiceAccessor{store: store}, store
}
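For context, here is a hedged sketch of how the "clusterIP" index registered above could be used for a reverse lookup. serviceByClusterIP is a hypothetical helper, not part of the original source, and it assumes indexServiceByClusterIP indexes each service by its Spec.ClusterIP; the cache and api packages are the same vendored imports used throughout this page.
// Hypothetical helper: reverse-lookup a service through the "clusterIP" index.
// Assumes indexServiceByClusterIP returned []string{svc.Spec.ClusterIP} for each service.
func serviceByClusterIP(store cache.Indexer, ip string) (*api.Service, bool) {
	objs, err := store.ByIndex("clusterIP", ip)
	if err != nil || len(objs) == 0 {
		return nil, false
	}
	return objs[0].(*api.Service), true
}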
Example 7: TestAdmissionIgnoresSubresources
func TestAdmissionIgnoresSubresources(t *testing.T) {
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
handler := createResourceQuota(&testclient.Fake{}, indexer)
quota := &api.ResourceQuota{}
quota.Name = "quota"
quota.Namespace = "test"
quota.Status = api.ResourceQuotaStatus{
Hard: api.ResourceList{},
Used: api.ResourceList{},
}
quota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
quota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
indexer.Add(quota)
newPod := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: quota.Namespace},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements("100m", "2Gi")}},
}}
err := handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, "123", "pods", "", admission.Create, nil))
if err == nil {
t.Errorf("Expected an error because the pod exceeded allowed quota")
}
err = handler.Admit(admission.NewAttributesRecord(newPod, "Pod", newPod.Namespace, "123", "pods", "subresource", admission.Create, nil))
if err != nil {
t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
}
}
Example 8: TestLimitRangerCacheAndLRUMisses
func TestLimitRangerCacheAndLRUMisses(t *testing.T) {
liveLookupCache, err := lru.New(10000)
if err != nil {
t.Fatal(err)
}
limitRange := validLimitRangeNoDefaults()
client := fake.NewSimpleClientset(&limitRange)
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
handler := &limitRanger{
Handler: admission.NewHandler(admission.Create, admission.Update),
client: client,
limitFunc: Limit,
indexer: indexer,
liveLookupCache: liveLookupCache,
}
testPod := validPod("testPod", 1, api.ResourceRequirements{})
err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod"), limitRange.Namespace, "testPod", api.Resource("pods"), "", admission.Update, nil))
if err == nil {
t.Errorf("Expected an error since the pod did not specify resource limits in its update call")
}
err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod"), limitRange.Namespace, "testPod", api.Resource("pods"), "status", admission.Update, nil))
if err != nil {
t.Errorf("Should have ignored calls to any subresource of pod %v", err)
}
}
Example 9: TestAdmissionIgnoresSubresources
// TestAdmissionIgnoresSubresources verifies that the admission controller ignores subresources.
// It verifies that creating a pod that would exceed quota fails admission,
// while the same create operation directed at a subresource succeeds.
func TestAdmissionIgnoresSubresources(t *testing.T) {
resourceQuota := &api.ResourceQuota{}
resourceQuota.Name = "quota"
resourceQuota.Namespace = "test"
resourceQuota.Status = api.ResourceQuotaStatus{
Hard: api.ResourceList{},
Used: api.ResourceList{},
}
resourceQuota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
resourceQuota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
kubeClient := fake.NewSimpleClientset(resourceQuota)
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
stopCh := make(chan struct{})
defer close(stopCh)
quotaAccessor, _ := newQuotaAccessor(kubeClient)
quotaAccessor.indexer = indexer
go quotaAccessor.Run(stopCh)
evaluator := NewQuotaEvaluator(quotaAccessor, install.NewRegistry(nil, nil), nil, 5, stopCh)
handler := "aAdmission{
Handler: admission.NewHandler(admission.Create, admission.Update),
evaluator: evaluator,
}
indexer.Add(resourceQuota)
newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
err := handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
if err == nil {
t.Errorf("Expected an error because the pod exceeded allowed quota")
}
err = handler.Admit(admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "subresource", admission.Create, nil))
if err != nil {
t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
}
}
Example 10: NewReadOnlyClusterPolicyCache
func NewReadOnlyClusterPolicyCache(registry clusterpolicyregistry.WatchingRegistry) *readOnlyClusterPolicyCache {
ctx := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll)
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
reflector := cache.NewReflector(
&cache.ListWatch{
ListFunc: func(options kapi.ListOptions) (runtime.Object, error) {
return registry.ListClusterPolicies(ctx, &options)
},
WatchFunc: func(options kapi.ListOptions) (watch.Interface, error) {
return registry.WatchClusterPolicies(ctx, &options)
},
},
&authorizationapi.ClusterPolicy{},
indexer,
2*time.Minute,
)
return &readOnlyClusterPolicyCache{
registry: registry,
indexer: indexer,
reflector: reflector,
keyFunc: cache.MetaNamespaceKeyFunc,
}
}
Example 11: TestAdmitEnforceQuotaConstraints
// TestAdmitEnforceQuotaConstraints verifies that if a quota tracks a particular resource, that
// resource must be specified on the pod. Here we create a quota that tracks cpu request, memory
// request, and memory limit, and ensure that a pod omitting a memory limit fails admission.
func TestAdmitEnforceQuotaConstraints(t *testing.T) {
resourceQuota := &api.ResourceQuota{
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("3"),
api.ResourceMemory: resource.MustParse("100Gi"),
api.ResourceLimitsMemory: resource.MustParse("200Gi"),
api.ResourcePods: resource.MustParse("5"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("1"),
api.ResourceMemory: resource.MustParse("50Gi"),
api.ResourceLimitsMemory: resource.MustParse("100Gi"),
api.ResourcePods: resource.MustParse("3"),
},
},
}
kubeClient := fake.NewSimpleClientset(resourceQuota)
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
handler := "aAdmission{
Handler: admission.NewHandler(admission.Create, admission.Update),
client: kubeClient,
indexer: indexer,
registry: install.NewRegistry(kubeClient),
}
handler.indexer.Add(resourceQuota)
newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", "")))
err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
if err == nil {
t.Errorf("Expected an error because the pod does not specify a memory limit")
}
}
Example 12: TestLimitRangerCacheAndLRUExpiredMisses
func TestLimitRangerCacheAndLRUExpiredMisses(t *testing.T) {
liveLookupCache, err := lru.New(10000)
if err != nil {
t.Fatal(err)
}
limitRange := validLimitRangeNoDefaults()
client := fake.NewSimpleClientset(&limitRange)
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
handler := &limitRanger{
Handler: admission.NewHandler(admission.Create, admission.Update),
client: client,
actions: &DefaultLimitRangerActions{},
indexer: indexer,
liveLookupCache: liveLookupCache,
}
testPod := validPod("testPod", 1, api.ResourceRequirements{})
// add to the lru cache
liveLookupCache.Add(limitRange.Namespace, liveLookupEntry{expiry: time.Now().Add(time.Duration(-30 * time.Second)), items: []*api.LimitRange{}})
err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "", admission.Update, nil))
if err == nil {
t.Errorf("Expected an error since the pod did not specify resource limits in its update call")
}
err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod").WithVersion("version"), limitRange.Namespace, "testPod", api.Resource("pods").WithVersion("version"), "status", admission.Update, nil))
if err != nil {
t.Errorf("Should have ignored calls to any subresource of pod %v", err)
}
}
Example 13: TestAdmitExceedQuotaLimit
// TestAdmitExceedQuotaLimit verifies that a pod exceeding allowed usage is rejected during admission.
func TestAdmitExceedQuotaLimit(t *testing.T) {
resourceQuota := &api.ResourceQuota{
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("3"),
api.ResourceMemory: resource.MustParse("100Gi"),
api.ResourcePods: resource.MustParse("5"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("1"),
api.ResourceMemory: resource.MustParse("50Gi"),
api.ResourcePods: resource.MustParse("3"),
},
},
}
kubeClient := fake.NewSimpleClientset(resourceQuota)
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
evaluator, _ := newQuotaEvaluator(kubeClient, install.NewRegistry(kubeClient))
evaluator.indexer = indexer
evaluator.Run(5)
handler := "aAdmission{
Handler: admission.NewHandler(admission.Create, admission.Update),
evaluator: evaluator,
}
indexer.Add(resourceQuota)
newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")))
err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, api.Resource("pods").WithVersion("version"), "", admission.Create, nil))
if err == nil {
t.Errorf("Expected an error exceeding quota")
}
}
Example 14: TestLimitRangerIgnoresSubresource
func TestLimitRangerIgnoresSubresource(t *testing.T) {
client := testclient.NewSimpleFake()
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
handler := &limitRanger{
Handler: admission.NewHandler(admission.Create, admission.Update),
client: client,
limitFunc: Limit,
indexer: indexer,
}
limitRange := validLimitRangeNoDefaults()
testPod := validPod("testPod", 1, api.ResourceRequirements{})
indexer.Add(&limitRange)
err := handler.Admit(admission.NewAttributesRecord(&testPod, "Pod", limitRange.Namespace, "testPod", "pods", "", admission.Update, nil))
if err == nil {
t.Errorf("Expected an error since the pod did not specify resource limits in its update call")
}
err = handler.Admit(admission.NewAttributesRecord(&testPod, "Pod", limitRange.Namespace, "testPod", "pods", "status", admission.Update, nil))
if err != nil {
t.Errorf("Should have ignored calls to any subresource of pod %v", err)
}
}
Example 15: TestAdmissionIgnoresSubresources
// TestAdmissionIgnoresSubresources verifies that the admission controller ignores subresources.
// It verifies that creating a pod that would exceed quota fails admission,
// while the same create operation directed at a subresource succeeds.
func TestAdmissionIgnoresSubresources(t *testing.T) {
resourceQuota := &api.ResourceQuota{}
resourceQuota.Name = "quota"
resourceQuota.Namespace = "test"
resourceQuota.Status = api.ResourceQuotaStatus{
Hard: api.ResourceList{},
Used: api.ResourceList{},
}
resourceQuota.Status.Hard[api.ResourceMemory] = resource.MustParse("2Gi")
resourceQuota.Status.Used[api.ResourceMemory] = resource.MustParse("1Gi")
kubeClient := fake.NewSimpleClientset(resourceQuota)
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
handler := "aAdmission{
Handler: admission.NewHandler(admission.Create, admission.Update),
client: kubeClient,
indexer: indexer,
registry: install.NewRegistry(kubeClient),
}
handler.indexer.Add(resourceQuota)
newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
err := handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "", admission.Create, nil))
if err == nil {
t.Errorf("Expected an error because the pod exceeded allowed quota")
}
err = handler.Admit(admission.NewAttributesRecord(newPod, api.Kind("Pod"), newPod.Namespace, newPod.Name, api.Resource("pods"), "subresource", admission.Create, nil))
if err != nil {
t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
}
}