This article collects typical usage examples of the Go function github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache.NewStore. If you are wondering what NewStore does, how to call it, or what real-world uses look like, the curated examples below should help.
The following sections show 15 code examples of the NewStore function, ordered by popularity.
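Before the examples, here is a minimal sketch of the basic pattern they all build on. It assumes the pkg/client/cache API of that era (essentially the same Store interface now lives in k8s.io/client-go/tools/cache); the pod name and printed values are illustrative only.

// Sketch: create a key-indexed store, add an object, and read it back.
package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
)

func main() {
	// NewStore returns a thread-safe in-memory store indexed by the given key
	// function. MetaNamespaceKeyFunc derives "<namespace>/<name>" keys for
	// namespaced objects and plain "<name>" keys for cluster-scoped ones.
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)

	pod := &api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "default", Name: "example-pod"}}
	if err := store.Add(pod); err != nil {
		fmt.Println("add failed:", err)
		return
	}

	// Objects can be fetched again by their computed key...
	obj, exists, err := store.GetByKey("default/example-pod")
	if err == nil && exists {
		fmt.Println("found:", obj.(*api.Pod).Name) // found: example-pod
	}

	// ...or enumerated as a snapshot of the whole cache.
	fmt.Println(len(store.List())) // 1
}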
Example 1: Create
// Create creates a scheduler and all support functions.
func (factory *ConfigFactory) Create() *scheduler.Config {
	// Watch and queue pods that need scheduling.
	podQueue := cache.NewFIFO()
	cache.NewReflector(factory.createUnassignedPodLW(), &api.Pod{}, podQueue).Run()

	// Watch and cache all running pods. Scheduler needs to find all pods
	// so it knows where it's safe to place a pod. Cache this locally.
	podCache := cache.NewStore()
	cache.NewReflector(factory.createAssignedPodLW(), &api.Pod{}, podCache).Run()

	// Watch minions.
	// Minions may be listed frequently, so provide a local up-to-date cache.
	minionCache := cache.NewStore()
	if false {
		// Disable this code until minions support watches.
		cache.NewReflector(factory.createMinionLW(), &api.Minion{}, minionCache).Run()
	} else {
		cache.NewPoller(factory.pollMinions, 10*time.Second, minionCache).Run()
	}

	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	minionLister := &storeToMinionLister{minionCache}

	algo := algorithm.NewGenericScheduler(
		[]algorithm.FitPredicate{
			// Fit is defined based on the absence of port conflicts.
			algorithm.PodFitsPorts,
			// Fit is determined by resource availability
			algorithm.NewResourceFitPredicate(minionLister),
			// Fit is determined by non-conflicting disk volumes
			algorithm.NoDiskConflict,
			// Fit is determined by node selector query
			algorithm.NewSelectorMatchPredicate(minionLister),
		},
		// Prioritize nodes by least requested utilization.
		algorithm.LeastRequestedPriority,
		&storeToPodLister{podCache}, r)

	podBackoff := podBackoff{
		perPodBackoff: map[string]*backoffEntry{},
		clock:         realClock{},
	}

	return &scheduler.Config{
		MinionLister: minionLister,
		Algorithm:    algo,
		Binder:       &binder{factory.Client},
		NextPod: func() *api.Pod {
			pod := podQueue.Pop().(*api.Pod)
			glog.V(2).Infof("About to try and schedule pod %v\n"+
				"\tknown minions: %v\n"+
				"\tknown scheduled pods: %v\n",
				pod.Name, minionCache.ContainedIDs(), podCache.ContainedIDs())
			return pod
		},
		Error: factory.makeDefaultErrorFunc(&podBackoff, podQueue),
	}
}
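This example comes from an early revision in which NewStore took no key function and the store exposed ContainedIDs(); the later revisions shown below construct stores with cache.NewStore(cache.MetaNamespaceKeyFunc) instead. Either way, consumers read the reflector-maintained cache rather than the API server. A rough sketch of such a read (not part of the original source; the log text is illustrative):

// Sketch: take a snapshot of the pods the reflector has cached locally.
// Store.List returns []interface{}, so each entry is type-asserted back.
for _, obj := range podCache.List() {
	pod := obj.(*api.Pod)
	glog.V(4).Infof("locally cached pod: %s", pod.Name)
}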
Example 2: TestModeler
func TestModeler(t *testing.T) {
	table := []struct {
		queuedPods    []*api.Pod
		scheduledPods []*api.Pod
		assumedPods   []*api.Pod
		expectPods    names
	}{
		{
			queuedPods:    names{}.list(),
			scheduledPods: names{{"default", "foo"}, {"custom", "foo"}}.list(),
			assumedPods:   names{{"default", "foo"}}.list(),
			expectPods:    names{{"default", "foo"}, {"custom", "foo"}},
		}, {
			queuedPods:    names{}.list(),
			scheduledPods: names{{"default", "foo"}}.list(),
			assumedPods:   names{{"default", "foo"}, {"custom", "foo"}}.list(),
			expectPods:    names{{"default", "foo"}, {"custom", "foo"}},
		}, {
			queuedPods:    names{{"custom", "foo"}}.list(),
			scheduledPods: names{{"default", "foo"}}.list(),
			assumedPods:   names{{"default", "foo"}, {"custom", "foo"}}.list(),
			expectPods:    names{{"default", "foo"}},
		},
	}

	for _, item := range table {
		q := &cache.StoreToPodLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}
		for _, pod := range item.queuedPods {
			q.Store.Add(pod)
		}
		s := &cache.StoreToPodLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}
		for _, pod := range item.scheduledPods {
			s.Store.Add(pod)
		}
		m := NewSimpleModeler(q, s)
		for _, pod := range item.assumedPods {
			m.AssumePod(pod)
		}
		list, err := m.PodLister().List(labels.Everything())
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		found := 0
		for _, pod := range list {
			if item.expectPods.has(pod) {
				found++
			} else {
				t.Errorf("found unexpected pod %#v", pod)
			}
		}
		if e, a := item.expectPods, found; len(e) != a {
			t.Errorf("Expected pods:\n%+v\nFound pods:\n%s\n", podNames(e.list()), podNames(list))
		}
	}
}
Example 3: newKube2Sky
func newKube2Sky(ec etcdClient) *kube2sky {
	return &kube2sky{
		etcdClient:          ec,
		domain:              testDomain,
		etcdMutationTimeout: time.Second,
		endpointsStore:      cache.NewStore(cache.MetaNamespaceKeyFunc),
		servicesStore:       cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
}
Example 4: NewConfigFactory
// Initializes the factory.
func NewConfigFactory(client *client.Client) *ConfigFactory {
	c := &ConfigFactory{
		Client:             client,
		PodQueue:           cache.NewFIFO(cache.MetaNamespaceKeyFunc),
		ScheduledPodLister: &cache.StoreToPodLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		NodeLister:         &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
		ServiceLister:      &cache.StoreToServiceLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
	}
	modeler := scheduler.NewSimpleModeler(&cache.StoreToPodLister{c.PodQueue}, c.ScheduledPodLister)
	c.modeler = modeler
	c.PodLister = modeler.PodLister()
	return c
}
Example 5: Run
// Run starts a background goroutine that watches for changes to services that
// have (or had) externalLoadBalancers=true and ensures that they have external
// load balancers created and deleted appropriately.
// nodeSyncPeriod controls how often we check the cluster's nodes to determine
// if external load balancers need to be updated to point to a new set.
func (s *ServiceController) Run(nodeSyncPeriod time.Duration) error {
	if err := s.init(); err != nil {
		return err
	}

	// We have to make this check because the ListWatch that we use in
	// WatchServices requires Client functions that aren't in the interface
	// for some reason.
	if _, ok := s.kubeClient.(*client.Client); !ok {
		return fmt.Errorf("ServiceController only works with real Client objects, but was passed something else satisfying the client Interface.")
	}

	// Get the currently existing set of services and then all future creates
	// and updates of services.
	// No delta compressor is needed for the DeltaFIFO queue because we only ever
	// care about the most recent state.
	serviceQueue := cache.NewDeltaFIFO(cache.MetaNamespaceKeyFunc, nil, s.cache)
	lw := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "services", api.NamespaceAll, fields.Everything())
	cache.NewReflector(lw, &api.Service{}, serviceQueue, 0).Run()
	for i := 0; i < workerGoroutines; i++ {
		go s.watchServices(serviceQueue)
	}

	nodeLister := &cache.StoreToNodeLister{cache.NewStore(cache.MetaNamespaceKeyFunc)}
	nodeLW := cache.NewListWatchFromClient(s.kubeClient.(*client.Client), "nodes", api.NamespaceAll, fields.Everything())
	cache.NewReflector(nodeLW, &api.Node{}, nodeLister.Store, 0).Run()
	go s.nodeSyncLoop(nodeLister, nodeSyncPeriod)
	return nil
}
Example 6: NewFirstContainerReady
func NewFirstContainerReady(kclient kclient.Interface, timeout time.Duration, interval time.Duration) *FirstContainerReady {
	return &FirstContainerReady{
		timeout:  timeout,
		interval: interval,
		podsForDeployment: func(deployment *kapi.ReplicationController) (*kapi.PodList, error) {
			selector := labels.Set(deployment.Spec.Selector).AsSelector()
			return kclient.Pods(deployment.Namespace).List(selector, fields.Everything())
		},
		getPodStore: func(namespace, name string) (cache.Store, chan struct{}) {
			sel, _ := fields.ParseSelector("metadata.name=" + name)
			store := cache.NewStore(cache.MetaNamespaceKeyFunc)
			lw := &deployutil.ListWatcherImpl{
				ListFunc: func() (runtime.Object, error) {
					return kclient.Pods(namespace).List(labels.Everything(), sel)
				},
				WatchFunc: func(resourceVersion string) (watch.Interface, error) {
					return kclient.Pods(namespace).Watch(labels.Everything(), sel, resourceVersion)
				},
			}
			stop := make(chan struct{})
			cache.NewReflector(lw, &kapi.Pod{}, store, 10*time.Second).RunUntil(stop)
			return store, stop
		},
	}
}
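The getPodStore closure returns both the reflector-backed store and its stop channel; RunUntil keeps the store synced until that channel is closed. A hypothetical caller (the variable names and pod name below are illustrative, not from the original source) might use it like this:

// Sketch: watch a single pod through the local cache, then stop the reflector.
store, stop := first.getPodStore("default", "example-pod") // first is a *FirstContainerReady
defer close(stop)                                          // ends the reflector started with RunUntil

// MetaNamespaceKeyFunc keys are "<namespace>/<name>", so the watched pod can be
// looked up directly once the reflector has delivered it.
if obj, exists, err := store.GetByKey("default/example-pod"); err == nil && exists {
	pod := obj.(*kapi.Pod)
	fmt.Printf("observed pod %s via the local cache\n", pod.Name)
}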
Example 7: NewTestController
func NewTestController(kube kubeclient.Interface, client contrail.ApiClient, allocator AddressAllocator, networkMgr NetworkManager) *Controller {
	controller := new(Controller)
	controller.serviceStore = cache.NewStore(testKeyFunc)
	controller.eventChannel = make(chan notification, 32)
	controller.kube = kube
	controller.config = NewConfig()
	controller.config.PublicSubnet = "100.64.0.0/10"
	controller.client = client
	if allocator == nil {
		controller.allocator = NewAddressAllocator(client, controller.config)
	} else {
		controller.allocator = allocator
	}
	controller.instanceMgr = NewInstanceManager(client, controller.allocator)
	if networkMgr == nil {
		controller.networkMgr = NewNetworkManager(client, controller.config)
	} else {
		controller.networkMgr = networkMgr
	}
	controller.serviceMgr = NewServiceManager(client, controller.config, controller.networkMgr)
	controller.namespaceMgr = NewNamespaceManager(client)
	return controller
}
Example 8: Create
// Create creates a new ImageChangeController which is used to trigger builds when a new
// image is available
func (factory *ImageChangeControllerFactory) Create() controller.RunnableController {
	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&imageStreamLW{factory.Client}, &imageapi.ImageStream{}, queue, 2*time.Minute).Run()

	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	cache.NewReflector(&buildConfigLW{client: factory.Client}, &buildapi.BuildConfig{}, store, 2*time.Minute).Run()

	imageChangeController := &buildcontroller.ImageChangeController{
		BuildConfigStore:        store,
		BuildConfigInstantiator: factory.BuildConfigInstantiator,
		Stop:                    factory.Stop,
	}

	return &controller.RetryController{
		Queue: queue,
		RetryManager: controller.NewQueueRetryManager(
			queue,
			cache.MetaNamespaceKeyFunc,
			func(obj interface{}, err error, retries controller.Retry) bool {
				kutil.HandleError(err)
				if _, isFatal := err.(buildcontroller.ImageChangeControllerFatalError); isFatal {
					return false
				}
				return retries.Count < maxRetries
			},
			kutil.NewTokenBucketRateLimiter(1, 10),
		),
		Handle: func(obj interface{}) error {
			imageRepo := obj.(*imageapi.ImageStream)
			return imageChangeController.HandleImageRepo(imageRepo)
		},
	}
}
Example 9: NewSimpleModeler
// NewSimpleModeler returns a new SimpleModeler.
// queuedPods: a PodLister that will return pods that have not been scheduled yet.
// scheduledPods: a PodLister that will return pods that we know for sure have been scheduled.
func NewSimpleModeler(queuedPods, scheduledPods ExtendedPodLister) *SimpleModeler {
	return &SimpleModeler{
		queuedPods:    queuedPods,
		scheduledPods: scheduledPods,
		assumedPods:   &cache.StoreToPodLister{cache.NewStore(cache.MetaNamespaceKeyFunc)},
	}
}
Example 10: TestAdmissionExists
// TestAdmissionExists verifies you cannot create Origin content if namespace is not known
func TestAdmissionExists(t *testing.T) {
	mockClient := &testclient.Fake{
		Err: fmt.Errorf("DOES NOT EXIST"),
	}
	projectcache.FakeProjectCache(mockClient, cache.NewStore(cache.MetaNamespaceKeyFunc), "")
	handler := &lifecycle{client: mockClient}
	build := &buildapi.Build{
		ObjectMeta: kapi.ObjectMeta{Name: "buildid"},
		Parameters: buildapi.BuildParameters{
			Source: buildapi.BuildSource{
				Type: buildapi.BuildSourceGit,
				Git: &buildapi.GitBuildSource{
					URI: "http://github.com/my/repository",
				},
				ContextDir: "context",
			},
			Strategy: buildapi.BuildStrategy{
				Type:           buildapi.DockerBuildStrategyType,
				DockerStrategy: &buildapi.DockerBuildStrategy{},
			},
			Output: buildapi.BuildOutput{
				DockerImageReference: "repository/data",
			},
		},
		Status: buildapi.BuildStatusNew,
	}
	err := handler.Admit(admission.NewAttributesRecord(build, "Build", "bogus-ns", "builds", "CREATE", nil))
	if err == nil {
		t.Errorf("Expected an error because namespace does not exist")
	}
}
Example 11: TestStoreToPodLister
func TestStoreToPodLister(t *testing.T) {
	store := cache.NewStore()
	ids := []string{"foo", "bar", "baz"}
	for _, id := range ids {
		store.Add(id, &api.Pod{
			JSONBase: api.JSONBase{ID: id},
			Labels:   map[string]string{"name": id},
		})
	}
	spl := storeToPodLister{store}

	for _, id := range ids {
		got, err := spl.ListPods(labels.Set{"name": id}.AsSelector())
		if err != nil {
			t.Errorf("Unexpected error: %v", err)
			continue
		}
		if e, a := 1, len(got); e != a {
			t.Errorf("Expected %v, got %v", e, a)
			continue
		}
		if e, a := id, got[0].ID; e != a {
			t.Errorf("Expected %v, got %v", e, a)
			continue
		}
	}
}
Example 12: newPodsApi
func newPodsApi(client *kclient.Client) podsApi {
	// Extend the selector to include specific nodes to monitor
	// or provide an API to update the nodes to monitor.
	selector, err := kSelector.ParseSelector("spec.nodeName!=")
	if err != nil {
		panic(err)
	}
	lw := kcache.NewListWatchFromClient(client, "pods", kapi.NamespaceAll, selector)
	podLister := &kcache.StoreToPodLister{Store: kcache.NewStore(kcache.MetaNamespaceKeyFunc)}
	// Watch and cache all running pods.
	reflector := kcache.NewReflector(lw, &kapi.Pod{}, podLister.Store, 0)
	stopChan := make(chan struct{})
	reflector.RunUntil(stopChan)

	nStore, nController := kframework.NewInformer(
		createNamespaceLW(client),
		&kapi.Namespace{},
		resyncPeriod,
		kframework.ResourceEventHandlerFuncs{})
	go nController.Run(util.NeverStop)

	podsApi := &realPodsApi{
		client:         client,
		podLister:      podLister,
		stopChan:       stopChan,
		reflector:      reflector,
		namespaceStore: nStore,
	}

	return podsApi
}
Example 13: TestAdmission
// TestAdmission verifies a namespace is created on create requests for namespace managed resources
func TestAdmission(t *testing.T) {
	namespace := "test"
	mockClient := &testclient.Fake{}
	handler := &provision{
		client: mockClient,
		store:  cache.NewStore(cache.MetaNamespaceKeyFunc),
	}
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image"}},
		},
	}
	err := handler.Admit(admission.NewAttributesRecord(&pod, "Pod", namespace, "pods", "", admission.Create, nil))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler")
	}
	if len(mockClient.Actions) != 1 {
		t.Errorf("Expected a create-namespace request")
	}
	if mockClient.Actions[0].Action != "create-namespace" {
		t.Errorf("Expected a create-namespace request to be made via the client")
	}
}
Example 14: TestAdmissionNamespaceExists
// TestAdmissionNamespaceExists verifies that no client call is made when a namespace already exists
func TestAdmissionNamespaceExists(t *testing.T) {
	namespace := "test"
	mockClient := &testclient.Fake{}
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	store.Add(&api.Namespace{
		ObjectMeta: api.ObjectMeta{Name: namespace},
	})
	handler := &provision{
		client: mockClient,
		store:  store,
	}
	pod := api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
		Spec: api.PodSpec{
			Volumes:    []api.Volume{{Name: "vol"}},
			Containers: []api.Container{{Name: "ctr", Image: "image"}},
		},
	}
	err := handler.Admit(admission.NewAttributesRecord(&pod, "Pod", namespace, "pods", "", admission.Create, nil))
	if err != nil {
		t.Errorf("Unexpected error returned from admission handler")
	}
	if len(mockClient.Actions) != 0 {
		t.Errorf("No client request should have been made")
	}
}
Example 15: RunProjectCache
func RunProjectCache(c client.Interface, defaultNodeSelector string) {
	if pcache != nil {
		return
	}
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)
	reflector := cache.NewReflector(
		&cache.ListWatch{
			ListFunc: func() (runtime.Object, error) {
				return c.Namespaces().List(labels.Everything(), fields.Everything())
			},
			WatchFunc: func(resourceVersion string) (watch.Interface, error) {
				return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
			},
		},
		&kapi.Namespace{},
		store,
		0,
	)
	reflector.Run()
	pcache = &ProjectCache{
		Client:              c,
		Store:               store,
		DefaultNodeSelector: defaultNodeSelector,
	}
}
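Namespace objects are cluster-scoped, so MetaNamespaceKeyFunc reduces to the bare object name here; lookups against the project cache therefore key on the namespace name alone. A hypothetical read path (the helper name is illustrative, not part of the original source):

// Sketch: fetch a cached namespace from the store the reflector keeps in sync.
func namespaceFromCache(store cache.Store, name string) (*kapi.Namespace, bool) {
	// Cluster-scoped objects carry no namespace, so the key is just the name.
	obj, exists, err := store.GetByKey(name)
	if err != nil || !exists {
		return nil, false
	}
	return obj.(*kapi.Namespace), true
}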