This article collects typical usage examples of the Golang function Everything from the package github.com/GoogleCloudPlatform/kubernetes/pkg/fields. If you are unsure what fields.Everything does or how to call it, the curated samples below should help.
The following shows 15 code examples of the Everything function, ranked by popularity.
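Before the examples: fields.Everything() simply returns a field selector that matches every object, and the code below passes it to List and Watch calls whenever no field filtering is wanted. A minimal standalone sketch, assuming the pre-1.0 import path used by these examples still resolves and that the Selector interface exposes String and Matches as in later Kubernetes releases:

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
)

func main() {
	// Everything() matches all objects and serializes to the empty string,
	// so List/Watch calls built with it apply no field filtering at all.
	sel := fields.Everything()
	fmt.Printf("selector %q\n", sel.String())

	// Any field set matches, e.g. a pod identified by metadata.name.
	fmt.Println(sel.Matches(fields.Set{"metadata.name": "example-pod"})) // true
}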
Example 1: NewDockercfgController
// NewDockercfgController returns a new *DockercfgController.
func NewDockercfgController(cl client.Interface, options DockercfgControllerOptions) *DockercfgController {
e := &DockercfgController{
client: cl,
}
_, e.serviceAccountController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func() (runtime.Object, error) {
return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), fields.Everything())
},
WatchFunc: func(rv string) (watch.Interface, error) {
return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
},
},
&api.ServiceAccount{},
options.Resync,
framework.ResourceEventHandlerFuncs{
AddFunc: e.serviceAccountAdded,
UpdateFunc: e.serviceAccountUpdated,
},
)
e.dockerURL = options.DefaultDockerURL
return e
}
Example 2: RunProjectCache
func RunProjectCache(c client.Interface, defaultNodeSelector string) {
if pcache != nil {
return
}
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
reflector := cache.NewReflector(
&cache.ListWatch{
ListFunc: func() (runtime.Object, error) {
return c.Namespaces().List(labels.Everything(), fields.Everything())
},
WatchFunc: func(resourceVersion string) (watch.Interface, error) {
return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
},
},
&kapi.Namespace{},
store,
0,
)
reflector.Run()
pcache = &ProjectCache{
Client: c,
Store: store,
DefaultNodeSelector: defaultNodeSelector,
}
}
Example 3: watchNodes
func watchNodes(client *client.Client) {
nodeList, err := client.Nodes().List(labels.Everything(), fields.Everything())
if err != nil {
log.Fatal(err)
}
nodes := nodeList.Items
writeNodeTargetsFile(nodes)
watcher, err := client.Nodes().Watch(labels.Everything(), fields.Everything(), nodeList.ResourceVersion)
if err != nil {
log.Fatal(err)
}
for event := range watcher.ResultChan() {
switch event.Type {
case watch.Added:
switch obj := event.Object.(type) {
case *api.Node:
nodes = append(nodes, *obj)
}
writeNodeTargetsFile(nodes)
case watch.Deleted:
switch obj := event.Object.(type) {
case *api.Node:
index := findNodeIndexInSlice(nodes, obj)
nodes = append(nodes[:index], nodes[index+1:]...)
}
writeNodeTargetsFile(nodes)
}
}
}
Example 4: WatchMinions
func (oi *OsdnRegistryInterface) WatchMinions(receiver chan *osdnapi.MinionEvent, stop chan bool) error {
minionEventQueue := oscache.NewEventQueue(cache.MetaNamespaceKeyFunc)
listWatch := &cache.ListWatch{
ListFunc: func() (runtime.Object, error) {
return oi.kClient.Nodes().List(labels.Everything(), fields.Everything())
},
WatchFunc: func(resourceVersion string) (watch.Interface, error) {
return oi.kClient.Nodes().Watch(labels.Everything(), fields.Everything(), resourceVersion)
},
}
cache.NewReflector(listWatch, &kapi.Node{}, minionEventQueue, 4*time.Minute).Run()
for {
eventType, obj, err := minionEventQueue.Pop()
if err != nil {
return err
}
switch eventType {
case watch.Added:
// we should ignore the modified event because status updates cause unnecessary noise
// the only time we would care about modified would be if the minion changes its IP address
// and hence all nodes need to update their vtep entries for the respective subnet
// create minionEvent
node := obj.(*kapi.Node)
receiver <- &osdnapi.MinionEvent{Type: osdnapi.Added, Minion: node.ObjectMeta.Name}
case watch.Deleted:
// TODO: There is a chance that a Delete event will not get triggered.
// Need to use a periodic sync loop that lists and compares.
node := obj.(*kapi.Node)
receiver <- &osdnapi.MinionEvent{Type: osdnapi.Deleted, Minion: node.ObjectMeta.Name}
}
}
return nil
}
Example 5: NewReadOnlyClusterPolicyCache
func NewReadOnlyClusterPolicyCache(registry clusterpolicyregistry.WatchingRegistry) readOnlyClusterPolicyCache {
ctx := kapi.WithNamespace(kapi.NewContext(), kapi.NamespaceAll)
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
reflector := cache.NewReflector(
&cache.ListWatch{
ListFunc: func() (runtime.Object, error) {
return registry.ListClusterPolicies(ctx, labels.Everything(), fields.Everything())
},
WatchFunc: func(resourceVersion string) (watch.Interface, error) {
return registry.WatchClusterPolicies(ctx, labels.Everything(), fields.Everything(), resourceVersion)
},
},
&authorizationapi.ClusterPolicy{},
indexer,
2*time.Minute,
)
return readOnlyClusterPolicyCache{
registry: registry,
indexer: indexer,
reflector: *reflector,
keyFunc: cache.MetaNamespaceKeyFunc,
}
}
Example 6: NewNamespaceManager
// NewNamespaceManager creates a new NamespaceManager
func NewNamespaceManager(kubeClient client.Interface, resyncPeriod time.Duration) *NamespaceManager {
_, controller := framework.NewInformer(
&cache.ListWatch{
ListFunc: func() (runtime.Object, error) {
return kubeClient.Namespaces().List(labels.Everything(), fields.Everything())
},
WatchFunc: func(resourceVersion string) (watch.Interface, error) {
return kubeClient.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
},
},
&api.Namespace{},
resyncPeriod,
framework.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
namespace := obj.(*api.Namespace)
syncNamespace(kubeClient, *namespace)
},
UpdateFunc: func(oldObj, newObj interface{}) {
namespace := newObj.(*api.Namespace)
syncNamespace(kubeClient, *namespace)
},
},
)
return &NamespaceManager{
controller: controller,
}
}
Example 7: Create
// Create creates an ImportController.
func (f *ImportControllerFactory) Create() controller.RunnableController {
lw := &cache.ListWatch{
ListFunc: func() (runtime.Object, error) {
return f.Client.ImageStreams(kapi.NamespaceAll).List(labels.Everything(), fields.Everything())
},
WatchFunc: func(resourceVersion string) (watch.Interface, error) {
return f.Client.ImageStreams(kapi.NamespaceAll).Watch(labels.Everything(), fields.Everything(), resourceVersion)
},
}
q := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
cache.NewReflector(lw, &api.ImageStream{}, q, 2*time.Minute).Run()
c := &ImportController{
client: dockerregistry.NewClient(),
streams: f.Client,
mappings: f.Client,
}
return &controller.RetryController{
Queue: q,
RetryManager: controller.NewQueueRetryManager(
q,
cache.MetaNamespaceKeyFunc,
func(obj interface{}, err error, retries controller.Retry) bool {
util.HandleError(err)
return retries.Count < 5
},
kutil.NewTokenBucketRateLimiter(1, 10),
),
Handle: func(obj interface{}) error {
r := obj.(*api.ImageStream)
return c.Next(r)
},
}
}
Example 8: startPods
// startPods is a simplified version of RunRC: it does not create a replication controller,
// but creates plain Pods and requires the caller to pass a whole Pod definition, which is needed to test various scheduler predicates.
func startPods(c *client.Client, replicas int, ns string, podNamePrefix string, pod api.Pod) {
pods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
expectNoError(err)
podsRunningBefore := len(pods.Items)
for i := 0; i < replicas; i++ {
podName := fmt.Sprintf("%v-%v", podNamePrefix, i)
pod.ObjectMeta.Name = podName
pod.ObjectMeta.Labels["name"] = podName
pod.Spec.Containers[0].Name = podName
_, err = c.Pods(ns).Create(&pod)
expectNoError(err)
}
// Wait for pods to start running.
timeout := 2 * time.Minute
startTime := time.Now()
currentlyRunningPods := 0
for podsRunningBefore+replicas != currentlyRunningPods {
allPods, err := c.Pods(api.NamespaceAll).List(labels.Everything(), fields.Everything())
expectNoError(err)
runningPods := 0
for _, pod := range allPods.Items {
if pod.Status.Phase == api.PodRunning {
runningPods += 1
}
}
currentlyRunningPods = runningPods
if startTime.Add(timeout).Before(time.Now()) {
break
}
time.Sleep(5 * time.Second)
}
Expect(currentlyRunningPods).To(Equal(podsRunningBefore + replicas))
}
Example 9: checkExistingRCRecovers
func checkExistingRCRecovers(f Framework) {
By("assert that the pre-existing replication controller recovers")
podClient := f.Client.Pods(f.Namespace.Name)
rcSelector := labels.Set{"name": "baz"}.AsSelector()
By("deleting pods from existing replication controller")
expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
pods, err := podClient.List(rcSelector, fields.Everything())
Expect(err).NotTo(HaveOccurred())
if len(pods.Items) == 0 {
return false, nil
}
for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, api.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
}
return true, nil
}))
By("waiting for replication controller to recover")
expectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
pods, err := podClient.List(rcSelector, fields.Everything())
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
if api.IsPodReady(&pod) {
return true, nil
}
}
return false, nil
}))
}
Example 10: NewSourceAPI
// NewSourceAPI creates a config source that watches for changes to the services and endpoints.
func NewSourceAPI(c *client.Client, period time.Duration, servicesChan chan<- ServiceUpdate, endpointsChan chan<- EndpointsUpdate) {
servicesLW := cache.NewListWatchFromClient(c, "services", api.NamespaceAll, fields.Everything())
endpointsLW := cache.NewListWatchFromClient(c, "endpoints", api.NamespaceAll, fields.Everything())
newServicesSourceApiFromLW(servicesLW, period, servicesChan)
newEndpointsSourceApiFromLW(endpointsLW, period, endpointsChan)
}
Example 11: MakeGraph
func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, error) {
g := osgraph.New()
svcs, err := d.K.Services(namespace).List(labels.Everything())
if err != nil {
return g, err
}
iss, err := d.C.ImageStreams(namespace).List(labels.Everything(), fields.Everything())
if err != nil {
return g, err
}
bcs, err := d.C.BuildConfigs(namespace).List(labels.Everything(), fields.Everything())
if err != nil {
return g, err
}
dcs, err := d.C.DeploymentConfigs(namespace).List(labels.Everything(), fields.Everything())
if err != nil {
return g, err
}
builds := &buildapi.BuildList{}
if len(bcs.Items) > 0 {
if b, err := d.C.Builds(namespace).List(labels.Everything(), fields.Everything()); err == nil {
builds = b
}
}
rcs, err := d.K.ReplicationControllers(namespace).List(labels.Everything())
if err != nil {
rcs = &kapi.ReplicationControllerList{}
}
for i := range iss.Items {
imagegraph.EnsureImageStreamNode(g, &iss.Items[i])
imagegraph.EnsureAllImageStreamTagNodes(g, &iss.Items[i])
}
for i := range bcs.Items {
build := buildgraph.EnsureBuildConfigNode(g, &bcs.Items[i])
buildedges.AddInputOutputEdges(g, build)
buildedges.JoinBuilds(build, builds.Items)
}
for i := range dcs.Items {
deploy := deploygraph.EnsureDeploymentConfigNode(g, &dcs.Items[i])
deployedges.AddTriggerEdges(g, deploy)
deployedges.JoinDeployments(deploy, rcs.Items)
}
for i := range svcs.Items {
service := kubegraph.EnsureServiceNode(g, &svcs.Items[i])
kubeedges.AddExposedPodTemplateSpecEdges(g, service)
}
imageedges.AddAllImageStreamRefEdges(g)
return g, nil
}
Example 12: watchLoop
// watchLoop loops forever looking for new events. If an error occurs it will close the channel and return.
func watchLoop(eventClient kubeclient.EventInterface, eventsChan chan<- eventsUpdate, errorChan chan<- error) {
defer close(eventsChan)
defer close(errorChan)
events, err := eventClient.List(kubelabels.Everything(), kubefields.Everything())
if err != nil {
glog.Errorf("Failed to load events: %v", err)
errorChan <- err
return
}
resourceVersion := events.ResourceVersion
eventsChan <- eventsUpdate{events: events}
watcher, err := eventClient.Watch(kubelabels.Everything(), kubefields.Everything(), resourceVersion)
if err != nil {
glog.Errorf("Failed to start watch for new events: %v", err)
errorChan <- err
return
}
defer watcher.Stop()
watchChannel := watcher.ResultChan()
for {
watchUpdate, ok := <-watchChannel
if !ok {
err := errors.New("watchLoop channel closed")
errorChan <- err
return
}
if watchUpdate.Type == kubewatch.Error {
if status, ok := watchUpdate.Object.(*kubeapi.Status); ok {
err := fmt.Errorf("Error during watch: %#v", status)
errorChan <- err
return
}
err := fmt.Errorf("Received unexpected error: %#v", watchUpdate.Object)
errorChan <- err
return
}
if event, ok := watchUpdate.Object.(*kubeapi.Event); ok {
switch watchUpdate.Type {
case kubewatch.Added, kubewatch.Modified:
eventsChan <- eventsUpdate{&kubeapi.EventList{Items: []kubeapi.Event{*event}}}
case kubewatch.Deleted:
// Deleted events are silently ignored
default:
err := fmt.Errorf("Unknown watchUpdate.Type: %#v", watchUpdate.Type)
errorChan <- err
return
}
resourceVersion = event.ResourceVersion
continue
}
}
}
Example 13: newLoadBalancerController
// newLoadBalancerController creates a new controller from the given config.
func newLoadBalancerController(cfg *loadBalancerConfig, kubeClient *client.Client, namespace string) *loadBalancerController {
lbc := loadBalancerController{
cfg: cfg,
client: kubeClient,
queue: workqueue.New(),
reloadRateLimiter: util.NewTokenBucketRateLimiter(
reloadQPS, int(reloadQPS)),
targetService: *targetService,
forwardServices: *forwardServices,
httpPort: *httpPort,
tcpServices: map[string]int{},
}
for _, service := range strings.Split(*tcpServices, ",") {
portSplit := strings.Split(service, ":")
if len(portSplit) != 2 {
glog.Errorf("Ignoring misconfigured TCP service %v", service)
continue
}
if port, err := strconv.Atoi(portSplit[1]); err != nil {
glog.Errorf("Ignoring misconfigured TCP service %v: %v", service, err)
continue
} else {
lbc.tcpServices[portSplit[0]] = port
}
}
enqueue := func(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
glog.Infof("Couldn't get key for object %+v: %v", obj, err)
return
}
lbc.queue.Add(key)
}
eventHandlers := framework.ResourceEventHandlerFuncs{
AddFunc: enqueue,
DeleteFunc: enqueue,
UpdateFunc: func(old, cur interface{}) {
if !reflect.DeepEqual(old, cur) {
enqueue(cur)
}
},
}
lbc.svcLister.Store, lbc.svcController = framework.NewInformer(
cache.NewListWatchFromClient(
lbc.client, "services", namespace, fields.Everything()),
&api.Service{}, resyncPeriod, eventHandlers)
lbc.epLister.Store, lbc.epController = framework.NewInformer(
cache.NewListWatchFromClient(
lbc.client, "endpoints", namespace, fields.Everything()),
&api.Endpoints{}, resyncPeriod, eventHandlers)
return &lbc
}
Example 14: TestAuthorizationRestrictedAccessForProjectAdmins
func TestAuthorizationRestrictedAccessForProjectAdmins(t *testing.T) {
_, clusterAdminKubeConfig, err := testutil.StartTestMaster()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
clusterAdminClient, err := testutil.GetClusterAdminClient(clusterAdminKubeConfig)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
clusterAdminClientConfig, err := testutil.GetClusterAdminClientConfig(clusterAdminKubeConfig)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
haroldClient, err := testutil.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, "hammer-project", "harold")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
markClient, err := testutil.CreateNewProject(clusterAdminClient, *clusterAdminClientConfig, "mallet-project", "mark")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
_, err = haroldClient.DeploymentConfigs("hammer-project").List(labels.Everything(), fields.Everything())
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
_, err = markClient.DeploymentConfigs("hammer-project").List(labels.Everything(), fields.Everything())
if (err == nil) || !kapierror.IsForbidden(err) {
t.Fatalf("unexpected error: %v", err)
}
// projects are a special case where a get of a project actually sets a namespace. Make sure that
// the namespace is properly special cased and set for authorization rules
_, err = haroldClient.Projects().Get("hammer-project")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
_, err = markClient.Projects().Get("hammer-project")
if (err == nil) || !kapierror.IsForbidden(err) {
t.Fatalf("unexpected error: %v", err)
}
// wait for the project authorization cache to catch the change. It is on a one second period
waitForProject(t, haroldClient, "hammer-project", 1*time.Second, 10)
waitForProject(t, markClient, "mallet-project", 1*time.Second, 10)
}
Example 15: NewTokensController
// NewTokensController returns a new *TokensController.
func NewTokensController(cl client.Interface, options TokensControllerOptions) *TokensController {
e := &TokensController{
client: cl,
token: options.TokenGenerator,
rootCA: options.RootCA,
}
e.serviceAccounts, e.serviceAccountController = framework.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func() (runtime.Object, error) {
return e.client.ServiceAccounts(api.NamespaceAll).List(labels.Everything(), fields.Everything())
},
WatchFunc: func(rv string) (watch.Interface, error) {
return e.client.ServiceAccounts(api.NamespaceAll).Watch(labels.Everything(), fields.Everything(), rv)
},
},
&api.ServiceAccount{},
options.ServiceAccountResync,
framework.ResourceEventHandlerFuncs{
AddFunc: e.serviceAccountAdded,
UpdateFunc: e.serviceAccountUpdated,
DeleteFunc: e.serviceAccountDeleted,
},
cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
)
tokenSelector := fields.SelectorFromSet(map[string]string{client.SecretType: string(api.SecretTypeServiceAccountToken)})
e.secrets, e.secretController = framework.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func() (runtime.Object, error) {
return e.client.Secrets(api.NamespaceAll).List(labels.Everything(), tokenSelector)
},
WatchFunc: func(rv string) (watch.Interface, error) {
return e.client.Secrets(api.NamespaceAll).Watch(labels.Everything(), tokenSelector, rv)
},
},
&api.Secret{},
options.SecretResync,
framework.ResourceEventHandlerFuncs{
AddFunc: e.secretAdded,
UpdateFunc: e.secretUpdated,
DeleteFunc: e.secretDeleted,
},
cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc},
)
e.serviceAccountsSynced = e.serviceAccountController.HasSynced
e.secretsSynced = e.secretController.HasSynced
return e
}
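A closing note on Example 15: the secret informer deliberately narrows its List/Watch with fields.SelectorFromSet instead of fields.Everything(), so only service-account-token secrets are cached. A minimal sketch of the difference, where the field key "type" and the secret type strings are illustrative assumptions rather than values taken from the example above:

package main

import (
	"fmt"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
)

func main() {
	// Hypothetical field sets describing two secrets, for illustration only.
	tokenSecret := fields.Set{"type": "kubernetes.io/service-account-token"}
	opaqueSecret := fields.Set{"type": "Opaque"}

	// Everything() matches both ...
	fmt.Println(fields.Everything().Matches(tokenSecret), fields.Everything().Matches(opaqueSecret)) // true true

	// ... while a selector built from a set only matches objects carrying those field values.
	tokenOnly := fields.SelectorFromSet(fields.Set{"type": "kubernetes.io/service-account-token"})
	fmt.Println(tokenOnly.Matches(tokenSecret), tokenOnly.Matches(opaqueSecret)) // true false
}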