This article collects typical usage examples of the OneTermEqualSelector function from the Go package k8s.io/kubernetes/pkg/fields. The 15 code examples below, sorted by popularity, show how OneTermEqualSelector is used in real code.
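Before the examples, here is a minimal sketch of what OneTermEqualSelector itself does: it builds a fields.Selector that matches objects whose named field equals the given value. The import path below is the legacy one used throughout these examples; newer code imports k8s.io/apimachinery/pkg/fields instead.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/fields" // legacy path used by these examples
)

func main() {
	sel := fields.OneTermEqualSelector("metadata.name", "my-service")
	fmt.Println(sel.String())                                           // "metadata.name=my-service"
	fmt.Println(sel.Matches(fields.Set{"metadata.name": "my-service"})) // true
	fmt.Println(sel.Matches(fields.Set{"metadata.name": "other"}))      // false
}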
Example 1: NewDockerRegistryServiceController
// NewDockerRegistryServiceController returns a new *DockerRegistryServiceController.
func NewDockerRegistryServiceController(cl client.Interface, options DockerRegistryServiceControllerOptions) *DockerRegistryServiceController {
	e := &DockerRegistryServiceController{
		client:                cl,
		dockercfgController:   options.DockercfgController,
		registryLocationQueue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		secretsToUpdate:       workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		serviceName:           options.RegistryServiceName,
		serviceNamespace:      options.RegistryNamespace,
		dockerURLsIntialized:  options.DockerURLsIntialized,
	}

	e.serviceCache, e.serviceController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(opts kapi.ListOptions) (runtime.Object, error) {
				opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", options.RegistryServiceName)
				return e.client.Services(options.RegistryNamespace).List(opts)
			},
			WatchFunc: func(opts kapi.ListOptions) (watch.Interface, error) {
				opts.FieldSelector = fields.OneTermEqualSelector("metadata.name", options.RegistryServiceName)
				return e.client.Services(options.RegistryNamespace).Watch(opts)
			},
		},
		&kapi.Service{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				e.enqueueRegistryLocationQueue()
			},
			UpdateFunc: func(old, cur interface{}) {
				e.enqueueRegistryLocationQueue()
			},
			DeleteFunc: func(obj interface{}) {
				e.enqueueRegistryLocationQueue()
			},
		},
	)
	e.servicesSynced = e.serviceController.HasSynced
	e.syncRegistryLocationHandler = e.syncRegistryLocationChange

	dockercfgOptions := kapi.ListOptions{FieldSelector: fields.SelectorFromSet(map[string]string{kapi.SecretTypeField: string(kapi.SecretTypeDockercfg)})}
	e.secretCache, e.secretController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(opts kapi.ListOptions) (runtime.Object, error) {
				return e.client.Secrets(kapi.NamespaceAll).List(dockercfgOptions)
			},
			WatchFunc: func(opts kapi.ListOptions) (watch.Interface, error) {
				return e.client.Secrets(kapi.NamespaceAll).Watch(dockercfgOptions)
			},
		},
		&kapi.Secret{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{},
	)
	e.secretsSynced = e.secretController.HasSynced
	e.syncSecretHandler = e.syncSecretUpdate

	return e
}
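Note that the service informer builds its selector with OneTermEqualSelector while the secret list options use fields.SelectorFromSet; for a single key/value pair the two are interchangeable, since fields.Set is just map[string]string. A quick sketch, assuming the same imports as the example above:

bySet := fields.SelectorFromSet(fields.Set{kapi.SecretTypeField: string(kapi.SecretTypeDockercfg)})
byTerm := fields.OneTermEqualSelector(kapi.SecretTypeField, string(kapi.SecretTypeDockercfg))
fmt.Println(bySet.String() == byTerm.String()) // true: both yield "type=kubernetes.io/dockercfg"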
Example 2: TestClusterPolicyBindingListRespectingFields
// TestClusterPolicyBindingListRespectingFields tests that a List() call against the ReadOnlyClusterPolicyBindingCache,
// filtered with a field selector, returns all clusterPolicyBindings matching that field
func TestClusterPolicyBindingListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicybindingcache()
	defer close(cacheChannel)

	var clusterPolicyBindings *authorizationapi.ClusterPolicyBindingList
	var err error

	name := "uniqueClusterPolicyBindingName"
	field := fields.OneTermEqualSelector("metadata.name", name)

	util.Until(func() {
		clusterPolicyBindings, err = testCache.List(&unversioned.ListOptions{FieldSelector: unversioned.FieldSelector{Selector: field}})
		if (err == nil) &&
			(clusterPolicyBindings != nil) &&
			(len(clusterPolicyBindings.Items) == 1) &&
			(clusterPolicyBindings.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicyBinding with fieldSelector using ReadOnlyClusterPolicyBindingCache: %v", err)
	case clusterPolicyBindings == nil:
		t.Error("ClusterPolicyBindingList using fieldSelector is nil")
	case len(clusterPolicyBindings.Items) != 1:
		t.Errorf("Expected clusterPolicyBindingList using fieldSelector to contain 1 item, had %d", len(clusterPolicyBindings.Items))
	case clusterPolicyBindings.Items[0].Name != name:
		t.Errorf("Expected clusterPolicyBinding to have name '%s', had '%s'", name, clusterPolicyBindings.Items[0].Name)
	}
}
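The util.Until loop in these cache tests is a small poll-until-observed pattern: run the check on a fixed period until the expected state appears, then close the stop channel that Until is watching. The same pattern in isolation, with cacheHasExpectedItem as a hypothetical predicate over the cache under test:

stop := make(chan struct{})
util.Until(func() {
	if cacheHasExpectedItem() { // hypothetical predicate
		close(stop) // closing the stop channel ends the Until loop
	}
}, 1*time.Millisecond, stop)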
Example 3: TestPolicyBindingListRespectingFields
// TestPolicyBindingListRespectingFields tests that a List() call for a given namespace against the ReadOnlyPolicyBindingCache,
// filtered with a field selector, returns all policyBindings in that namespace matching that field
func TestPolicyBindingListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicybindingcache()
	defer close(cacheChannel)

	var policyBindings *authorizationapi.PolicyBindingList
	var err error

	name := "uniquePolicyBindingName"
	namespace := "namespaceTwo"
	label := labels.Everything()
	field := fields.OneTermEqualSelector("metadata.name", name)

	util.Until(func() {
		policyBindings, err = testCache.List(label, field, namespace)
		if (err == nil) &&
			(policyBindings != nil) &&
			(len(policyBindings.Items) == 1) &&
			(policyBindings.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policyBindingList using ReadOnlyPolicyBindingCache: %v", err)
	case policyBindings == nil:
		t.Error("PolicyBindingList is nil.")
	case len(policyBindings.Items) != 1:
		t.Errorf("Expected policyBindingList to have 1 item, had %d", len(policyBindings.Items))
	case policyBindings.Items[0].Name != name:
		t.Errorf("Expected policyBinding name to be '%s', was '%s'", name, policyBindings.Items[0].Name)
	}
}
Example 4: WaitForRunningBuild
// WaitForRunningBuild waits until the specified build is no longer New or Pending. Returns true if
// the build ran within timeout, false if it did not, and an error if any other error state occurred.
// The last observed Build state is returned.
func WaitForRunningBuild(watcher rest.Watcher, ctx kapi.Context, build *api.Build, timeout time.Duration) (*api.Build, bool, error) {
	fieldSelector := fields.OneTermEqualSelector("metadata.name", build.Name)
	options := &kapi.ListOptions{FieldSelector: fieldSelector, ResourceVersion: build.ResourceVersion}
	w, err := watcher.Watch(ctx, options)
	if err != nil {
		return nil, false, err
	}
	defer w.Stop()

	ch := w.ResultChan()
	observed := build
	expire := time.After(timeout)
	for {
		select {
		case event := <-ch:
			obj, ok := event.Object.(*api.Build)
			if !ok {
				return observed, false, fmt.Errorf("received unknown object while watching for builds")
			}
			observed = obj

			switch obj.Status.Phase {
			case api.BuildPhaseRunning, api.BuildPhaseComplete, api.BuildPhaseFailed, api.BuildPhaseError, api.BuildPhaseCancelled:
				return observed, true, nil
			case api.BuildPhaseNew, api.BuildPhasePending:
			default:
				return observed, false, ErrUnknownBuildPhase
			}
		case <-expire:
			return observed, false, nil
		}
	}
}
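A hypothetical call site, to show how the three return values are typically consumed (buildWatcher and ctx are assumed to be in scope, and the timeout is illustrative):

latest, ran, err := WaitForRunningBuild(buildWatcher, ctx, build, 5*time.Minute)
if err != nil {
	return err
}
if !ran {
	return fmt.Errorf("build %s did not leave New/Pending within the timeout", build.Name)
}
fmt.Printf("build %s reached phase %s\n", latest.Name, latest.Status.Phase)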
Example 5: StartPods
// StartPods checks for numPods in TestNS. If they exist, it no-ops; otherwise it starts up
// a temp rc, scales it to match numPods, then deletes the rc, leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
	start := time.Now()
	defer func() {
		glog.Infof("StartPods took %v with numPods %d", time.Since(start), numPods)
	}()
	hostField := fields.OneTermEqualSelector(client.PodHost, host)
	pods, err := restClient.Pods(TestNS).List(labels.Everything(), hostField)
	if err != nil || len(pods.Items) == numPods {
		return err
	}
	glog.Infof("Found %d pods that match host %v, require %d", len(pods.Items), hostField, numPods)

	// For the sake of simplicity, assume all pods in TestNS have selectors matching TestRCManifest.
	controller := RCFromManifest(TestRCManifest)

	// Make the rc unique to the given host.
	controller.Spec.Replicas = numPods
	controller.Spec.Template.Spec.NodeName = host
	controller.Name = controller.Name + host
	controller.Spec.Selector["host"] = host
	controller.Spec.Template.Labels["host"] = host

	if rc, err := StartRC(controller, restClient); err != nil {
		return err
	} else {
		// Delete the rc, otherwise when we restart master components for the next benchmark
		// the rc controller will race with the pods controller in the rc manager.
		return restClient.ReplicationControllers(TestNS).Delete(rc.Name)
	}
}
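A hypothetical benchmark setup built on StartPods: ensure a fixed pod count on each node before measuring, reusing the pods when they already exist (the node names and count are illustrative):

for _, host := range []string{"node-1", "node-2"} {
	if err := StartPods(10, host, restClient); err != nil {
		glog.Fatalf("failed to start pods on %s: %v", host, err)
	}
}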
Example 6: markAllPodsNotReady
// markAllPodsNotReady updates, from the master, the ready status of all pods running on the given node,
// returning an aggregate error if any individual update fails.
func (nc *NodeController) markAllPodsNotReady(nodeName string) error {
	glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
	opts := api.ListOptions{FieldSelector: fields.OneTermEqualSelector(client.PodHost, nodeName)}
	pods, err := nc.kubeClient.Core().Pods(api.NamespaceAll).List(opts)
	if err != nil {
		return err
	}

	errMsg := []string{}
	for _, pod := range pods.Items {
		// Defensive check, also needed for tests.
		if pod.Spec.NodeName != nodeName {
			continue
		}

		for i, cond := range pod.Status.Conditions {
			if cond.Type == api.PodReady {
				pod.Status.Conditions[i].Status = api.ConditionFalse
				glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
				pod, err := nc.kubeClient.Core().Pods(pod.Namespace).UpdateStatus(&pod)
				if err != nil {
					glog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err)
					errMsg = append(errMsg, fmt.Sprintf("%v", err))
				}
				break
			}
		}
	}
	if len(errMsg) == 0 {
		return nil
	}
	return fmt.Errorf("%v", strings.Join(errMsg, "; "))
}
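The loop above deliberately aggregates per-pod failures instead of returning on the first one, so a single failing UpdateStatus does not block the remaining pods. The same pattern in isolation, with items and process as hypothetical stand-ins:

var errMsg []string
for _, item := range items { // hypothetical work list
	if err := process(item); err != nil { // hypothetical per-item operation
		errMsg = append(errMsg, err.Error()) // record the failure and keep going
	}
}
if len(errMsg) > 0 {
	return fmt.Errorf("%s", strings.Join(errMsg, "; "))
}
return nil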
Example 7: deletePods
// deletePods deletes, from the master, all pods running on the given node, and returns true
// if any pods were marked for deletion in this pass.
func (nc *NodeController) deletePods(nodeName string) (bool, error) {
	remaining := false
	pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.OneTermEqualSelector(client.PodHost, nodeName))
	if err != nil {
		return remaining, err
	}

	if len(pods.Items) > 0 {
		nc.recordNodeEvent(nodeName, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
	}

	for _, pod := range pods.Items {
		// Defensive check, also needed for tests.
		if pod.Spec.NodeName != nodeName {
			continue
		}
		// if the pod has already been deleted, ignore it
		if pod.DeletionGracePeriodSeconds != nil {
			continue
		}

		glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
		nc.recorder.Eventf(&pod, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
		if err := nc.kubeClient.Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
			return false, err
		}
		remaining = true
	}
	return remaining, nil
}
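A hypothetical eviction flow around deletePods: evict the node's pods, and only treat the node as drained once nothing was left to delete (markNodeDrained is an illustrative helper, not part of the controller):

remaining, err := nc.deletePods(node.Name)
if err != nil {
	return err
}
if remaining {
	// Some pods were just marked for deletion; check again on the next sync.
	return nil
}
markNodeDrained(node) // hypothetical follow-up once the node is empty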
Example 8: NewDockercfgController
// NewDockercfgController returns a new *DockercfgController.
func NewDockercfgController(cl kclientset.Interface, options DockercfgControllerOptions) *DockercfgController {
	e := &DockercfgController{
		client:               cl,
		queue:                workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		dockerURLsIntialized: options.DockerURLsIntialized,
	}

	var serviceAccountCache cache.Store
	serviceAccountCache, e.serviceAccountController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
			},
		},
		&api.ServiceAccount{},
		options.Resync,
		cache.ResourceEventHandlerFuncs{
			AddFunc: func(obj interface{}) {
				serviceAccount := obj.(*api.ServiceAccount)
				glog.V(5).Infof("Adding service account %s", serviceAccount.Name)
				e.enqueueServiceAccount(serviceAccount)
			},
			UpdateFunc: func(old, cur interface{}) {
				serviceAccount := cur.(*api.ServiceAccount)
				glog.V(5).Infof("Updating service account %s", serviceAccount.Name)
				// Resync on service object relist.
				e.enqueueServiceAccount(serviceAccount)
			},
		},
	)
	e.serviceAccountCache = NewEtcdMutationCache(serviceAccountCache)

	tokenSecretSelector := fields.OneTermEqualSelector(api.SecretTypeField, string(api.SecretTypeServiceAccountToken))
	e.secretCache, e.secretController = cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				options.FieldSelector = tokenSecretSelector
				return e.client.Core().Secrets(api.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				options.FieldSelector = tokenSecretSelector
				return e.client.Core().Secrets(api.NamespaceAll).Watch(options)
			},
		},
		&api.Secret{},
		options.Resync,
		cache.ResourceEventHandlerFuncs{
			AddFunc:    func(cur interface{}) { e.handleTokenSecretUpdate(nil, cur) },
			UpdateFunc: func(old, cur interface{}) { e.handleTokenSecretUpdate(old, cur) },
			DeleteFunc: e.handleTokenSecretDelete,
		},
	)

	e.syncHandler = e.syncServiceAccount
	return e
}
Example 9: hasPods
// hasPods returns true if the provided node still has pods scheduled to it, or an error if
// the server could not be contacted.
func (nc *NodeController) hasPods(nodeName string) (bool, error) {
	pods, err := nc.kubeClient.Pods(api.NamespaceAll).List(labels.Everything(), fields.OneTermEqualSelector(client.PodHost, nodeName))
	if err != nil {
		return false, err
	}
	return len(pods.Items) > 0, nil
}
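A hypothetical caller, polling hasPods until a drained node reports no remaining pods (wait.Poll is the standard k8s.io/kubernetes/pkg/util/wait helper; the interval and timeout are illustrative):

err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
	has, err := nc.hasPods(node.Name)
	return !has, err // done once the node reports no pods
})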
Example 10: Scale
// Scale updates a ReplicationController to a new size, with an optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for its replica count to reach the new value
// (if waitForReplicas is not nil).
func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
	if preconditions == nil {
		preconditions = &ScalePrecondition{-1, ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
	if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}
	if waitForReplicas != nil {
		watchOptions := api.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", name), ResourceVersion: "0"}
		watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions)
		if err != nil {
			return err
		}
		_, err = watch.Until(waitForReplicas.Timeout, watcher, func(event watch.Event) (bool, error) {
			if event.Type != watch.Added && event.Type != watch.Modified {
				return false, nil
			}
			rc := event.Object.(*api.ReplicationController)
			return rc.Status.ObservedGeneration >= rc.Generation && rc.Status.Replicas == rc.Spec.Replicas, nil
		})
		if err == wait.ErrWaitTimeout {
			return fmt.Errorf("timed out waiting for %q to be synced", name)
		}
		return err
	}
	return nil
}
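A hypothetical call, resizing a "frontend" controller to five replicas and waiting up to two minutes for the status to converge (the namespace, name, and timings are illustrative):

retry := &RetryParams{Interval: 100 * time.Millisecond, Timeout: 30 * time.Second}
waitFor := &RetryParams{Interval: time.Second, Timeout: 2 * time.Minute}
if err := scaler.Scale("default", "frontend", 5, nil, retry, waitFor); err != nil {
	return err
}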
Example 11: TestClusterPolicyListRespectingFields
// TestClusterPolicyListRespectingFields tests that a List() call against the ReadOnlyClusterPolicyCache,
// filtered with a field selector, returns all clusterPolicies matching that field
func TestClusterPolicyListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlyclusterpolicycache()
	defer close(cacheChannel)

	var clusterPolicies *authorizationapi.ClusterPolicyList
	var err error

	name := "uniqueClusterPolicyName"
	label := labels.Everything()
	field := fields.OneTermEqualSelector("metadata.name", name)

	util.Until(func() {
		clusterPolicies, err = testCache.List(label, field)
		if (err == nil) &&
			(clusterPolicies != nil) &&
			(len(clusterPolicies.Items) == 1) &&
			(clusterPolicies.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting clusterPolicyList with fieldSelector using ReadOnlyClusterPolicyCache: %v", err)
	case clusterPolicies == nil:
		t.Error("ClusterPolicyList is nil.")
	case len(clusterPolicies.Items) != 1:
		t.Errorf("Expected clusterPolicyList to contain 1 clusterPolicy, contained %d", len(clusterPolicies.Items))
	case clusterPolicies.Items[0].Name != name:
		t.Errorf("Expected field-selected clusterPolicy name to be '%s', was '%s'", name, clusterPolicies.Items[0].Name)
	}
}
Example 12: NewDockercfgDeletedController
// NewDockercfgDeletedController returns a new *DockercfgDeletedController.
func NewDockercfgDeletedController(cl client.Interface, options DockercfgDeletedControllerOptions) *DockercfgDeletedController {
	e := &DockercfgDeletedController{
		client: cl,
	}

	dockercfgSelector := fields.OneTermEqualSelector(client.SecretType, string(api.SecretTypeDockercfg))
	_, e.secretController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				opts := api.ListOptions{FieldSelector: dockercfgSelector}
				return e.client.Secrets(api.NamespaceAll).List(opts)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				opts := api.ListOptions{FieldSelector: dockercfgSelector, ResourceVersion: options.ResourceVersion}
				return e.client.Secrets(api.NamespaceAll).Watch(opts)
			},
		},
		&api.Secret{},
		options.Resync,
		framework.ResourceEventHandlerFuncs{
			DeleteFunc: e.secretDeleted,
		},
	)

	return e
}
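Unlike Example 8, which mutates the incoming options in place, this WatchFunc builds fresh ListOptions and must copy ResourceVersion across by hand; dropping it would make a restarted watch rewind instead of resuming where the informer left off. A sketch of the same pattern behind a minimal hypothetical interface:

// secretWatcher is a hypothetical stand-in for e.client.Secrets(namespace).
type secretWatcher interface {
	Watch(opts api.ListOptions) (watch.Interface, error)
}

func watchWithFixedSelector(c secretWatcher, sel fields.Selector, incoming api.ListOptions) (watch.Interface, error) {
	return c.Watch(api.ListOptions{
		FieldSelector:   sel,
		ResourceVersion: incoming.ResourceVersion, // preserve the informer's resume point
	})
}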
Example 13: TestPolicyListRespectingFields
// TestPolicyListRespectingFields tests that a List() call for a given namespace against the ReadOnlyPolicyCache,
// filtered with a field selector, returns all policies in that namespace matching that field
func TestPolicyListRespectingFields(t *testing.T) {
	testCache, cacheChannel, testChannel := beforeTestingSetup_readonlypolicycache()
	defer close(cacheChannel)

	var policies *authorizationapi.PolicyList
	var err error

	name := "uniquePolicyName"
	namespace := "namespaceTwo"
	field := fields.OneTermEqualSelector("metadata.name", name)

	utilwait.Until(func() {
		policies, err = testCache.List(&kapi.ListOptions{FieldSelector: field}, namespace)
		if (err == nil) &&
			(policies != nil) &&
			(len(policies.Items) == 1) &&
			(policies.Items[0].Name == name) {
			close(testChannel)
		}
	}, 1*time.Millisecond, testChannel)

	switch {
	case err != nil:
		t.Errorf("Error getting policies using ReadOnlyPolicyCache: %v", err)
	case policies == nil:
		t.Error("PolicyList is nil")
	case len(policies.Items) != 1:
		t.Errorf("Expected policyList to have 1 policy, had %d", len(policies.Items))
	case policies.Items[0].Name != name:
		t.Errorf("Expected policy name to be '%s', was '%s'", name, policies.Items[0].Name)
	}
}
Example 14: hasPods
// hasPods returns true if the provided node still has pods scheduled to it, or an error if
// the server could not be contacted.
func (nc *NodeController) hasPods(nodeName string) (bool, error) {
	selector := fields.OneTermEqualSelector(client.PodHost, nodeName)
	options := api.ListOptions{FieldSelector: selector}
	pods, err := nc.kubeClient.Core().Pods(api.NamespaceAll).List(options)
	if err != nil {
		return false, err
	}
	return len(pods.Items) > 0, nil
}
Example 15: Scale
// Scale updates a ReplicationController to a new size, with an optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for its replica count to reach the new value
// (if waitForReplicas is not nil).
func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
	if preconditions == nil {
		preconditions = &ScalePrecondition{-1, ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	var updatedResourceVersion string
	cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, &updatedResourceVersion)
	if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}
	if waitForReplicas != nil {
		checkRC := func(rc *api.ReplicationController) bool {
			if uint(rc.Spec.Replicas) != newSize {
				// the size has been changed by another party; no need to wait for the new change to complete.
				return true
			}
			return rc.Status.ObservedGeneration >= rc.Generation && rc.Status.Replicas == rc.Spec.Replicas
		}
		// If the number of replicas doesn't change, then the update may not even
		// be sent to the underlying database (we don't send no-op changes).
		// In that case, <updatedResourceVersion> will hold the value of the most
		// recent update (which may be far in the past), so we may get a "too old
		// resource version" error from the watch, or potentially no
		// ReplicationController events will be delivered, since the object may
		// already be in the expected state.
		// To protect against both, we first issue a Get() to ensure that we
		// are not already in the expected state.
		currentRC, err := scaler.c.ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if !checkRC(currentRC) {
			watchOptions := api.ListOptions{
				FieldSelector:   fields.OneTermEqualSelector("metadata.name", name),
				ResourceVersion: updatedResourceVersion,
			}
			watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions)
			if err != nil {
				return err
			}
			_, err = watch.Until(waitForReplicas.Timeout, watcher, func(event watch.Event) (bool, error) {
				if event.Type != watch.Added && event.Type != watch.Modified {
					return false, nil
				}
				return checkRC(event.Object.(*api.ReplicationController)), nil
			})
			if err == wait.ErrWaitTimeout {
				return fmt.Errorf("timed out waiting for %q to be synced", name)
			}
			return err
		}
	}
	return nil
}
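Example 15 refines Example 10 with a get-before-watch guard: check the current state with a fresh GET and only open a watch when the condition is not yet satisfied, so an already-converged controller returns immediately and a stale resource version never reaches the watch. The pattern in isolation, with getLatest, isDone, and watchFrom as hypothetical helpers:

obj, version, err := getLatest() // hypothetical: fresh GET returning the object and its resource version
if err != nil {
	return err
}
if isDone(obj) { // hypothetical condition check
	return nil // already in the desired state; no watch needed
}
return watchFrom(version) // hypothetical: watch for the condition starting at `version`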