This article collects typical usage examples of the Until function from the Golang package k8s.io/kubernetes/pkg/watch. If you have been wondering what the Until function does, how to call it, and what it looks like in real code, the curated examples below should help.
The following presents 15 code examples of the Until function, sorted by popularity by default.
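All of the examples below follow the same basic pattern: obtain a watch.Interface from a client, then hand it to watch.Until together with a timeout and a ConditionFunc; Until blocks until a condition returns true, a condition returns an error, or the timeout expires. As a quick orientation, here is a minimal, self-contained sketch of that pattern. It is not taken from any of the projects below; it assumes the same-era k8s.io/kubernetes/pkg/watch and pkg/api packages used throughout this page, substitutes watch.NewFake (a test helper from the watch package) for a real API-server watch, and the pod name "example" is purely illustrative.
package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/watch"
)

func main() {
	// A FakeWatcher stands in for the watch.Interface a real client would return.
	w := watch.NewFake()
	defer w.Stop()

	// Simulate the server delivering events on the watch channel.
	go func() {
		w.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "example"}})
		w.Modify(&api.Pod{
			ObjectMeta: api.ObjectMeta{Name: "example"},
			Status:     api.PodStatus{Phase: api.PodRunning},
		})
	}()

	// Block until the pod is observed Running, a condition errors, or a minute passes.
	event, err := watch.Until(1*time.Minute, w, func(e watch.Event) (bool, error) {
		pod, ok := e.Object.(*api.Pod)
		if !ok {
			return false, fmt.Errorf("unexpected object type %T", e.Object)
		}
		return pod.Status.Phase == api.PodRunning, nil
	})
	if err != nil {
		fmt.Println("condition was not met:", err)
		return
	}
	fmt.Printf("final event: %s %s\n", event.Type, event.Object.(*api.Pod).Name)
}
Every example that follows is a variation on this shape; they differ mainly in which resource is watched, which condition ends the wait, and how timeouts (e.g. wait.ErrWaitTimeout) and errors are reported.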
Example 1: waitForQuota
func waitForQuota(t *testing.T, quota *v1.ResourceQuota, clientset *clientset.Clientset) {
	w, err := clientset.Core().ResourceQuotas(quota.Namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: quota.Name}))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if _, err := clientset.Core().ResourceQuotas(quota.Namespace).Create(quota); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = watch.Until(1*time.Minute, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Modified:
		default:
			return false, nil
		}

		switch cast := event.Object.(type) {
		case *v1.ResourceQuota:
			if len(cast.Status.Hard) > 0 {
				return true, nil
			}
		}

		return false, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
Example 2: WaitForRunningDeployerPod
// WaitForRunningDeployerPod waits a given period of time until the deployer pod
// for the given replication controller reaches a phase from which its logs can be
// retrieved (Running, Succeeded, or Failed).
func WaitForRunningDeployerPod(podClient kcoreclient.PodsGetter, rc *api.ReplicationController, timeout time.Duration) error {
	podName := DeployerPodNameForDeployment(rc.Name)
	canGetLogs := func(p *api.Pod) bool {
		return api.PodSucceeded == p.Status.Phase || api.PodFailed == p.Status.Phase || api.PodRunning == p.Status.Phase
	}
	pod, err := podClient.Pods(rc.Namespace).Get(podName)
	if err == nil && canGetLogs(pod) {
		return nil
	}

	watcher, err := podClient.Pods(rc.Namespace).Watch(
		api.ListOptions{
			FieldSelector: fields.Set{"metadata.name": podName}.AsSelector(),
		},
	)
	if err != nil {
		return err
	}
	defer watcher.Stop()

	if _, err := watch.Until(timeout, watcher, func(e watch.Event) (bool, error) {
		if e.Type == watch.Error {
			return false, fmt.Errorf("encountered error while watching for pod: %v", e.Object)
		}
		obj, isPod := e.Object.(*api.Pod)
		if !isPod {
			return false, errors.New("received unknown object while watching for pods")
		}
		return canGetLogs(obj), nil
	}); err != nil {
		return err
	}

	return nil
}
Example 3: runAppArmorTest
func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) api.PodStatus {
	pod := createPodWithAppArmor(f, profile)
	if shouldRun {
		// The pod needs to start before it stops, so wait for the longer start timeout.
		framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
			f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
	} else {
		// Pod should remain in the pending state. Wait for the Reason to be set to "AppArmor".
		w, err := f.PodClient().Watch(api.SingleObject(api.ObjectMeta{Name: pod.Name}))
		framework.ExpectNoError(err)
		_, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) {
			switch e.Type {
			case watch.Deleted:
				return false, errors.NewNotFound(unversioned.GroupResource{Resource: "pods"}, pod.Name)
			}
			switch t := e.Object.(type) {
			case *api.Pod:
				if t.Status.Reason == "AppArmor" {
					return true, nil
				}
			}
			return false, nil
		})
		framework.ExpectNoError(err)
	}

	p, err := f.PodClient().Get(pod.Name)
	framework.ExpectNoError(err)
	return p.Status
}
Example 4: Scale
// Scale updates a ReplicationController to a new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for its replica count to reach the new value
// (if waitForReplicas is not nil).
func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
	if preconditions == nil {
		preconditions = &ScalePrecondition{-1, ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
	if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}

	if waitForReplicas != nil {
		watchOptions := api.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", name), ResourceVersion: "0"}
		watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions)
		if err != nil {
			return err
		}
		_, err = watch.Until(waitForReplicas.Timeout, watcher, func(event watch.Event) (bool, error) {
			if event.Type != watch.Added && event.Type != watch.Modified {
				return false, nil
			}
			rc := event.Object.(*api.ReplicationController)
			return rc.Status.ObservedGeneration >= rc.Generation && rc.Status.Replicas == rc.Spec.Replicas, nil
		})
		if err == wait.ErrWaitTimeout {
			return fmt.Errorf("timed out waiting for %q to be synced", name)
		}
		return err
	}
	return nil
}
Example 5: WaitForRunningDeployment
// WaitForRunningDeployment waits until the specified deployment is no longer New or Pending. Returns true if
// the deployment became running, complete, or failed within timeout, false if it did not, and an error if any
// other error state occurred. The last observed deployment state is returned.
func WaitForRunningDeployment(rn kclient.ReplicationControllersNamespacer, observed *kapi.ReplicationController, timeout time.Duration) (*kapi.ReplicationController, bool, error) {
	fieldSelector := fields.Set{"metadata.name": observed.Name}.AsSelector()
	options := kapi.ListOptions{FieldSelector: fieldSelector, ResourceVersion: observed.ResourceVersion}
	w, err := rn.ReplicationControllers(observed.Namespace).Watch(options)
	if err != nil {
		return observed, false, err
	}
	defer w.Stop()

	if _, err := watch.Until(timeout, w, func(e watch.Event) (bool, error) {
		if e.Type == watch.Error {
			return false, fmt.Errorf("encountered error while watching for replication controller: %v", e.Object)
		}
		obj, isController := e.Object.(*kapi.ReplicationController)
		if !isController {
			return false, fmt.Errorf("received unknown object while watching for deployments: %v", obj)
		}
		observed = obj
		switch deployutil.DeploymentStatusFor(observed) {
		case api.DeploymentStatusRunning, api.DeploymentStatusFailed, api.DeploymentStatusComplete:
			return true, nil
		case api.DeploymentStatusNew, api.DeploymentStatusPending:
			return false, nil
		default:
			return false, ErrUnknownDeploymentPhase
		}
	}); err != nil {
		return observed, false, err
	}

	return observed, true, nil
}
Example 6: scale
func scale(t *testing.T, namespace string, clientset *clientset.Clientset) {
	target := int32(100)
	rc := &v1.ReplicationController{
		ObjectMeta: v1.ObjectMeta{
			Name:      "foo",
			Namespace: namespace,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &target,
			Selector: map[string]string{"foo": "bar"},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: v1.ObjectMeta{
					Labels: map[string]string{
						"foo": "bar",
					},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "container",
							Image: "busybox",
						},
					},
				},
			},
		},
	}

	w, err := clientset.Core().ReplicationControllers(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: rc.Name}))
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	if _, err := clientset.Core().ReplicationControllers(namespace).Create(rc); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = watch.Until(3*time.Minute, w, func(event watch.Event) (bool, error) {
		switch event.Type {
		case watch.Modified:
		default:
			return false, nil
		}

		switch cast := event.Object.(type) {
		case *v1.ReplicationController:
			fmt.Printf("Found %v of %v replicas\n", int(cast.Status.Replicas), target)
			if cast.Status.Replicas == target {
				return true, nil
			}
		}

		return false, nil
	})
	if err != nil {
		pods, _ := clientset.Core().Pods(namespace).List(v1.ListOptions{LabelSelector: labels.Everything().String(), FieldSelector: fields.Everything().String()})
		t.Fatalf("unexpected error: %v, ended with %v pods", err, len(pods.Items))
	}
}
Example 7: Scale
// Scale updates a ReplicationController to a new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for its replica count to reach the new value
// (if waitForReplicas is not nil).
func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
	if preconditions == nil {
		preconditions = &ScalePrecondition{-1, ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	var updatedResourceVersion string
	cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, &updatedResourceVersion)
	if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}

	if waitForReplicas != nil {
		checkRC := func(rc *api.ReplicationController) bool {
			if uint(rc.Spec.Replicas) != newSize {
				// The size was changed by another party; no need to wait for the new change to complete.
				return true
			}
			return rc.Status.ObservedGeneration >= rc.Generation && rc.Status.Replicas == rc.Spec.Replicas
		}

		// If the number of replicas doesn't change, the update may not even
		// be sent to the underlying database (no-op changes are not sent).
		// In that case <updatedResourceVersion> will hold the value of the most
		// recent update (which may be far in the past), so we may get a "too old
		// resource version" error from the watch, or potentially no ReplicationController
		// events will be delivered, since it may already be in the expected state.
		// To protect against both, we first issue a Get() to ensure that we
		// are not already in the expected state.
		currentRC, err := scaler.c.ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if !checkRC(currentRC) {
			watchOptions := api.ListOptions{
				FieldSelector:   fields.OneTermEqualSelector("metadata.name", name),
				ResourceVersion: updatedResourceVersion,
			}
			watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions)
			if err != nil {
				return err
			}
			_, err = watch.Until(waitForReplicas.Timeout, watcher, func(event watch.Event) (bool, error) {
				if event.Type != watch.Added && event.Type != watch.Modified {
					return false, nil
				}
				return checkRC(event.Object.(*api.ReplicationController)), nil
			})
			if err == wait.ErrWaitTimeout {
				return fmt.Errorf("timed out waiting for %q to be synced", name)
			}
			return err
		}
	}
	return nil
}
Example 8: waitForPod
// waitForPod watches the given pod until the exitCondition is true. Every two seconds
// the tick function is called, e.g. for progress output.
func waitForPod(podClient coreclient.PodsGetter, ns, name string, exitCondition watch.ConditionFunc, tick func(*api.Pod)) (*api.Pod, error) {
	w, err := podClient.Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: name}))
	if err != nil {
		return nil, err
	}

	pods := make(chan *api.Pod) // observed pods passed to the exitCondition
	defer close(pods)

	// wait for the first event, then start the 2 sec ticker and loop
	go func() {
		pod := <-pods
		if pod == nil {
			return
		}
		tick(pod)

		t := time.NewTicker(2 * time.Second)
		defer t.Stop()

		for {
			select {
			case pod = <-pods:
				if pod == nil {
					return
				}
			case _, ok := <-t.C:
				if !ok {
					return
				}
				tick(pod)
			}
		}
	}()

	intr := interrupt.New(nil, w.Stop)
	var result *api.Pod
	err = intr.Run(func() error {
		ev, err := watch.Until(0, w, func(ev watch.Event) (bool, error) {
			c, err := exitCondition(ev)
			if !c && err == nil {
				pods <- ev.Object.(*api.Pod) // send to ticker
			}
			return c, err
		})
		result = ev.Object.(*api.Pod)
		return err
	})
	return result, err
}
Example 9: waitForToken
// waitForToken uses `watch.Until` to wait for the service account controller to fulfill the token request
func waitForToken(token *api.Secret, serviceAccount *api.ServiceAccount, timeout time.Duration, client kcoreclient.SecretInterface) (*api.Secret, error) {
	// there is no provided rounding function, so we use Round(x) == Floor(x + 0.5)
	timeoutSeconds := int64(math.Floor(timeout.Seconds() + 0.5))

	options := api.ListOptions{
		FieldSelector:   fields.SelectorFromSet(fields.Set(map[string]string{"metadata.name": token.Name})),
		Watch:           true,
		ResourceVersion: token.ResourceVersion,
		TimeoutSeconds:  &timeoutSeconds,
	}

	watcher, err := client.Watch(options)
	if err != nil {
		return nil, fmt.Errorf("could not begin watch for token: %v", err)
	}

	event, err := watch.Until(timeout, watcher, func(event watch.Event) (bool, error) {
		if event.Type == watch.Error {
			return false, fmt.Errorf("encountered error while watching for token: %v", event.Object)
		}

		eventToken, ok := event.Object.(*api.Secret)
		if !ok {
			return false, nil
		}
		if eventToken.Name != token.Name {
			return false, nil
		}

		switch event.Type {
		case watch.Modified:
			if serviceaccounts.IsValidServiceAccountToken(serviceAccount, eventToken) {
				return true, nil
			}
		case watch.Deleted:
			return false, errors.New("token was deleted before fulfillment by service account token controller")
		case watch.Added:
			return false, errors.New("unexpected action: token was added after initial creation")
		}
		return false, nil
	})
	if err != nil {
		return nil, err
	}

	return event.Object.(*api.Secret), nil
}
Example 10: TestBootstrapping
func TestBootstrapping(t *testing.T) {
	superUser := "admin"

	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.GenericConfig.Authorizer = newRBACAuthorizer(t, superUser, masterConfig)
	masterConfig.GenericConfig.Authenticator = newFakeAuthenticator()
	masterConfig.GenericConfig.AuthorizerRBACSuperUser = superUser
	_, s := framework.RunAMaster(masterConfig)
	defer s.Close()

	clientset := clientset.NewForConfigOrDie(&restclient.Config{BearerToken: superUser, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})

	watcher, err := clientset.Rbac().ClusterRoles().Watch(api.ListOptions{ResourceVersion: "0"})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = watch.Until(30*time.Second, watcher, func(event watch.Event) (bool, error) {
		if event.Type != watch.Added {
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterRoles, err := clientset.Rbac().ClusterRoles().List(api.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(clusterRoles.Items) == 0 {
		t.Fatalf("missing cluster roles")
	}

	for _, clusterRole := range clusterRoles.Items {
		if clusterRole.Name == "cluster-admin" {
			return
		}
	}

	t.Errorf("missing cluster-admin: %v", clusterRoles)

	healthBytes, err := clientset.Discovery().RESTClient().Get().AbsPath("/healthz/poststarthooks/rbac/bootstrap-roles").DoRaw()
	if err != nil {
		t.Error(err)
	}
	t.Errorf("expected %v, got %v", "asdf", string(healthBytes))
}
Example 11: waitForPod
// waitForPod watches the given pod until the exitCondition is true
func waitForPod(podClient coreclient.PodsGetter, ns, name string, exitCondition watch.ConditionFunc) (*api.Pod, error) {
	w, err := podClient.Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: name}))
	if err != nil {
		return nil, err
	}

	intr := interrupt.New(nil, w.Stop)
	var result *api.Pod
	err = intr.Run(func() error {
		ev, err := watch.Until(0, w, func(ev watch.Event) (bool, error) {
			return exitCondition(ev)
		})
		result = ev.Object.(*api.Pod)
		return err
	})
	return result, err
}
Example 12: GetFirstPod
// GetFirstPod returns a pod matching the namespace and label selector
// and the number of all pods that match the label selector.
func GetFirstPod(client coreclient.PodsGetter, namespace string, selector labels.Selector, timeout time.Duration, sortBy func([]*v1.Pod) sort.Interface) (*api.Pod, int, error) {
	options := api.ListOptions{LabelSelector: selector}

	podList, err := client.Pods(namespace).List(options)
	if err != nil {
		return nil, 0, err
	}
	pods := []*v1.Pod{}
	for i := range podList.Items {
		pod := podList.Items[i]
		externalPod := &v1.Pod{}
		v1.Convert_api_Pod_To_v1_Pod(&pod, externalPod, nil)
		pods = append(pods, externalPod)
	}
	if len(pods) > 0 {
		sort.Sort(sortBy(pods))
		internalPod := &api.Pod{}
		v1.Convert_v1_Pod_To_api_Pod(pods[0], internalPod, nil)
		return internalPod, len(podList.Items), nil
	}

	// Watch until we observe a pod
	options.ResourceVersion = podList.ResourceVersion
	w, err := client.Pods(namespace).Watch(options)
	if err != nil {
		return nil, 0, err
	}
	defer w.Stop()

	condition := func(event watch.Event) (bool, error) {
		return event.Type == watch.Added || event.Type == watch.Modified, nil
	}
	event, err := watch.Until(timeout, w, condition)
	if err != nil {
		return nil, 0, err
	}
	pod, ok := event.Object.(*api.Pod)
	if !ok {
		return nil, 0, fmt.Errorf("%#v is not a pod event", event)
	}
	return pod, 1, nil
}
Example 13: TestBootstrapping
func TestBootstrapping(t *testing.T) {
	superUser := "admin"

	masterConfig := framework.NewIntegrationTestMasterConfig()
	masterConfig.Authorizer = newRBACAuthorizer(t, superUser, masterConfig)
	masterConfig.Authenticator = newFakeAuthenticator()
	masterConfig.AuthorizerRBACSuperUser = superUser
	_, s := framework.RunAMaster(masterConfig)
	defer s.Close()

	clientset := clientset.NewForConfigOrDie(&restclient.Config{BearerToken: superUser, Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	watcher, err := clientset.Rbac().ClusterRoles().Watch(api.ListOptions{ResourceVersion: "0"})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	_, err = watch.Until(30*time.Second, watcher, func(event watch.Event) (bool, error) {
		if event.Type != watch.Added {
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	clusterRoles, err := clientset.Rbac().ClusterRoles().List(api.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(clusterRoles.Items) == 0 {
		t.Fatalf("missing cluster roles")
	}

	for _, clusterRole := range clusterRoles.Items {
		if clusterRole.Name == "cluster-admin" {
			return
		}
	}

	t.Errorf("missing cluster-admin: %v", clusterRoles)
}
Example 14: DeleteAndWaitForNamespaceTermination
func DeleteAndWaitForNamespaceTermination(c *kclient.Client, name string) error {
	w, err := c.Namespaces().Watch(kapi.ListOptions{})
	if err != nil {
		return err
	}
	if err := c.Namespaces().Delete(name); err != nil {
		return err
	}
	_, err = watch.Until(30*time.Second, w, func(event watch.Event) (bool, error) {
		if event.Type != watch.Deleted {
			return false, nil
		}
		namespace, ok := event.Object.(*kapi.Namespace)
		if !ok {
			return false, nil
		}
		return namespace.Name == name, nil
	})
	return err
}
Example 15: validateDNSResults
func validateDNSResults(f *e2e.Framework, pod *api.Pod, fileNames sets.String, expect int) {
	By("submitting the pod to kubernetes")
	podClient := f.Client.Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	}()
	updated, err := podClient.Create(pod)
	if err != nil {
		e2e.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	w, err := f.Client.Pods(f.Namespace.Name).Watch(api.SingleObject(api.ObjectMeta{Name: pod.Name, ResourceVersion: updated.ResourceVersion}))
	if err != nil {
		e2e.Failf("Failed: %v", err)
	}
	if _, err = watch.Until(e2e.PodStartTimeout, w, PodSucceeded); err != nil {
		e2e.Failf("Failed: %v", err)
	}

	By("retrieving the pod logs")
	r, err := podClient.GetLogs(pod.Name, &api.PodLogOptions{Container: "querier"}).Stream()
	if err != nil {
		e2e.Failf("Failed to get pod logs %s: %v", pod.Name, err)
	}
	out, err := ioutil.ReadAll(r)
	if err != nil {
		e2e.Failf("Failed to read pod logs %s: %v", pod.Name, err)
	}

	// Try to find results for each expected name.
	By("looking for the results for each expected name from probers")
	if err := assertLinesExist(fileNames, expect, bytes.NewBuffer(out)); err != nil {
		e2e.Logf("Got results from pod:\n%s", out)
		e2e.Failf("Unexpected results: %v", err)
	}

	e2e.Logf("DNS probes using %s succeeded\n", pod.Name)
}