This article collects typical usage examples of the Golang function vulcan/kubernetes/pkg/util/wait.Poll. If you are wondering what exactly the Poll function does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
The following shows 15 code examples of the Poll function, sorted by popularity by default.
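Every example shares the same contract: wait.Poll(interval, timeout, condition) invokes the condition function once per interval until it returns (true, nil), until it returns a non-nil error, or until the timeout elapses, in which case Poll itself returns an error. Below is a minimal, self-contained sketch of that contract; the import path is the vendored one named in the title, and the 2-second "work" is just a stand-in:

package main

import (
	"fmt"
	"time"

	"vulcan/kubernetes/pkg/util/wait"
)

func main() {
	start := time.Now()
	// Poll the condition every 500ms for up to 10s. Returning (false, nil)
	// means "not done yet, try again"; (true, nil) stops polling successfully;
	// a non-nil error aborts immediately.
	err := wait.Poll(500*time.Millisecond, 10*time.Second, func() (bool, error) {
		return time.Since(start) > 2*time.Second, nil
	})
	fmt.Println("poll finished, err =", err)
}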
Example 1: checkExistingRCRecovers
func checkExistingRCRecovers(f Framework) {
	By("assert that the pre-existing replication controller recovers")
	podClient := f.Client.Pods(f.Namespace.Name)
	rcSelector := labels.Set{"name": "baz"}.AsSelector()

	By("deleting pods from existing replication controller")
	expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		pods, err := podClient.List(rcSelector, fields.Everything())
		if err != nil {
			Logf("apiserver returned error, as expected before recovery: %v", err)
			return false, nil
		}
		if len(pods.Items) == 0 {
			return false, nil
		}
		for _, pod := range pods.Items {
			err = podClient.Delete(pod.Name, api.NewDeleteOptions(0))
			Expect(err).NotTo(HaveOccurred())
		}
		Logf("apiserver has recovered")
		return true, nil
	}))

	By("waiting for replication controller to recover")
	expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		pods, err := podClient.List(rcSelector, fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp == nil && api.IsPodReady(&pod) {
				return true, nil
			}
		}
		return false, nil
	}))
}
Example 2: runSchedulerNoPhantomPodsTest
func runSchedulerNoPhantomPodsTest(client *client.Client) {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "c1",
					Image: "kubernetes/pause",
					Ports: []api.ContainerPort{
						{ContainerPort: 1234, HostPort: 9999},
					},
					ImagePullPolicy: api.PullIfNotPresent,
				},
			},
		},
	}

	// Assuming we only have two kubelets, the third pod here won't schedule
	// if the scheduler doesn't correctly handle the delete for the second
	// pod.
	pod.ObjectMeta.Name = "phantom.foo"
	foo, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*30, podRunning(client, foo.Namespace, foo.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running %v", err)
	}

	pod.ObjectMeta.Name = "phantom.bar"
	bar, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*30, podRunning(client, bar.Namespace, bar.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running %v", err)
	}

	// Delete a pod to free up room.
	glog.Infof("Deleting pod %v", bar.Name)
	err = client.Pods(api.NamespaceDefault).Delete(bar.Name, api.NewDeleteOptions(0))
	if err != nil {
		glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
	}

	pod.ObjectMeta.Name = "phantom.baz"
	baz, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, time.Second*60, podRunning(client, baz.Namespace, baz.Name)); err != nil {
		if pod, perr := client.Pods(api.NamespaceDefault).Get("phantom.bar"); perr == nil {
			glog.Fatalf("FAILED: 'phantom.bar' was never deleted: %#v", pod)
		} else {
			glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: %v", err)
		}
	}

	glog.Info("Scheduler doesn't make phantom pods: test passed.")
}
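The condition passed to wait.Poll here comes from podRunning(client, namespace, name), a helper that is not shown on this page. The following is a hypothetical sketch of what such a condition factory might look like; the import paths and the helper name are assumptions for illustration, not the real implementation:

package podcond

import (
	"vulcan/kubernetes/pkg/api"                       // assumed vendored import paths,
	client "vulcan/kubernetes/pkg/client/unversioned" // matching this repo's layout
	"vulcan/kubernetes/pkg/util/wait"
)

// podRunningSketch returns a wait.ConditionFunc that reports true once the
// named pod reaches the Running phase. It is a stand-in for the podRunning
// helper used in the example above.
func podRunningSketch(c *client.Client, namespace, name string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.Pods(namespace).Get(name)
		if err != nil {
			// Treat lookup errors as "not running yet" so Poll keeps retrying.
			return false, nil
		}
		return pod.Status.Phase == api.PodRunning, nil
	}
}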
Example 3: pollForReadyPods
// pollForReadyPods polls oldRc and newRc each interval and returns the old
// and new ready counts for their pods. If a pod is observed as being ready,
// it's considered ready even if it later becomes notReady.
func (r *RollingUpdater) pollForReadyPods(interval, timeout time.Duration, oldRc, newRc *api.ReplicationController) (int, int, error) {
	controllers := []*api.ReplicationController{oldRc, newRc}
	oldReady := 0
	newReady := 0
	err := wait.Poll(interval, timeout, func() (done bool, err error) {
		anyReady := false
		for _, controller := range controllers {
			selector := labels.Set(controller.Spec.Selector).AsSelector()
			pods, err := r.c.Pods(controller.Namespace).List(selector, fields.Everything())
			if err != nil {
				return false, err
			}
			for _, pod := range pods.Items {
				if api.IsPodReady(&pod) {
					switch controller.Name {
					case oldRc.Name:
						oldReady++
					case newRc.Name:
						newReady++
					}
					anyReady = true
				}
			}
		}
		if anyReady {
			return true, nil
		}
		return false, nil
	})
	return oldReady, newReady, err
}
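The pattern worth noting here is that the closure writes into oldReady and newReady, which are captured from the enclosing scope, and the caller reads them after Poll returns alongside Poll's error. A minimal stand-alone sketch of that shape, where the simulated readiness check is just a placeholder:

package main

import (
	"fmt"
	"time"

	"vulcan/kubernetes/pkg/util/wait"
)

func main() {
	ready := 0
	attempts := 0
	// The condition mutates variables captured from the enclosing scope;
	// their final values are available to the caller once Poll returns.
	err := wait.Poll(100*time.Millisecond, 5*time.Second, func() (bool, error) {
		attempts++
		ready = 0
		for i := 0; i < 5; i++ {
			if attempts > 2 { // placeholder for a real check like api.IsPodReady(&pod)
				ready++
			}
		}
		return ready > 0, nil
	})
	fmt.Printf("ready=%d attempts=%d err=%v\n", ready, attempts, err)
}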
Example 4: migTemplate
// migTemplate (GCE/GKE-only) returns the name of the MIG template that the
// nodes of the cluster use.
func migTemplate() (string, error) {
	var errLast error
	var templ string
	key := "instanceTemplate"
	if wait.Poll(poll, singleCallTimeout, func() (bool, error) {
		// TODO(mikedanese): make this hit the compute API directly instead of
		// shelling out to gcloud.
		// An `instance-groups managed describe` call outputs what we want to stdout.
		output, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
			fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
			"describe",
			fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone),
			testContext.CloudConfig.NodeInstanceGroup)
		if err != nil {
			errLast = fmt.Errorf("gcloud compute instance-groups managed describe call failed with err: %v", err)
			return false, nil
		}
		// The 'describe' call probably succeeded; parse the output and try to
		// find the line that looks like "instanceTemplate: url/to/<templ>" and
		// return <templ>.
		if val := parseKVLines(output, key); len(val) > 0 {
			url := strings.Split(val, "/")
			templ = url[len(url)-1]
			Logf("MIG group %s using template: %s", testContext.CloudConfig.NodeInstanceGroup, templ)
			return true, nil
		}
		errLast = fmt.Errorf("couldn't find %s in output to get MIG template. Output: %s", key, output)
		return false, nil
	}) != nil {
		return "", fmt.Errorf("migTemplate() failed with last error: %v", errLast)
	}
	return templ, nil
}
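Both this example and the next rely on parseKVLines(output, key) to pull a value out of gcloud's "key: value" output; its implementation is not shown on this page. A hypothetical equivalent, purely for illustration:

package kvlines

import (
	"bufio"
	"strings"
)

// parseKVLinesSketch scans output line by line and returns the value of the
// first line of the form "key: value". It is an illustrative stand-in for the
// parseKVLines helper referenced above, not its actual implementation.
func parseKVLinesSketch(output, key string) string {
	scanner := bufio.NewScanner(strings.NewReader(output))
	prefix := key + ":"
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if strings.HasPrefix(line, prefix) {
			return strings.TrimSpace(strings.TrimPrefix(line, prefix))
		}
	}
	return ""
}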
Example 5: migRollingUpdatePoll
// migRollingUpdatePoll (GCE/GKE-only) polls the progress of the MIG rolling
// update with ID id until it is complete. It returns an error if this takes
// longer than nt times the number of nodes.
func migRollingUpdatePoll(id string, nt time.Duration) error {
	// Two keys and a val.
	status, progress, done := "status", "statusMessage", "ROLLED_OUT"
	start, timeout := time.Now(), nt*time.Duration(testContext.CloudConfig.NumNodes)
	var errLast error
	Logf("Waiting up to %v for MIG rolling update to complete.", timeout)
	if wait.Poll(restartPoll, timeout, func() (bool, error) {
		// A `rolling-updates describe` call outputs what we want to stdout.
		output, _, err := retryCmd("gcloud", append(migUdpateCmdBase(),
			"rolling-updates",
			fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
			fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone),
			"describe",
			id)...)
		if err != nil {
			errLast = fmt.Errorf("Error calling rolling-updates describe %s: %v", id, err)
			Logf("%v", errLast)
			return false, nil
		}
		// The 'describe' call probably succeeded; parse the output and try to
		// find the line that looks like "status: <status>" and see whether it's
		// done.
		Logf("Waiting for MIG rolling update: %s (%v elapsed)",
			parseKVLines(output, progress), time.Since(start))
		if st := parseKVLines(output, status); st == done {
			return true, nil
		}
		return false, nil
	}) != nil {
		return fmt.Errorf("timeout waiting %v for MIG rolling update to complete. Last error: %v", timeout, errLast)
	}
	Logf("MIG rolling update complete after %v", time.Since(start))
	return nil
}
Example 6: assertFilesExist
func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client *client.Client) {
	var failed []string

	expectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
		failed = []string{}
		for _, fileName := range fileNames {
			if _, err := client.Get().
				Prefix("proxy").
				Resource("pods").
				Namespace(pod.Namespace).
				Name(pod.Name).
				Suffix(fileDir, fileName).
				Do().Raw(); err != nil {
				Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
				failed = append(failed, fileName)
			}
		}
		if len(failed) == 0 {
			return true, nil
		}
		Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
		return false, nil
	}))
	Expect(len(failed)).To(Equal(0))
}
Example 7: waitForReflection
// Wait till the passFunc confirms that the object it expects to see is in the store.
// Used to observe reflected events.
func waitForReflection(s cache.Store, key string, passFunc func(n interface{}) bool) error {
	return wait.Poll(time.Millisecond*10, time.Second*20, func() (bool, error) {
		if n, _, err := s.GetByKey(key); err == nil && passFunc(n) {
			return true, nil
		}
		return false, nil
	})
}
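A hedged usage sketch for the same pattern: populate a cache.Store asynchronously (standing in for a reflector) and poll until the object appears under its "namespace/name" key and satisfies a predicate. The import paths are assumptions matching this repo's vendored layout:

package main

import (
	"fmt"
	"time"

	"vulcan/kubernetes/pkg/api" // assumed vendored import paths
	"vulcan/kubernetes/pkg/client/cache"
	"vulcan/kubernetes/pkg/util/wait"
)

func main() {
	store := cache.NewStore(cache.MetaNamespaceKeyFunc)

	// Stand-in for a reflector: add the pod to the store a little later.
	go func() {
		time.Sleep(200 * time.Millisecond)
		store.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Namespace: "default", Name: "web-0"}})
	}()

	// Same shape as waitForReflection above: poll the store until the object
	// shows up and passes the check.
	err := wait.Poll(time.Millisecond*10, time.Second*20, func() (bool, error) {
		n, exists, err := store.GetByKey("default/web-0")
		if err != nil || !exists {
			return false, nil
		}
		pod, ok := n.(*api.Pod)
		return ok && pod.Name == "web-0", nil
	})
	fmt.Println("observed:", err == nil, "err:", err)
}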
Example 8: waitForJobFinish
// Wait for job to reach completions.
func waitForJobFinish(c *client.Client, ns, jobName string, completions int) error {
	return wait.Poll(poll, jobTimeout, func() (bool, error) {
		curr, err := c.Extensions().Jobs(ns).Get(jobName)
		if err != nil {
			return false, err
		}
		return curr.Status.Succeeded == completions, nil
	})
}
Example 9: waitForUpToDateCache
func waitForUpToDateCache(cacher *storage.Cacher, resourceVersion uint64) error {
	ready := func() (bool, error) {
		result, err := cacher.LastSyncResourceVersion()
		if err != nil {
			return false, err
		}
		return result == resourceVersion, nil
	}
	return wait.Poll(10*time.Millisecond, util.ForeverTestTimeout, ready)
}
Example 10: checkNodesReady
// checkNodesReady waits up to nt for the expected number of nodes (expect)
// accessed by c to be ready, returning an error if this doesn't happen in
// time. It returns the names of the nodes it finds.
func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string, error) {
	// First, keep getting all of the nodes until we get the number we expect.
	var nodeList *api.NodeList
	var errLast error
	start := time.Now()
	found := wait.Poll(poll, nt, func() (bool, error) {
		// Even though listNodes(...) has its own retries, a rolling-update
		// (GCE/GKE implementation of restart) can complete before the apiserver
		// knows about all of the nodes. Thus, we retry the list nodes call
		// until we get the expected number of nodes.
		nodeList, errLast = listNodes(c, labels.Everything(), fields.Everything())
		if errLast != nil {
			return false, nil
		}
		if len(nodeList.Items) != expect {
			errLast = fmt.Errorf("expected to find %d nodes but found only %d (%v elapsed)",
				expect, len(nodeList.Items), time.Since(start))
			Logf("%v", errLast)
			return false, nil
		}
		return true, nil
	}) == nil
	nodeNames := make([]string, len(nodeList.Items))
	for i, n := range nodeList.Items {
		nodeNames[i] = n.ObjectMeta.Name
	}
	if !found {
		return nodeNames, fmt.Errorf("couldn't find %d nodes within %v; last error: %v",
			expect, nt, errLast)
	}
	Logf("Successfully found %d nodes", expect)

	// Next, ensure in parallel that all the nodes are ready. We subtract the
	// time we spent waiting above.
	timeout := nt - time.Since(start)
	result := make(chan bool, len(nodeList.Items))
	for _, n := range nodeNames {
		n := n
		go func() { result <- waitForNodeToBeReady(c, n, timeout) }()
	}
	failed := false
	// TODO(mbforbes): Change to `for range` syntax once we support only Go
	// >= 1.4.
	for i := range nodeList.Items {
		_ = i
		if !<-result {
			failed = true
		}
	}
	if failed {
		return nodeNames, fmt.Errorf("at least one node failed to be ready")
	}
	return nodeNames, nil
}
Example 11: test
func (s *ingManager) test(path string) error {
	url := fmt.Sprintf("%v/hostName", path)
	httpClient := &http.Client{}
	return wait.Poll(pollInterval, serviceRespondingTimeout, func() (bool, error) {
		body, err := simpleGET(httpClient, url, "")
		if err != nil {
			Logf("%v\n%v\n%v", url, body, err)
			return false, nil
		}
		return true, nil
	})
}
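simpleGET is an e2e test helper that is not shown on this page. The same "poll until an HTTP endpoint responds" idea can be sketched with only net/http; the URL below is a placeholder:

package main

import (
	"fmt"
	"net/http"
	"time"

	"vulcan/kubernetes/pkg/util/wait"
)

func main() {
	url := "http://127.0.0.1:8080/hostName" // placeholder endpoint
	httpClient := &http.Client{Timeout: 2 * time.Second}
	err := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
		resp, err := httpClient.Get(url)
		if err != nil {
			return false, nil // not reachable yet; keep polling
		}
		resp.Body.Close()
		return resp.StatusCode == http.StatusOK, nil
	})
	fmt.Println("service responding:", err == nil)
}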
Example 12: getReferencedServiceAccountToken
func getReferencedServiceAccountToken(c *client.Client, ns string, name string, shouldWait bool) (string, string, error) {
	tokenName := ""
	token := ""

	findToken := func() (bool, error) {
		user, err := c.ServiceAccounts(ns).Get(name)
		if errors.IsNotFound(err) {
			return false, nil
		}
		if err != nil {
			return false, err
		}

		for _, ref := range user.Secrets {
			secret, err := c.Secrets(ns).Get(ref.Name)
			if errors.IsNotFound(err) {
				continue
			}
			if err != nil {
				return false, err
			}
			if secret.Type != api.SecretTypeServiceAccountToken {
				continue
			}
			name := secret.Annotations[api.ServiceAccountNameKey]
			uid := secret.Annotations[api.ServiceAccountUIDKey]
			tokenData := secret.Data[api.ServiceAccountTokenKey]
			if name == user.Name && uid == string(user.UID) && len(tokenData) > 0 {
				tokenName = secret.Name
				token = string(tokenData)
				return true, nil
			}
		}

		return false, nil
	}

	if shouldWait {
		err := wait.Poll(time.Second, 10*time.Second, findToken)
		if err != nil {
			return "", "", err
		}
	} else {
		ok, err := findToken()
		if err != nil {
			return "", "", err
		}
		if !ok {
			return "", "", fmt.Errorf("No token found for %s/%s", ns, name)
		}
	}
	return tokenName, token, nil
}
Example 13: retryCmd
// retryCmd runs cmd using args and retries it for up to singleCallTimeout if
// it returns an error. It returns stdout and stderr.
func retryCmd(command string, args ...string) (string, string, error) {
	var err error
	stdout, stderr := "", ""
	wait.Poll(poll, singleCallTimeout, func() (bool, error) {
		stdout, stderr, err = runCmd(command, args...)
		if err != nil {
			Logf("Got %v", err)
			return false, nil
		}
		return true, nil
	})
	return stdout, stderr, err
}
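Note that the error returned by wait.Poll itself is discarded here; on timeout the caller simply gets whatever stdout, stderr, and err the last attempt produced. A self-contained sketch of the same retry-a-command shape, using os/exec in place of the runCmd helper (the command is just a placeholder):

package main

import (
	"fmt"
	"os/exec"
	"time"

	"vulcan/kubernetes/pkg/util/wait"
)

func main() {
	var out []byte
	var lastErr error
	// Re-run the command every 5s for up to 2 minutes; a failed run returns
	// (false, nil) so Poll retries instead of aborting.
	pollErr := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
		out, lastErr = exec.Command("date", "+%s").CombinedOutput()
		if lastErr != nil {
			return false, nil
		}
		return true, nil
	})
	fmt.Printf("output=%q lastErr=%v pollErr=%v\n", out, lastErr, pollErr)
}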
Example 14: Scale
// Scale updates a Job to a new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for parallelism to reach desired
// number, which can be less than requested based on job's current progress.
func (scaler *JobScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
	if preconditions == nil {
		preconditions = &ScalePrecondition{-1, ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
	if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}
	if waitForReplicas != nil {
		job, err := scaler.c.Extensions().Jobs(namespace).Get(name)
		if err != nil {
			return err
		}
		return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout,
			client.JobHasDesiredParallelism(scaler.c, job))
	}
	return nil
}
Example 15: start
func (h *haproxyControllerTester) start(namespace string) (err error) {
	// Create a replication controller with the given configuration.
	rc := rcFromManifest(h.cfg)
	rc.Namespace = namespace
	rc.Spec.Template.Labels["rcName"] = rc.Name

	// Add the --namespace arg.
	// TODO: Remove this when we have proper namespace support.
	for i, c := range rc.Spec.Template.Spec.Containers {
		rc.Spec.Template.Spec.Containers[i].Args = append(
			c.Args, fmt.Sprintf("--namespace=%v", namespace))
		Logf("Container args %+v", rc.Spec.Template.Spec.Containers[i].Args)
	}

	rc, err = h.client.ReplicationControllers(rc.Namespace).Create(rc)
	if err != nil {
		return
	}
	// Record the created rc before waiting on its pods, so the wait and the
	// label selector below refer to the rc we just created.
	h.rcName = rc.Name
	h.rcNamespace = rc.Namespace
	if err = waitForRCPodsRunning(h.client, namespace, h.rcName); err != nil {
		return
	}

	// Find the pods of the rc we just created.
	labelSelector := labels.SelectorFromSet(
		labels.Set(map[string]string{"rcName": h.rcName}))
	pods, err := h.client.Pods(h.rcNamespace).List(
		labelSelector, fields.Everything())
	if err != nil {
		return err
	}

	// Find the external addresses of the nodes the pods are running on.
	for _, p := range pods.Items {
		wait.Poll(pollInterval, serviceRespondingTimeout, func() (bool, error) {
			address, err := getHostExternalAddress(h.client, &p)
			if err != nil {
				Logf("%v", err)
				return false, nil
			}
			h.address = append(h.address, address)
			return true, nil
		})
	}
	if len(h.address) == 0 {
		return fmt.Errorf("No external ips found for loadbalancer %v", h.getName())
	}
	return nil
}