This article collects typical usage examples of the Golang Poll function from github.com/qingyuancloud/QingYuan/pkg/util/wait. If you are wondering what the Poll function does, how to call it, or what real-world usage looks like, the curated examples below should help.
The following shows 15 code examples of the Poll function, sorted by popularity by default.
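Before diving into the examples, here is a minimal sketch of the calling convention they all share: Poll takes a polling interval, an overall timeout, and a condition callback that returns (done bool, err error). Returning (false, nil) keeps polling, (true, nil) stops the wait successfully, a non-nil error aborts the wait immediately, and if the timeout expires first, Poll itself returns an error. The helper name pollExample and the concrete durations below are made up for illustration; only the wait.Poll call mirrors the examples that follow.

package main

import (
    "fmt"
    "time"

    "github.com/qingyuancloud/QingYuan/pkg/util/wait"
)

// pollExample is a hypothetical helper: it polls every 500ms, for at most
// 10 seconds, until the condition callback reports that it is done.
func pollExample() error {
    attempts := 0
    return wait.Poll(500*time.Millisecond, 10*time.Second, func() (bool, error) {
        attempts++
        if attempts < 3 {
            return false, nil // not done yet; Poll waits one interval and retries
        }
        fmt.Printf("condition met after %d attempts\n", attempts)
        return true, nil // done; Poll returns nil
    })
}

func main() {
    if err := pollExample(); err != nil {
        fmt.Printf("poll failed: %v\n", err)
    }
}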
Example 1: checkExistingRCRecovers
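// checkExistingRCRecovers deletes the pods of a pre-existing replication
// controller (selector name=baz) as soon as the apiserver responds again,
// then waits for the controller to bring a replacement pod to Ready.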
func checkExistingRCRecovers(f Framework) {
    By("assert that the pre-existing replication controller recovers")
    podClient := f.Client.Pods(f.Namespace.Name)
    rcSelector := labels.Set{"name": "baz"}.AsSelector()
    By("deleting pods from existing replication controller")
    expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
        pods, err := podClient.List(rcSelector, fields.Everything())
        if err != nil {
            Logf("apiserver returned error, as expected before recovery: %v", err)
            return false, nil
        }
        if len(pods.Items) == 0 {
            return false, nil
        }
        for _, pod := range pods.Items {
            err = podClient.Delete(pod.Name, api.NewDeleteOptions(0))
            Expect(err).NotTo(HaveOccurred())
        }
        Logf("apiserver has recovered")
        return true, nil
    }))
    By("waiting for replication controller to recover")
    expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
        pods, err := podClient.List(rcSelector, fields.Everything())
        Expect(err).NotTo(HaveOccurred())
        for _, pod := range pods.Items {
            if api.IsPodReady(&pod) {
                return true, nil
            }
        }
        return false, nil
    }))
}
Example 2: runSchedulerNoPhantomPodsTest
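// runSchedulerNoPhantomPodsTest creates pods that each claim HostPort 9999 on
// a two-node cluster, deletes one, and verifies that the scheduler reuses the
// freed port for a third pod rather than tracking a phantom pod.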
func runSchedulerNoPhantomPodsTest(client *client.Client) {
    pod := &api.Pod{
        Spec: api.PodSpec{
            Containers: []api.Container{
                {
                    Name:  "c1",
                    Image: "qingyuan/pause",
                    Ports: []api.ContainerPort{
                        {ContainerPort: 1234, HostPort: 9999},
                    },
                    ImagePullPolicy: api.PullIfNotPresent,
                },
            },
        },
    }
    // Assuming we only have two kubelets, the third pod here won't schedule
    // if the scheduler doesn't correctly handle the delete for the second
    // pod.
    pod.ObjectMeta.Name = "phantom.foo"
    foo, err := client.Pods(api.NamespaceDefault).Create(pod)
    if err != nil {
        glog.Fatalf("Failed to create pod: %v, %v", pod, err)
    }
    if err := wait.Poll(time.Second, time.Second*30, podRunning(client, foo.Namespace, foo.Name)); err != nil {
        glog.Fatalf("FAILED: pod never started running %v", err)
    }
    pod.ObjectMeta.Name = "phantom.bar"
    bar, err := client.Pods(api.NamespaceDefault).Create(pod)
    if err != nil {
        glog.Fatalf("Failed to create pod: %v, %v", pod, err)
    }
    if err := wait.Poll(time.Second, time.Second*30, podRunning(client, bar.Namespace, bar.Name)); err != nil {
        glog.Fatalf("FAILED: pod never started running %v", err)
    }
    // Delete a pod to free up room.
    glog.Infof("Deleting pod %v", bar.Name)
    err = client.Pods(api.NamespaceDefault).Delete(bar.Name, nil)
    if err != nil {
        glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
    }
    pod.ObjectMeta.Name = "phantom.baz"
    baz, err := client.Pods(api.NamespaceDefault).Create(pod)
    if err != nil {
        glog.Fatalf("Failed to create pod: %v, %v", pod, err)
    }
    if err := wait.Poll(time.Second, time.Second*60, podRunning(client, baz.Namespace, baz.Name)); err != nil {
        glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: %v", err)
    }
    glog.Info("Scheduler doesn't make phantom pods: test passed.")
}
Example 3: testNotReachable
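// testNotReachable polls until an HTTP GET against http://ip:port stops
// succeeding, failing the test if the endpoint is still reachable after
// podStartTimeout.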
func testNotReachable(ip string, port int) {
    url := fmt.Sprintf("http://%s:%d", ip, port)
    if ip == "" {
        Failf("Got empty IP for non-reachability check (%s)", url)
    }
    if port == 0 {
        Failf("Got port==0 for non-reachability check (%s)", url)
    }
    By(fmt.Sprintf("Waiting up to %v for %s to be *not* reachable", podStartTimeout, url))
    expectNoError(wait.Poll(poll, podStartTimeout, func() (bool, error) {
        resp, err := httpGetNoConnectionPool(url)
        if err != nil {
            Logf("Successfully waited for the url %s to be unreachable.", url)
            return true, nil
        }
        defer resp.Body.Close()
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            Logf("Expecting %s to be unreachable but was reachable and got an error reading response: %v", url, err)
            return false, nil
        }
        Logf("Able to reach service %s when should no longer have been reachable, status: %v and body: %s", url, resp.Status, string(body))
        return false, nil
    }))
}
Example 4: testReachable
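// testReachable polls until an HTTP GET against http://ip:port succeeds; once
// a response arrives, a non-200 status or a body without "test-webserver"
// aborts the wait with an error.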
func testReachable(ip string, port int) {
    url := fmt.Sprintf("http://%s:%d", ip, port)
    if ip == "" {
        Failf("Got empty IP for reachability check (%s)", url)
    }
    if port == 0 {
        Failf("Got port==0 for reachability check (%s)", url)
    }
    By(fmt.Sprintf("Waiting up to %v for %s to be reachable", podStartTimeout, url))
    start := time.Now()
    expectNoError(wait.Poll(poll, podStartTimeout, func() (bool, error) {
        resp, err := httpGetNoConnectionPool(url)
        if err != nil {
            Logf("Got error waiting for reachability of %s: %v (%v)", url, err, time.Since(start))
            return false, nil
        }
        defer resp.Body.Close()
        body, err := ioutil.ReadAll(resp.Body)
        if err != nil {
            Logf("Got error reading response from %s: %v", url, err)
            return false, nil
        }
        if resp.StatusCode != 200 {
            return false, fmt.Errorf("received non-success return status %q trying to access %s; got body: %s", resp.Status, url, string(body))
        }
        if !strings.Contains(string(body), "test-webserver") {
            return false, fmt.Errorf("received response body without expected substring 'test-webserver': %s", string(body))
        }
        Logf("Successfully reached %v", url)
        return true, nil
    }))
}
Example 5: migTemplate
// migTemplate (GCE/GKE-only) returns the name of the MIG template that the
// nodes of the cluster use.
func migTemplate() (string, error) {
    var errLast error
    var templ string
    key := "instanceTemplate"
    // TODO(mbforbes): Refactor this to use cluster_upgrade.go:retryCmd(...)
    if wait.Poll(poll, singleCallTimeout, func() (bool, error) {
        // TODO(mbforbes): make this hit the compute API directly instead of
        // shelling out to gcloud.
        o, err := exec.Command("gcloud", "preview", "managed-instance-groups",
            fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
            fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone),
            "describe",
            testContext.CloudConfig.NodeInstanceGroup).CombinedOutput()
        if err != nil {
            errLast = fmt.Errorf("gcloud preview managed-instance-groups describe call failed with err: %v", err)
            return false, nil
        }
        output := string(o)
        // The 'describe' call probably succeeded; parse the output and try to
        // find the line that looks like "instanceTemplate: url/to/<templ>" and
        // return <templ>.
        if val := parseKVLines(output, key); len(val) > 0 {
            url := strings.Split(val, "/")
            templ = url[len(url)-1]
            Logf("MIG group %s using template: %s", testContext.CloudConfig.NodeInstanceGroup, templ)
            return true, nil
        }
        errLast = fmt.Errorf("couldn't find %s in output to get MIG template. Output: %s", key, output)
        return false, nil
    }) != nil {
        return "", fmt.Errorf("migTemplate() failed with last error: %v", errLast)
    }
    return templ, nil
}
Example 6: migRollingUpdatePoll
// migRollingUpdatePoll (GCE/GKE-only) polls the progress of the MIG rolling
// update with ID id until it is complete. It returns an error if this takes
// longer than nt times the number of nodes.
func migRollingUpdatePoll(id string, nt time.Duration) error {
    // Two keys and a val.
    status, progress, done := "status", "statusMessage", "ROLLED_OUT"
    start, timeout := time.Now(), nt*time.Duration(testContext.CloudConfig.NumNodes)
    var errLast error
    Logf("Waiting up to %v for MIG rolling update to complete.", timeout)
    // TODO(mbforbes): Refactor this to use cluster_upgrade.go:retryCmd(...)
    if wait.Poll(restartPoll, timeout, func() (bool, error) {
        o, err := exec.Command("gcloud", "preview", "rolling-updates",
            fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
            fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone),
            "describe",
            id).CombinedOutput()
        if err != nil {
            errLast = fmt.Errorf("Error calling rolling-updates describe %s: %v", id, err)
            Logf("%v", errLast)
            return false, nil
        }
        output := string(o)
        // The 'describe' call probably succeeded; parse the output and try to
        // find the line that looks like "status: <status>" and see whether it's
        // done.
        Logf("Waiting for MIG rolling update: %s (%v elapsed)",
            parseKVLines(output, progress), time.Since(start))
        if st := parseKVLines(output, status); st == done {
            return true, nil
        }
        return false, nil
    }) != nil {
        return fmt.Errorf("timeout waiting %v for MIG rolling update to complete. Last error: %v", timeout, errLast)
    }
    Logf("MIG rolling update complete after %v", time.Since(start))
    return nil
}
Example 7: extinguish
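// extinguish creates totalNS test namespaces, deletes every namespace whose
// name contains "nslifetest", and then polls until at most maxAllowedAfterDel
// of them remain or maxSeconds elapse.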
func extinguish(c *client.Client, totalNS int, maxAllowedAfterDel int, maxSeconds int) {
    var err error
    for n := 0; n < totalNS; n += 1 {
        _, err = createTestingNS(fmt.Sprintf("nslifetest-%v", n), c)
        Expect(err).NotTo(HaveOccurred())
    }
    // Wait 10 seconds, then SEND delete requests for all the namespaces.
    time.Sleep(time.Duration(10 * time.Second))
    nsList, err := c.Namespaces().List(labels.Everything(), fields.Everything())
    Expect(err).NotTo(HaveOccurred())
    for _, item := range nsList.Items {
        if strings.Contains(item.Name, "nslifetest") {
            if err := c.Namespaces().Delete(item.Name); err != nil {
                Failf("Failed deleting error ::: --- %v ", err)
            }
        }
        Logf("namespace : %v api call to delete is complete ", item)
    }
    // Now POLL until all namespaces have been eradicated.
    expectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
        func() (bool, error) {
            if rem, err := countRemaining(c, "nslifetest"); err != nil || rem > maxAllowedAfterDel {
                Logf("Remaining namespaces : %v", rem)
                return false, err
            } else {
                return true, nil
            }
        }))
}
Example 8: assertFilesExist
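// assertFilesExist polls for up to a minute, reading each named file from the
// pod through the apiserver proxy, and fails if any file is still unreadable.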
func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client *client.Client) {
    var failed []string
    expectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
        failed = []string{}
        for _, fileName := range fileNames {
            if _, err := client.Get().
                Prefix("proxy").
                Resource("pods").
                Namespace(pod.Namespace).
                Name(pod.Name).
                Suffix(fileDir, fileName).
                Do().Raw(); err != nil {
                Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
                failed = append(failed, fileName)
            }
        }
        if len(failed) == 0 {
            return true, nil
        }
        Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
        return false, nil
    }))
    Expect(len(failed)).To(Equal(0))
}
Example 9: waitForReflection
// Wait till the passFunc confirms that the object it expects to see is in the store.
// Used to observe reflected events.
func waitForReflection(s cache.Store, key string, passFunc func(n interface{}) bool) error {
    return wait.Poll(time.Millisecond*10, time.Second*20, func() (bool, error) {
        if n, _, err := s.GetByKey(key); err == nil && passFunc(n) {
            return true, nil
        }
        return false, nil
    })
}
Example 10: updateAndWait
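// updateAndWait pushes the updated replication controller to the server,
// polls until it reports the desired number of replicas, and returns a fresh
// copy of the controller.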
func (r *RollingUpdater) updateAndWait(rc *api.ReplicationController, interval, timeout time.Duration) (*api.ReplicationController, error) {
    rc, err := r.c.UpdateReplicationController(r.ns, rc)
    if err != nil {
        return nil, err
    }
    if err = wait.Poll(interval, timeout, r.c.ControllerHasDesiredReplicas(rc)); err != nil {
        return nil, err
    }
    return r.c.GetReplicationController(r.ns, rc.ObjectMeta.Name)
}
Example 11: checkNodesReady
// checkNodesReady waits up to nt for expect nodes accessed by c to be ready,
// returning an error if this doesn't happen in time. It returns the names of
// nodes it finds.
func checkNodesReady(c *client.Client, nt time.Duration, expect int) ([]string, error) {
    // First, keep getting all of the nodes until we get the number we expect.
    var nodeList *api.NodeList
    var errLast error
    start := time.Now()
    found := wait.Poll(poll, nt, func() (bool, error) {
        // Even though listNodes(...) has its own retries, a rolling-update
        // (GCE/GKE implementation of restart) can complete before the apiserver
        // knows about all of the nodes. Thus, we retry the list nodes call
        // until we get the expected number of nodes.
        nodeList, errLast = listNodes(c, labels.Everything(), fields.Everything())
        if errLast != nil {
            return false, nil
        }
        if len(nodeList.Items) != expect {
            errLast = fmt.Errorf("expected to find %d nodes but found only %d (%v elapsed)",
                expect, len(nodeList.Items), time.Since(start))
            Logf("%v", errLast)
            return false, nil
        }
        return true, nil
    }) == nil
    nodeNames := make([]string, len(nodeList.Items))
    for i, n := range nodeList.Items {
        nodeNames[i] = n.ObjectMeta.Name
    }
    if !found {
        return nodeNames, fmt.Errorf("couldn't find %d nodes within %v; last error: %v",
            expect, nt, errLast)
    }
    Logf("Successfully found %d nodes", expect)
    // Next, ensure in parallel that all the nodes are ready. We subtract the
    // time we spent waiting above.
    timeout := nt - time.Since(start)
    result := make(chan bool, len(nodeList.Items))
    for _, n := range nodeNames {
        n := n
        go func() { result <- waitForNodeToBeReady(c, n, timeout) }()
    }
    failed := false
    // TODO(mbforbes): Change to `for range` syntax once we support only Go
    // >= 1.4.
    for i := range nodeList.Items {
        _ = i
        if !<-result {
            failed = true
        }
    }
    if failed {
        return nodeNames, fmt.Errorf("at least one node failed to be ready")
    }
    return nodeNames, nil
}
Example 12: getReferencedServiceAccountToken
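// getReferencedServiceAccountToken returns the name and contents of a
// service-account token secret referenced by the named ServiceAccount,
// optionally polling for up to 10 seconds for such a secret to appear.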
func getReferencedServiceAccountToken(c *client.Client, ns string, name string, shouldWait bool) (string, string, error) {
    tokenName := ""
    token := ""
    findToken := func() (bool, error) {
        user, err := c.ServiceAccounts(ns).Get(name)
        if errors.IsNotFound(err) {
            return false, nil
        }
        if err != nil {
            return false, err
        }
        for _, ref := range user.Secrets {
            secret, err := c.Secrets(ns).Get(ref.Name)
            if errors.IsNotFound(err) {
                continue
            }
            if err != nil {
                return false, err
            }
            if secret.Type != api.SecretTypeServiceAccountToken {
                continue
            }
            name := secret.Annotations[api.ServiceAccountNameKey]
            uid := secret.Annotations[api.ServiceAccountUIDKey]
            tokenData := secret.Data[api.ServiceAccountTokenKey]
            if name == user.Name && uid == string(user.UID) && len(tokenData) > 0 {
                tokenName = secret.Name
                token = string(tokenData)
                return true, nil
            }
        }
        return false, nil
    }
    if shouldWait {
        err := wait.Poll(time.Second, 10*time.Second, findToken)
        if err != nil {
            return "", "", err
        }
    } else {
        ok, err := findToken()
        if err != nil {
            return "", "", err
        }
        if !ok {
            return "", "", fmt.Errorf("No token found for %s/%s", ns, name)
        }
    }
    return tokenName, token, nil
}
Example 13: retryCmd
// retryCmd runs cmd using args and retries it for up to singleCallTimeout if
// it returns an error. It returns stdout and stderr.
func retryCmd(command string, args ...string) (string, string, error) {
    var err error
    stdout, stderr := "", ""
    wait.Poll(poll, singleCallTimeout, func() (bool, error) {
        stdout, stderr, err = runCmd(command, args...)
        if err != nil {
            Logf("Got %v", err)
            return false, nil
        }
        return true, nil
    })
    return stdout, stderr, err
}
Example 14: migRollingUpdateStart
// migRollingUpdateStart (GCE/GKE-only) starts a MIG rolling update using templ
// as the new template, waiting up to nt per node, and returns the ID of that
// update.
func migRollingUpdateStart(templ string, nt time.Duration) (string, error) {
    var errLast error
    var id string
    prefix, suffix := "Started [", "]."
    // TODO(mbforbes): Refactor this to use cluster_upgrade.go:retryCmd(...)
    if err := wait.Poll(poll, singleCallTimeout, func() (bool, error) {
        // TODO(mbforbes): make this hit the compute API directly instead of
        // shelling out to gcloud.
        // NOTE(mbforbes): If you are changing this gcloud command, update
        // cluster/gce/upgrade.sh to match this EXACTLY.
        o, err := exec.Command("gcloud", "preview", "rolling-updates",
            fmt.Sprintf("--project=%s", testContext.CloudConfig.ProjectID),
            fmt.Sprintf("--zone=%s", testContext.CloudConfig.Zone),
            "start",
            // Required args.
            fmt.Sprintf("--group=%s", testContext.CloudConfig.NodeInstanceGroup),
            fmt.Sprintf("--template=%s", templ),
            // Optional args to fine-tune behavior.
            fmt.Sprintf("--instance-startup-timeout=%ds", int(nt.Seconds())),
            // NOTE: We can speed up this process by increasing
            // --max-num-concurrent-instances.
            fmt.Sprintf("--max-num-concurrent-instances=%d", 1),
            fmt.Sprintf("--max-num-failed-instances=%d", 0),
            fmt.Sprintf("--min-instance-update-time=%ds", 0)).CombinedOutput()
        if err != nil {
            errLast = fmt.Errorf("gcloud preview rolling-updates call failed with err: %v", err)
            return false, nil
        }
        output := string(o)
        // The 'start' call probably succeeded; parse the output and try to find
        // the line that looks like "Started [url/to/<id>]." and return <id>.
        for _, line := range strings.Split(output, "\n") {
            // As a sanity check, ensure the line starts with prefix and ends
            // with suffix.
            if strings.Index(line, prefix) != 0 || strings.Index(line, suffix) != len(line)-len(suffix) {
                continue
            }
            url := strings.Split(strings.TrimSuffix(strings.TrimPrefix(line, prefix), suffix), "/")
            id = url[len(url)-1]
            Logf("Started MIG rolling update; ID: %s", id)
            return true, nil
        }
        errLast = fmt.Errorf("couldn't find line like '%s ... %s' in output to MIG rolling-update start. Output: %s",
            prefix, suffix, output)
        return false, nil
    }); err != nil {
        return "", fmt.Errorf("migRollingUpdateStart() failed with last error: %v", errLast)
    }
    return id, nil
}
Example 15: runReplicationControllerTest
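// runReplicationControllerTest creates a replication controller from a
// versioned JSON fixture, then polls until the controller has created its
// pods and until those pods are running and visible on their minions.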
func runReplicationControllerTest(c *client.Client) {
    clientAPIVersion := c.APIVersion()
    data, err := ioutil.ReadFile("cmd/integration/" + clientAPIVersion + "-controller.json")
    if err != nil {
        glog.Fatalf("Unexpected error: %v", err)
    }
    var controller api.ReplicationController
    if err := api.Scheme.DecodeInto(data, &controller); err != nil {
        glog.Fatalf("Unexpected error: %v", err)
    }
    glog.Infof("Creating replication controllers")
    updated, err := c.ReplicationControllers("test").Create(&controller)
    if err != nil {
        glog.Fatalf("Unexpected error: %v", err)
    }
    glog.Infof("Done creating replication controllers")
    // In practice the controller doesn't need 60s to create a handful of pods, but network latencies on CI
    // systems have been observed to vary unpredictably, so give the controller enough time to create pods.
    // Our e2e scalability tests will catch controllers that are *actually* slow.
    if err := wait.Poll(time.Second, time.Second*60, client.ControllerHasDesiredReplicas(c, updated)); err != nil {
        glog.Fatalf("FAILED: pods never created %v", err)
    }
    // Poll till we can retrieve the status of all pods matching the given label selector from their minions.
    // This involves 3 operations:
    // - The scheduler must assign all pods to a minion
    // - The assignment must reflect in a `List` operation against the apiserver, for labels matching the selector
    // - We need to be able to query the qinglet on that minion for information about the pod
    if err := wait.Poll(
        time.Second, time.Second*30, podsOnMinions(c, "test", labels.Set(updated.Spec.Selector).AsSelector())); err != nil {
        glog.Fatalf("FAILED: pods never started running %v", err)
    }
    glog.Infof("Pods created")
}