This article collects typical usage examples of the Golang function Poll from k8s.io/kubernetes/pkg/util/wait. If you have been wondering what exactly Poll does, how to call it, and what it looks like in real code, the curated examples here should help.
The following presents 15 code examples of the Poll function, ordered by popularity by default.
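Before the examples, here is a minimal sketch of the API all of these snippets share: wait.Poll(interval, timeout, condition) invokes the condition function once per interval until it returns (true, nil), returns a non-nil error, or the timeout elapses, in which case Poll returns wait.ErrWaitTimeout. The snippet below is a self-contained illustration, not taken from any of the projects; the counter-based "readiness" check is hypothetical.

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/wait"
)

func main() {
	attempts := 0
	// Poll the (hypothetical) condition every 500ms with a 10-second budget.
	// Returning (true, nil) stops polling with success; a non-nil error aborts
	// immediately; exhausting the timeout yields wait.ErrWaitTimeout.
	err := wait.Poll(500*time.Millisecond, 10*time.Second, func() (bool, error) {
		attempts++
		return attempts >= 3, nil // pretend readiness arrives on the third check
	})
	if err != nil {
		fmt.Printf("condition never became true: %v\n", err)
		return
	}
	fmt.Printf("condition met after %d attempts\n", attempts)
}

One detail worth knowing: in this vintage of the package, Poll waits one full interval before the first check, which is presumably why Example 12 below tests the condition once before handing it to Poll.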
Example 1: checkExistingRCRecovers
func checkExistingRCRecovers(f *Framework) {
	By("assert that the pre-existing replication controller recovers")
	podClient := f.Client.Pods(f.Namespace.Name)
	rcSelector := labels.Set{"name": "baz"}.AsSelector()
	By("deleting pods from existing replication controller")
	expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		pods, err := podClient.List(rcSelector, fields.Everything())
		if err != nil {
			Logf("apiserver returned error, as expected before recovery: %v", err)
			return false, nil
		}
		if len(pods.Items) == 0 {
			return false, nil
		}
		for _, pod := range pods.Items {
			err = podClient.Delete(pod.Name, api.NewDeleteOptions(0))
			Expect(err).NotTo(HaveOccurred())
		}
		Logf("apiserver has recovered")
		return true, nil
	}))
	By("waiting for replication controller to recover")
	expectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
		pods, err := podClient.List(rcSelector, fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		for _, pod := range pods.Items {
			if pod.DeletionTimestamp == nil && api.IsPodReady(&pod) {
				return true, nil
			}
		}
		return false, nil
	}))
}
Example 2: CreateAssets
func CreateAssets(manifestDir string, timeout time.Duration) error {
	upFn := func() (bool, error) {
		if err := apiTest(); err != nil {
			glog.Warningf("Unable to determine api-server version: %v", err)
			return false, nil
		}
		return true, nil
	}
	createFn := func() (bool, error) {
		err := createAssets(manifestDir)
		if err != nil {
			glog.Warningf("Error creating assets: %v", err)
			// Stop polling (return true) only when the error is not retryable.
			return !shouldRetry(err), nil
		}
		return true, nil
	}
	start := time.Now()
	if err := wait.Poll(5*time.Second, timeout, upFn); err != nil {
		return fmt.Errorf("API Server unavailable: %v", err)
	}
	// Spend whatever remains of the overall budget on asset creation.
	timeout = timeout - time.Since(start)
	if err := wait.Poll(5*time.Second, timeout, createFn); err != nil {
		return fmt.Errorf("Failed to create assets: %v", err)
	}
	return nil
}
Example 3: Scale
// Scale updates a replication controller created by the DeploymentConfig with the provided namespace/name,
// to a new size, with an optional precondition check (if preconditions is not nil), optional retries (if retry
// is not nil), and then optionally waits for its replica count to reach the new value (if wait is not nil).
func (scaler *DeploymentConfigScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, waitForReplicas *kubectl.RetryParams) error {
	if preconditions == nil {
		preconditions = &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &kubectl.RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := kubectl.ScaleCondition(scaler, preconditions, namespace, name, newSize)
	if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
		if scaleErr := err.(kubectl.ControllerScaleError); kerrors.IsNotFound(scaleErr.ActualError) {
			glog.Infof("No deployment found for dc/%s. Scaling the deployment configuration template...", name)
			dc, err := scaler.dcClient.DeploymentConfigs(namespace).Get(name)
			if err != nil {
				return err
			}
			dc.Template.ControllerTemplate.Replicas = int(newSize)
			if _, err := scaler.dcClient.DeploymentConfigs(namespace).Update(dc); err != nil {
				return err
			}
			return nil
		}
		return err
	}
	if waitForReplicas != nil {
		rc, err := scaler.rcClient.ReplicationControllers(namespace).Get(name)
		if err != nil {
			return err
		}
		return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, kclient.ControllerHasDesiredReplicas(scaler.clientInterface, rc))
	}
	return nil
}
Example 4: Scale
// Scale updates a deployment to a new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for the status to reach desired count.
func (scaler *DeploymentScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
	if preconditions == nil {
		preconditions = &ScalePrecondition{-1, ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := ScaleCondition(scaler, preconditions, namespace, name, newSize, nil)
	if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}
	if waitForReplicas != nil {
		deployment, err := scaler.c.Deployments(namespace).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		err = wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, client.DeploymentHasDesiredReplicas(scaler.c, deployment))
		if err == wait.ErrWaitTimeout {
			return fmt.Errorf("timed out waiting for %q to be synced", name)
		}
		return err
	}
	return nil
}
Example 5: Scale
// Scale updates the DeploymentConfig with the provided namespace/name, to a
// new size, with optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for its
// deployment replica count to reach the new value (if wait is not nil).
func (scaler *DeploymentConfigScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, waitForReplicas *kubectl.RetryParams) error {
	if preconditions == nil {
		preconditions = &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}
	}
	if retry == nil {
		// Make it try only once, immediately
		retry = &kubectl.RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
	}
	cond := kubectl.ScaleCondition(scaler, preconditions, namespace, name, newSize)
	if err := wait.Poll(retry.Interval, retry.Timeout, cond); err != nil {
		return err
	}
	if waitForReplicas != nil {
		dc, err := scaler.dcClient.DeploymentConfigs(namespace).Get(name)
		if err != nil {
			return err
		}
		rc, err := scaler.rcClient.ReplicationControllers(namespace).Get(util.LatestDeploymentNameForConfig(dc))
		if err != nil {
			return err
		}
		return wait.Poll(waitForReplicas.Interval, waitForReplicas.Timeout, controllerHasSpecifiedReplicas(scaler.clientInterface, rc, dc.Template.ControllerTemplate.Replicas))
	}
	return nil
}
Example 6: runSchedulerNoPhantomPodsTest
func runSchedulerNoPhantomPodsTest(client *client.Client) {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:  "c1",
					Image: "kubernetes/pause",
					Ports: []api.ContainerPort{
						{ContainerPort: 1234, HostPort: 9999},
					},
					ImagePullPolicy: api.PullIfNotPresent,
				},
			},
		},
	}
	// Assuming we only have two kubelets, the third pod here won't schedule
	// if the scheduler doesn't correctly handle the delete for the second
	// pod.
	pod.ObjectMeta.Name = "phantom.foo"
	foo, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, longTestTimeout, podRunning(client, foo.Namespace, foo.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running %v", err)
	}
	pod.ObjectMeta.Name = "phantom.bar"
	bar, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, longTestTimeout, podRunning(client, bar.Namespace, bar.Name)); err != nil {
		glog.Fatalf("FAILED: pod never started running %v", err)
	}
	// Delete a pod to free up room.
	glog.Infof("Deleting pod %v", bar.Name)
	err = client.Pods(api.NamespaceDefault).Delete(bar.Name, api.NewDeleteOptions(0))
	if err != nil {
		glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
	}
	pod.ObjectMeta.Name = "phantom.baz"
	baz, err := client.Pods(api.NamespaceDefault).Create(pod)
	if err != nil {
		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
	}
	if err := wait.Poll(time.Second, longTestTimeout, podRunning(client, baz.Namespace, baz.Name)); err != nil {
		if pod, perr := client.Pods(api.NamespaceDefault).Get("phantom.bar"); perr == nil {
			glog.Fatalf("FAILED: 'phantom.bar' was never deleted: %#v, err: %v", pod, err)
		} else {
			glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: err: %v, perr: %v", err, perr)
		}
	}
	glog.Info("Scheduler doesn't make phantom pods: test passed.")
}
Example 7: WaitForABuild
// WaitForABuild waits for a Build object to match either isOK or isFailed conditions.
func WaitForABuild(c client.BuildInterface, name string, isOK, isFailed func(*buildapi.Build) bool) error {
	// wait 2 minutes for the build to exist
	err := wait.Poll(1*time.Second, 2*time.Minute, func() (bool, error) {
		if _, err := c.Get(name); err != nil {
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return err
	}
	// wait longer for the build to run to completion
	return wait.Poll(5*time.Second, 20*time.Minute, func() (bool, error) {
		list, err := c.List(labels.Everything(), fields.Set{"name": name}.AsSelector())
		if err != nil {
			return false, err
		}
		for i := range list.Items {
			if name == list.Items[i].Name && isOK(&list.Items[i]) {
				return true, nil
			}
			if name != list.Items[i].Name || isFailed(&list.Items[i]) {
				return false, fmt.Errorf("The build %q status is %q", name, list.Items[i].Status.Phase)
			}
		}
		rv := list.ResourceVersion
		w, err := c.Watch(labels.Everything(), fields.Set{"name": name}.AsSelector(), rv)
		if err != nil {
			return false, err
		}
		defer w.Stop()
		for {
			val, ok := <-w.ResultChan()
			if !ok {
				// reget and re-watch
				return false, nil
			}
			if e, ok := val.Object.(*buildapi.Build); ok {
				if name == e.Name && isOK(e) {
					return true, nil
				}
				if name != e.Name || isFailed(e) {
					return false, fmt.Errorf("The build %q status is %q", name, e.Status.Phase)
				}
			}
		}
	})
}
Example 8: MigTemplate
// MigTemplate (GCE-only) returns the name of the MIG template that the
// nodes of the cluster use.
func MigTemplate() (string, error) {
	var errLast error
	var templ string
	key := "instanceTemplate"
	if wait.Poll(Poll, SingleCallTimeout, func() (bool, error) {
		// TODO(mikedanese): make this hit the compute API directly instead of
		// shelling out to gcloud.
		// An `instance-groups managed describe` call outputs what we want to stdout.
		output, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
			fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
			"describe",
			fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone),
			TestContext.CloudConfig.NodeInstanceGroup)
		if err != nil {
			errLast = fmt.Errorf("gcloud compute instance-groups managed describe call failed with err: %v", err)
			return false, nil
		}
		// The 'describe' call probably succeeded; parse the output and try to
		// find the line that looks like "instanceTemplate: url/to/<templ>" and
		// return <templ>.
		if val := ParseKVLines(output, key); len(val) > 0 {
			url := strings.Split(val, "/")
			templ = url[len(url)-1]
			Logf("MIG group %s using template: %s", TestContext.CloudConfig.NodeInstanceGroup, templ)
			return true, nil
		}
		errLast = fmt.Errorf("couldn't find %s in output to get MIG template. Output: %s", key, output)
		return false, nil
	}) != nil {
		return "", fmt.Errorf("MigTemplate() failed with last error: %v", errLast)
	}
	return templ, nil
}
Example 9: TestMasterService
func TestMasterService(t *testing.T) {
	_, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
	defer s.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
	err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
		svcList, err := client.Core().Services(api.NamespaceDefault).List(api.ListOptions{})
		if err != nil {
			t.Errorf("unexpected error: %v", err)
			return false, nil
		}
		found := false
		for i := range svcList.Items {
			if svcList.Items[i].Name == "kubernetes" {
				found = true
				break
			}
		}
		if found {
			ep, err := client.Core().Endpoints(api.NamespaceDefault).Get("kubernetes")
			if err != nil {
				return false, nil
			}
			if countEndpoints(ep) == 0 {
				return false, fmt.Errorf("no endpoints for kubernetes service: %v", ep)
			}
			return true, nil
		}
		return false, nil
	})
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
}
Example 10: Rename
func Rename(c coreclient.ReplicationControllersGetter, rc *api.ReplicationController, newName string) error {
	oldName := rc.Name
	rc.Name = newName
	rc.ResourceVersion = ""
	// First delete the oldName RC and orphan its pods.
	trueVar := true
	err := c.ReplicationControllers(rc.Namespace).Delete(oldName, &api.DeleteOptions{OrphanDependents: &trueVar})
	if err != nil && !errors.IsNotFound(err) {
		return err
	}
	err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
		_, err := c.ReplicationControllers(rc.Namespace).Get(oldName)
		if err == nil {
			return false, nil
		} else if errors.IsNotFound(err) {
			return true, nil
		} else {
			return false, err
		}
	})
	if err != nil {
		return err
	}
	// Then create the same RC with the new name.
	_, err = c.ReplicationControllers(rc.Namespace).Create(rc)
	if err != nil {
		return err
	}
	return nil
}
Example 11: UpdatePodWithRetries
// UpdatePodWithRetries updates a pod with the given applyUpdate function. Note that a pod-not-found error is ignored.
// The returned bool value can be used to tell if the pod was actually updated.
func UpdatePodWithRetries(podClient unversionedcore.PodInterface, pod *api.Pod, applyUpdate updatePodFunc) (*api.Pod, bool, error) {
	var err error
	var podUpdated bool
	oldPod := pod
	if err = wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
		pod, err = podClient.Get(oldPod.Name)
		if err != nil {
			return false, err
		}
		// Apply the update, then attempt to push it to the apiserver.
		// TODO: add precondition for update
		applyUpdate(pod)
		if pod, err = podClient.Update(pod); err == nil {
			// Update successful.
			return true, nil
		}
		// TODO: don't retry on perm-failed errors and handle them gracefully
		// Update could have failed due to conflict error. Try again.
		return false, nil
	}); err == nil {
		// When there's no error, we've updated this pod.
		podUpdated = true
	}
	if err == wait.ErrWaitTimeout {
		err = fmt.Errorf("timed out trying to update pod: %+v", oldPod)
	}
	if errors.IsNotFound(err) {
		glog.V(4).Infof("%s %s/%s is not found, skip updating it.", oldPod.Kind, oldPod.Namespace, oldPod.Name)
		err = nil
	}
	return pod, podUpdated, err
}
Example 12: TestCleanUp
func TestCleanUp(t *testing.T) {
	m := newTestManager()
	for _, probeType := range [...]probeType{liveness, readiness} {
		key := probeKey{podUID, containerName, probeType}
		w := newTestWorker(m, probeType, api.Probe{})
		m.statusManager.SetPodStatus(w.pod, getRunningStatus())
		go w.run()
		m.workers[key] = w
		// Wait for worker to run.
		condition := func() (bool, error) {
			ready, _ := resultsManager(m, probeType).Get(containerID)
			return ready == results.Success, nil
		}
		if ready, _ := condition(); !ready {
			if err := wait.Poll(100*time.Millisecond, util.ForeverTestTimeout, condition); err != nil {
				t.Fatalf("[%s] Error waiting for worker ready: %v", probeType, err)
			}
		}
		close(w.stop)
		if err := waitForWorkerExit(m, []probeKey{key}); err != nil {
			t.Fatalf("[%s] error waiting for worker exit: %v", probeType, err)
		}
		if _, ok := resultsManager(m, probeType).Get(containerID); ok {
			t.Errorf("[%s] Expected result to be cleared.", probeType)
		}
		if _, ok := m.workers[key]; ok {
			t.Errorf("[%s] Expected worker to be cleared.", probeType)
		}
	}
}
Example 13: WaitForAttach
// WaitForAttach runs on the node to detect if the volume (referenced by LUN) is attached. If attached, the device path is returned.
func (attacher *azureDiskAttacher) WaitForAttach(spec *volume.Spec, lunStr string, timeout time.Duration) (string, error) {
	volumeSource, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}
	if len(lunStr) == 0 {
		return "", fmt.Errorf("WaitForAttach failed for Azure disk %q: lun is empty.", volumeSource.DiskName)
	}
	lun, err := strconv.Atoi(lunStr)
	if err != nil {
		return "", fmt.Errorf("WaitForAttach: wrong lun %q, err: %v", lunStr, err)
	}
	scsiHostRescan(&osIOHandler{})
	exe := exec.New()
	devicePath := ""
	err = wait.Poll(checkSleepDuration, timeout, func() (bool, error) {
		glog.V(4).Infof("Checking Azure disk %q(lun %s) is attached.", volumeSource.DiskName, lunStr)
		if devicePath, err = findDiskByLun(lun, &osIOHandler{}, exe); err == nil {
			glog.V(4).Infof("Successfully found attached Azure disk %q(lun %s, device path %s).", volumeSource.DiskName, lunStr, devicePath)
			return true, nil
		} else {
			// Log error, if any, and continue checking periodically.
			glog.V(4).Infof("Error Stat Azure disk (%q) is attached: %v", volumeSource.DiskName, err)
			return false, nil
		}
	})
	return devicePath, err
}
Example 14: assertFilesExist
func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client *client.Client) {
	var failed []string
	expectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
		failed = []string{}
		for _, fileName := range fileNames {
			if _, err := client.Get().
				Namespace(pod.Namespace).
				Resource("pods").
				SubResource("proxy").
				Name(pod.Name).
				Suffix(fileDir, fileName).
				Do().Raw(); err != nil {
				Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
				failed = append(failed, fileName)
			}
		}
		if len(failed) == 0 {
			return true, nil
		}
		Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
		return false, nil
	}))
	Expect(len(failed)).To(Equal(0))
}
Example 15: checkTunnelsCorrect
func checkTunnelsCorrect(t *testing.T, tunnelList *SSHTunnelList, addresses []string) {
	if err := wait.Poll(100*time.Millisecond, 2*time.Second, func() (bool, error) {
		return hasCorrectTunnels(tunnelList, addresses), nil
	}); err != nil {
		t.Errorf("Error waiting for tunnels to reach expected state: %v. Expected %v, had %v", err, addresses, tunnelList)
	}
}