This article collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/util/wait.PollImmediate. If you are wondering what PollImmediate does, how to call it, or what real-world usage looks like, the curated code samples below should help.
The 15 code examples of PollImmediate below are ordered by popularity.
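Before the examples, a minimal self-contained sketch of the call shape may be useful. PollImmediate(interval, timeout, condition) runs the condition once immediately, then once per interval, until the condition returns true, returns a non-nil error, or the timeout elapses (reported as wait.ErrWaitTimeout). The readiness counter below is invented purely for illustration:

package main

import (
    "fmt"
    "time"

    "k8s.io/kubernetes/pkg/util/wait"
)

func main() {
    attempts := 0
    err := wait.PollImmediate(100*time.Millisecond, 1*time.Second, func() (bool, error) {
        attempts++
        // Pretend the resource becomes ready on the third check.
        return attempts >= 3, nil
    })
    if err == wait.ErrWaitTimeout {
        fmt.Println("condition never became true before the timeout")
    } else if err != nil {
        fmt.Printf("poll aborted by condition error: %v\n", err)
    } else {
        fmt.Printf("condition met after %d attempts\n", attempts)
    }
}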
Example 1: waitForRunning
func (p *petSetTester) waitForRunning(numPets int32, ps *apps.PetSet) {
    pollErr := wait.PollImmediate(petsetPoll, petsetTimeout,
        func() (bool, error) {
            podList := p.getPodList(ps)
            if int32(len(podList.Items)) < numPets {
                framework.Logf("Found %d pets, waiting for %d", len(podList.Items), numPets)
                return false, nil
            }
            if int32(len(podList.Items)) > numPets {
                return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
            }
            for _, p := range podList.Items {
                isReady := api.IsPodReady(&p)
                if p.Status.Phase != api.PodRunning || !isReady {
                    framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, api.PodRunning, p.Status.Phase, isReady)
                    return false, nil
                }
            }
            return true, nil
        })
    if pollErr != nil {
        framework.Failf("Failed waiting for pods to enter running: %v", pollErr)
    }
    p.waitForStatus(ps, numPets)
}
Example 2: WaitForPodCreationServiceAccounts
// WaitForPodCreationServiceAccounts ensures that the service account needed for pod creation exists
// and that the cache for the admission control that checks for pod tokens has caught up to allow
// pod creation.
func WaitForPodCreationServiceAccounts(client *kclient.Client, namespace string) error {
    if err := WaitForServiceAccounts(client, namespace, []string{bootstrappolicy.DefaultServiceAccountName}); err != nil {
        return err
    }
    testPod := &kapi.Pod{}
    testPod.GenerateName = "test"
    testPod.Spec.Containers = []kapi.Container{
        {
            Name:  "container",
            Image: "openshift/origin-pod:latest",
        },
    }
    return wait.PollImmediate(time.Second, PodCreationWaitTimeout, func() (bool, error) {
        pod, err := client.Pods(namespace).Create(testPod)
        if err != nil {
            glog.Warningf("Error attempting to create test pod: %v", err)
            return false, nil
        }
        err = client.Pods(namespace).Delete(pod.Name, kapi.NewDeleteOptions(0))
        if err != nil {
            return false, err
        }
        return true, nil
    })
}
Example 3: waitForRunning
func (p *statefulSetTester) waitForRunning(numPets int32, ps *apps.StatefulSet, shouldBeReady bool) {
    pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout,
        func() (bool, error) {
            podList := p.getPodList(ps)
            if int32(len(podList.Items)) < numPets {
                framework.Logf("Found %d stateful pods, waiting for %d", len(podList.Items), numPets)
                return false, nil
            }
            if int32(len(podList.Items)) > numPets {
                return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
            }
            for _, p := range podList.Items {
                isReady := v1.IsPodReady(&p)
                desiredReadiness := shouldBeReady == isReady
                framework.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
                if p.Status.Phase != v1.PodRunning || !desiredReadiness {
                    return false, nil
                }
            }
            return true, nil
        })
    if pollErr != nil {
        framework.Failf("Failed waiting for pods to enter running: %v", pollErr)
    }
}
Example 4: Scale
// Scale updates a ReplicationController to a new size, with an optional precondition check (if preconditions is not nil),
// optional retries (if retry is not nil), and then optionally waits for its replica count to reach the new value
// (if waitForReplicas is not nil).
func (scaler *ReplicationControllerScaler) Scale(namespace, name string, newSize uint, preconditions *ScalePrecondition, retry, waitForReplicas *RetryParams) error {
    if preconditions == nil {
        preconditions = &ScalePrecondition{-1, ""}
    }
    if retry == nil {
        // Make it try only once, immediately
        retry = &RetryParams{Interval: time.Millisecond, Timeout: time.Millisecond}
    }
    cond := ScaleCondition(scaler, preconditions, namespace, name, newSize)
    if err := wait.PollImmediate(retry.Interval, retry.Timeout, cond); err != nil {
        return err
    }
    if waitForReplicas != nil {
        watchOptions := api.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", name), ResourceVersion: "0"}
        watcher, err := scaler.c.ReplicationControllers(namespace).Watch(watchOptions)
        if err != nil {
            return err
        }
        _, err = watch.Until(waitForReplicas.Timeout, watcher, func(event watch.Event) (bool, error) {
            if event.Type != watch.Added && event.Type != watch.Modified {
                return false, nil
            }
            rc := event.Object.(*api.ReplicationController)
            return rc.Status.ObservedGeneration >= rc.Generation && rc.Status.Replicas == rc.Spec.Replicas, nil
        })
        if err == wait.ErrWaitTimeout {
            return fmt.Errorf("timed out waiting for %q to be synced", name)
        }
        return err
    }
    return nil
}
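One detail worth reusing from this example: both wait.PollImmediate and watch.Until report expiry with the sentinel wait.ErrWaitTimeout, so a caller can translate the generic timeout into a message naming the resource, as the code above does for the watch. A minimal sketch of the same translation around PollImmediate (cond and name are placeholders, not part of the example above):

if err := wait.PollImmediate(time.Second, 30*time.Second, cond); err != nil {
    if err == wait.ErrWaitTimeout {
        return fmt.Errorf("timed out waiting for %q to be synced", name)
    }
    return err
}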
Example 5: TestCacheWatcherCleanupNotBlockedByResult
// verifies that the cacheWatcher.process goroutine is properly cleaned up even if
// writes to the cacheWatcher.result channel are blocked.
func TestCacheWatcherCleanupNotBlockedByResult(t *testing.T) {
    var lock sync.RWMutex
    count := 0
    filter := func(string, labels.Set, fields.Set) bool { return true }
    forget := func(bool) {
        lock.Lock()
        defer lock.Unlock()
        count++
    }
    initEvents := []watchCacheEvent{
        {Object: &api.Pod{}},
        {Object: &api.Pod{}},
    }
    // Set the buffer size of w.result to 0 so that writes to w.result block.
    w := newCacheWatcher(0, 0, initEvents, filter, forget)
    w.Stop()
    if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
        lock.RLock()
        defer lock.RUnlock()
        return count == 2, nil
    }); err != nil {
        t.Fatalf("expected forget() to be called twice, because sendWatchCacheEvent should not be blocked by the result channel: %v", err)
    }
}
Example 6: cleanupServiceShardLoadBalancer
func cleanupServiceShardLoadBalancer(clusterName string, service *v1.Service, timeout time.Duration) error {
    provider := framework.TestContext.CloudConfig.Provider
    if provider == nil {
        return fmt.Errorf("cloud provider undefined")
    }
    internalSvc := &v1.Service{}
    err := api.Scheme.Convert(service, internalSvc, nil)
    if err != nil {
        return fmt.Errorf("failed to convert versioned service object to internal type: %v", err)
    }
    err = wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
        lbi, supported := provider.LoadBalancer()
        if !supported {
            return false, fmt.Errorf("%q doesn't support load balancers", provider.ProviderName())
        }
        err := lbi.EnsureLoadBalancerDeleted(clusterName, internalSvc)
        if err != nil {
            // Deletion failed with an error, try again.
            framework.Logf("Failed to delete cloud provider resources for service %q in namespace %q, in cluster %q", service.Name, service.Namespace, clusterName)
            return false, nil
        }
        By(fmt.Sprintf("Cloud provider resources for Service %q in namespace %q in cluster %q deleted", service.Name, service.Namespace, clusterName))
        return true, nil
    })
    return err
}
Example 7: waitForPodsOrDie
func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
    By("Waiting for all pods to be running")
    err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
        pods, err := cs.Core().Pods(ns).List(v1.ListOptions{LabelSelector: "foo=bar"})
        if err != nil {
            return false, err
        }
        if pods == nil {
            return false, fmt.Errorf("pods is nil")
        }
        if len(pods.Items) < n {
            framework.Logf("pods: %v < %v", len(pods.Items), n)
            return false, nil
        }
        ready := 0
        for i := 0; i < n; i++ {
            if pods.Items[i].Status.Phase == v1.PodRunning {
                ready++
            }
        }
        if ready < n {
            framework.Logf("running pods: %v < %v", ready, n)
            return false, nil
        }
        return true, nil
    })
    framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
}
Example 8: scale
func (p *statefulSetTester) scale(ps *apps.StatefulSet, count int32) error {
    name := ps.Name
    ns := ps.Namespace
    p.update(ns, name, func(ps *apps.StatefulSet) { *(ps.Spec.Replicas) = count })
    var petList *v1.PodList
    pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
        petList = p.getPodList(ps)
        if int32(len(petList.Items)) == count {
            return true, nil
        }
        return false, nil
    })
    if pollErr != nil {
        unhealthy := []string{}
        for _, pet := range petList.Items {
            delTs, phase, readiness := pet.DeletionTimestamp, pet.Status.Phase, v1.IsPodReady(&pet)
            if delTs != nil || phase != v1.PodRunning || !readiness {
                unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", pet.Name, delTs, phase, readiness))
            }
        }
        return fmt.Errorf("Failed to scale statefulset to %d in %v. Remaining pods:\n%v", count, statefulsetTimeout, unhealthy)
    }
    return nil
}
Example 9: deleteFederationNs
func (f *Framework) deleteFederationNs() {
    ns := f.FederationNamespace
    By(fmt.Sprintf("Destroying federation namespace %q for this suite.", ns.Name))
    timeout := 5 * time.Minute
    if f.NamespaceDeletionTimeout != 0 {
        timeout = f.NamespaceDeletionTimeout
    }
    clientset := f.FederationClientset_1_4
    // First delete the namespace from federation apiserver.
    if err := clientset.Core().Namespaces().Delete(ns.Name, &api.DeleteOptions{}); err != nil {
        Failf("Error while deleting federation namespace %s: %s", ns.Name, err)
    }
    // Verify that it got deleted.
    err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
        if _, err := clientset.Core().Namespaces().Get(ns.Name); err != nil {
            if apierrs.IsNotFound(err) {
                return true, nil
            }
            Logf("Error while waiting for namespace to be terminated: %v", err)
            return false, nil
        }
        return false, nil
    })
    if err != nil {
        if !apierrs.IsNotFound(err) {
            Failf("Couldn't delete ns %q: %s", ns.Name, err)
        } else {
            Logf("Namespace %v was already deleted", ns.Name)
        }
    }
    // TODO: Delete the namespace from underlying clusters.
}
Example 10: waitForFederatedServiceShard
// waitForFederatedServiceShard waits until the number of shards of a given federated
// service in an individual Kubernetes cluster reaches the expected value, numSvcs.
// If numSvcs is expected to be at least one, it also checks that the first shard's
// name and spec match those of the given service.
func waitForFederatedServiceShard(cs *release_1_3.Clientset, namespace string, service *api.Service, numSvcs int) {
    By("Fetching a federated service shard")
    var clSvcList *v1.ServiceList
    if err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) {
        var err error
        clSvcList, err = cs.Core().Services(namespace).List(api.ListOptions{})
        if err != nil {
            return false, err
        }
        n := len(clSvcList.Items)
        if n == numSvcs {
            return true, nil
        }
        framework.Logf("%d services found, waiting for %d, trying again in %s", n, numSvcs, framework.Poll)
        return false, nil
    }); err != nil {
        framework.Failf("Failed to list registered clusters: %+v", err)
    }
    if numSvcs > 0 && service != nil {
        // Renaming for clarity/readability
        clSvc := clSvcList.Items[0]
        Expect(clSvc.Name).To(Equal(service.Name))
        Expect(clSvc.Spec).To(Equal(service.Spec))
    }
}
Example 11: ensureComponentAuthorizationRules
// ensureComponentAuthorizationRules initializes the cluster policies
func (c *MasterConfig) ensureComponentAuthorizationRules() {
    clusterPolicyRegistry := clusterpolicyregistry.NewRegistry(clusterpolicystorage.NewStorage(c.EtcdHelper))
    ctx := kapi.WithNamespace(kapi.NewContext(), "")
    if _, err := clusterPolicyRegistry.GetClusterPolicy(ctx, authorizationapi.PolicyName); kapierror.IsNotFound(err) {
        glog.Infof("No cluster policy found. Creating bootstrap policy based on: %v", c.Options.PolicyConfig.BootstrapPolicyFile)
        if err := admin.OverwriteBootstrapPolicy(c.EtcdHelper, c.Options.PolicyConfig.BootstrapPolicyFile, admin.CreateBootstrapPolicyFileFullCommand, true, ioutil.Discard); err != nil {
            glog.Errorf("Error creating bootstrap policy: %v", err)
        }
    } else {
        glog.V(2).Infof("Ignoring bootstrap policy file because cluster policy found")
    }
    // Wait until the policy cache has caught up before continuing
    review := &authorizationapi.SubjectAccessReview{Action: authorizationapi.AuthorizationAttributes{Verb: "get", Resource: "clusterpolicies"}}
    err := wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (done bool, err error) {
        result, err := c.PolicyClient().SubjectAccessReviews().Create(review)
        if err == nil && result.Allowed {
            return true, nil
        }
        if kapierror.IsForbidden(err) || (err == nil && !result.Allowed) {
            glog.V(2).Infof("waiting for policy cache to initialize")
            return false, nil
        }
        return false, err
    })
    if err != nil {
        glog.Errorf("error waiting for policy cache to initialize: %v", err)
    }
}
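Note the three-way contract of the condition function visible above: returning (true, nil) ends the poll successfully, (false, nil) means "not ready yet, retry after the interval", and (false, err) aborts the poll immediately with that error instead of waiting out the timeout. A small sketch of that contract, with hypothetical isReady/isFatal helpers standing in for a real check:

// pollUntilReady is a hypothetical helper illustrating the condition contract.
func pollUntilReady(isReady, isFatal func() bool) error {
    return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
        if isFatal() {
            // A non-nil error aborts the poll right away.
            return false, fmt.Errorf("unrecoverable state, giving up")
        }
        // (false, nil) schedules another attempt after the interval.
        return isReady(), nil
    })
}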
Example 12: waitForPodsOrDie
func waitForPodsOrDie(cs *release_1_4.Clientset, ns string, n int) {
    By("Waiting for all pods to be running")
    err := wait.PollImmediate(framework.Poll, 10*time.Minute, func() (bool, error) {
        selector, err := labels.Parse("foo=bar")
        framework.ExpectNoError(err, "Parsing label selector %q", "foo=bar")
        pods, err := cs.Core().Pods(ns).List(api.ListOptions{LabelSelector: selector})
        if err != nil {
            return false, err
        }
        if pods == nil {
            return false, fmt.Errorf("pods is nil")
        }
        if len(pods.Items) < n {
            framework.Logf("pods: %v < %v", len(pods.Items), n)
            return false, nil
        }
        ready := 0
        for i := 0; i < n; i++ {
            if pods.Items[i].Status.Phase == apiv1.PodRunning {
                ready++
            }
        }
        if ready < n {
            framework.Logf("running pods: %v < %v", ready, n)
            return false, nil
        }
        return true, nil
    })
    framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
}
Example 13: waitForDelete
func (o *DrainOptions) waitForDelete(pods []api.Pod, interval, timeout time.Duration, usingEviction bool, getPodFn func(string, string) (*api.Pod, error)) ([]api.Pod, error) {
    var verbStr string
    if usingEviction {
        verbStr = "evicted"
    } else {
        verbStr = "deleted"
    }
    err := wait.PollImmediate(interval, timeout, func() (bool, error) {
        pendingPods := []api.Pod{}
        for i, pod := range pods {
            p, err := getPodFn(pod.Namespace, pod.Name)
            if apierrors.IsNotFound(err) || (p != nil && p.ObjectMeta.UID != pod.ObjectMeta.UID) {
                cmdutil.PrintSuccess(o.mapper, false, o.out, "pod", pod.Name, false, verbStr)
                continue
            } else if err != nil {
                return false, err
            } else {
                pendingPods = append(pendingPods, pods[i])
            }
        }
        pods = pendingPods
        if len(pendingPods) > 0 {
            return false, nil
        }
        return true, nil
    })
    return pods, err
}
Example 14: GetSelfURL
// GetSelfURL executes a curl against the given path via kubectl exec into a
// test container running with host networking, and fails if the output
// doesn't match the expected string.
func (config *NetworkingTestConfig) GetSelfURL(path string, expected string) {
    cmd := fmt.Sprintf("curl -q -s --connect-timeout 1 http://localhost:10249%s", path)
    By(fmt.Sprintf("Getting kube-proxy self URL %s", path))
    // These are arbitrary timeouts. The curl command should pass on first try,
    // unless kubeproxy is starved/bootstrapping/restarting etc.
    const retryInterval = 1 * time.Second
    const retryTimeout = 30 * time.Second
    podName := config.HostTestContainerPod.Name
    var msg string
    if pollErr := wait.PollImmediate(retryInterval, retryTimeout, func() (bool, error) {
        stdout, err := RunHostCmd(config.Namespace, podName, cmd)
        if err != nil {
            msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
            Logf(msg)
            return false, nil
        }
        if !strings.Contains(stdout, expected) {
            msg = fmt.Sprintf("successfully executed %v in %v/%v, but output '%v' doesn't contain expected string '%v'", cmd, config.Namespace, podName, stdout, expected)
            Logf(msg)
            return false, nil
        }
        return true, nil
    }); pollErr != nil {
        Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
        desc, _ := RunKubectl(
            "describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
        Logf("%s", desc)
        Failf("Timed out in %v: %v", retryTimeout, msg)
    }
}
Example 15: waitForPodCondition
// waitForPodCondition waits until the named pod satisfies the given condition (func),
// or the timeout elapses.
func waitForPodCondition(kubeClient *unversioned.Client, ns, podName string, condition func(pod *api.Pod) (bool, error),
    interval, timeout time.Duration) error {
    err := wait.PollImmediate(interval, timeout, func() (bool, error) {
        pod, err := kubeClient.Pods(ns).Get(podName)
        if err != nil {
            if apierrs.IsNotFound(err) {
                return false, nil
            }
            // Surface unexpected errors instead of calling the condition with a nil pod.
            return false, err
        }
        done, err := condition(pod)
        if err != nil {
            return false, err
        }
        if done {
            return true, nil
        }
        return false, nil
    })
    if err != nil {
        return fmt.Errorf("timed out waiting for pod %s/%s to satisfy the condition: %v", ns, podName, err)
    }
    return nil
}
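A hypothetical call site for the helper above, waiting for a pod to report Running (the pod name and timings are invented for illustration):

err := waitForPodCondition(kubeClient, "default", "my-pod",
    func(pod *api.Pod) (bool, error) {
        return pod.Status.Phase == api.PodRunning, nil
    },
    2*time.Second, 2*time.Minute)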