This article collects typical usage examples of the Go function ExpectNoError from the k8s.io/kubernetes/test/e2e/framework package. If you are unsure what ExpectNoError does or how to call it, the curated examples below should help.
The sections that follow show 15 code examples of the ExpectNoError function, drawn from real projects and ordered by popularity.
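For orientation: ExpectNoError is essentially a thin assertion helper built on Gomega that fails the current Ginkgo test when the error is non-nil. A minimal sketch of the idea (an assumption based on the e2e framework of this era, not the exact upstream source):

// Sketch only: the real framework.ExpectNoError may differ in details.
func ExpectNoError(err error, explain ...interface{}) {
    if err != nil {
        Logf("Unexpected error occurred: %v", err)
    }
    // Offset 1 makes the failure point at the caller, not at this helper.
    ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
}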
Example 1: volumeTestCleanup
// Clean both server and client pods.
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
    By(fmt.Sprint("cleaning the environment after ", config.prefix))

    defer GinkgoRecover()

    client := f.Client
    podClient := client.Pods(config.namespace)

    err := podClient.Delete(config.prefix+"-client", nil)
    if err != nil {
        // Log the error before failing the test: if the test has already failed,
        // framework.ExpectNoError() won't print anything to the logs!
        glog.Warningf("Failed to delete client pod: %v", err)
        framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
    }

    if config.serverImage != "" {
        if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
            framework.ExpectNoError(err, "Failed to wait for client pod to terminate: %v", err)
        }
        // See issue #24100.
        // Prevent umount errors by making sure the client pod exits cleanly
        // *before* the volume server pod exits.
        By("sleeping a bit so client can stop and unmount")
        time.Sleep(20 * time.Second)

        err = podClient.Delete(config.prefix+"-server", nil)
        if err != nil {
            glog.Warningf("Failed to delete server pod: %v", err)
            framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
        }
    }
}
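In a test body, this cleanup helper is typically registered with defer right after the volume pods are created, so it runs whether the test passes or fails. A hedged usage sketch (the It block and config values below are illustrative, not from the original):

It("should mount the test volume", func() {
    // Illustrative config values.
    config := VolumeTestConfig{namespace: f.Namespace.Name, prefix: "nfs"}
    defer volumeTestCleanup(f, config) // runs on success and on failure
    // ... create the server and client pods, then assert on the mounted data ...
})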
Example 2: waitForPodsOrDie
func waitForPodsOrDie(cs *release_1_4.Clientset, ns string, n int) {
    By("Waiting for all pods to be running")
    err := wait.PollImmediate(framework.Poll, 10*time.Minute, func() (bool, error) {
        selector, err := labels.Parse("foo=bar")
        framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
        pods, err := cs.Core().Pods(ns).List(api.ListOptions{LabelSelector: selector})
        if err != nil {
            return false, err
        }
        if pods == nil {
            return false, fmt.Errorf("pods is nil")
        }
        if len(pods.Items) < n {
            framework.Logf("pods: %v < %v", len(pods.Items), n)
            return false, nil
        }
        ready := 0
        for i := 0; i < n; i++ {
            if pods.Items[i].Status.Phase == apiv1.PodRunning {
                ready++
            }
        }
        if ready < n {
            framework.Logf("running pods: %v < %v", ready, n)
            return false, nil
        }
        return true, nil
    })
    framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
}
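The anonymous function passed to wait.PollImmediate above follows the standard wait.ConditionFunc contract from this era's k8s.io/kubernetes/pkg/util/wait package:

// Contract of the condition polled above:
type ConditionFunc func() (done bool, err error)
//   (true, nil)  -> condition met, stop polling successfully
//   (false, nil) -> not met yet, try again after the interval
//   (_, err)     -> fatal, stop polling and return err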
Example 3: tempSetCurrentKubeletConfig
// Must be called within a Ginkgo Context. Allows the caller to modify the
// KubeletConfiguration during the Context's BeforeEach; the change is
// reverted in the Context's AfterEach.
func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(initialConfig *componentconfig.KubeletConfiguration)) {
    var oldCfg *componentconfig.KubeletConfiguration
    BeforeEach(func() {
        configEnabled, err := isKubeletConfigEnabled(f)
        framework.ExpectNoError(err)
        if configEnabled {
            oldCfg, err = getCurrentKubeletConfig()
            framework.ExpectNoError(err)
            clone, err := api.Scheme.DeepCopy(oldCfg)
            framework.ExpectNoError(err)
            newCfg := clone.(*componentconfig.KubeletConfiguration)
            updateFunction(newCfg)
            framework.ExpectNoError(setKubeletConfiguration(f, newCfg))
        } else {
            framework.Logf("The Dynamic Kubelet Configuration feature is not enabled.\n" +
                "Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n" +
                "For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`.")
        }
    })
    AfterEach(func() {
        if oldCfg != nil {
            err := setKubeletConfiguration(f, oldCfg)
            framework.ExpectNoError(err)
        }
    })
}
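Because it registers its own BeforeEach and AfterEach, the helper composes naturally with other setup inside a Ginkgo Context. A hedged usage sketch (the Context description and the MaxPods value are illustrative, not from the original):

Context("with a raised pod limit", func() {
    tempSetCurrentKubeletConfig(f, func(initialConfig *componentconfig.KubeletConfiguration) {
        initialConfig.MaxPods = 200 // illustrative field and value
    })
    It("schedules pods against the updated configuration", func() {
        // ... test body runs against the modified Kubelet config ...
    })
})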
Example 4: cleanupDensityTest
func cleanupDensityTest(dtc DensityTestConfig) {
    defer GinkgoRecover()
    By("Deleting created Collections")
    // We explicitly delete all pods so that the API calls needed for deletion are accounted for in metrics.
    for i := range dtc.Configs {
        name := dtc.Configs[i].GetName()
        namespace := dtc.Configs[i].GetNamespace()
        kind := dtc.Configs[i].GetKind()
        // TODO: Remove the Deployment guard once GC is implemented for Deployments.
        if framework.TestContext.GarbageCollectorEnabled && kind != extensions.Kind("Deployment") {
            By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
            err := framework.DeleteResourceAndWaitForGC(dtc.ClientSet, kind, namespace, name)
            framework.ExpectNoError(err)
        } else {
            By(fmt.Sprintf("Cleaning up the %v and pods", kind))
            err := framework.DeleteResourceAndPods(dtc.ClientSet, dtc.InternalClientset, kind, namespace, name)
            framework.ExpectNoError(err)
        }
    }
    // Delete all secrets.
    for i := range dtc.SecretConfigs {
        dtc.SecretConfigs[i].Stop()
    }
    // Delete all daemon sets and their pods.
    for i := range dtc.DaemonConfigs {
        framework.ExpectNoError(framework.DeleteResourceAndPods(
            dtc.ClientSet,
            dtc.InternalClientset,
            extensions.Kind("DaemonSet"),
            dtc.DaemonConfigs[i].Namespace,
            dtc.DaemonConfigs[i].Name,
        ))
    }
}
Example 5: waitForIngress
func (j *testJig) waitForIngress() {
    // Wait for the load balancer IP.
    address, err := framework.WaitForIngressAddress(j.client, j.ing.Namespace, j.ing.Name, lbPollTimeout)
    if err != nil {
        framework.Failf("Ingress failed to acquire an IP address within %v", lbPollTimeout)
    }
    j.address = address
    framework.Logf("Found address %v for ingress %v", j.address, j.ing.Name)
    timeoutClient := &http.Client{Timeout: reqTimeout}

    // Check that all rules respond to a simple GET.
    for _, rules := range j.ing.Spec.Rules {
        proto := "http"
        if len(j.ing.Spec.TLS) > 0 {
            knownHosts := sets.NewString(j.ing.Spec.TLS[0].Hosts...)
            if knownHosts.Has(rules.Host) {
                timeoutClient.Transport, err = buildTransport(rules.Host, j.getRootCA(j.ing.Spec.TLS[0].SecretName))
                framework.ExpectNoError(err)
                proto = "https"
            }
        }
        for _, p := range rules.IngressRuleValue.HTTP.Paths {
            j.curlServiceNodePort(j.ing.Namespace, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal))
            route := fmt.Sprintf("%v://%v%v", proto, address, p.Path)
            framework.Logf("Testing route %v host %v with simple GET", route, rules.Host)
            framework.ExpectNoError(pollURL(route, rules.Host, lbPollTimeout, j.pollInterval, timeoutClient, false))
        }
    }
}
Example 6: runPausePod
func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
    pod := createPausePod(f, conf)
    framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
    pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(conf.Name, metav1.GetOptions{})
    framework.ExpectNoError(err)
    return pod
}
Example 7: setKubeletAPIQPSLimit
// setKubeletAPIQPSLimit sets the Kubelet API QPS via a ConfigMap. The Kubelet restarts with the new QPS.
func setKubeletAPIQPSLimit(f *framework.Framework, newAPIQPS int32) {
    const restartGap = 40 * time.Second

    resp := pollConfigz(2*time.Minute, 5*time.Second)
    kubeCfg, err := decodeConfigz(resp)
    framework.ExpectNoError(err)
    framework.Logf("Old QPS limit is: %d\n", kubeCfg.KubeAPIQPS)

    // Set the new API QPS limit.
    kubeCfg.KubeAPIQPS = newAPIQPS
    // TODO(coufon): createConfigMap should first check whether the ConfigMap already exists and, if so, use updateConfigMap instead.
    // Calling createConfigMap twice results in an error. This is fine for the benchmark test because we only run one test on a new node.
    _, err = createConfigMap(f, kubeCfg)
    framework.ExpectNoError(err)

    // Wait for the Kubelet to restart.
    time.Sleep(restartGap)

    // Check that the new QPS has been set.
    resp = pollConfigz(2*time.Minute, 5*time.Second)
    kubeCfg, err = decodeConfigz(resp)
    framework.ExpectNoError(err)
    framework.Logf("New QPS limit is: %d\n", kubeCfg.KubeAPIQPS)

    // TODO(coufon): check the test result to see if we need to retry here.
    if kubeCfg.KubeAPIQPS != newAPIQPS {
        framework.Failf("Failed to set the new kubelet API QPS limit.")
    }
}
Example 8: createNetProxyPods
func (config *KubeProxyTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
    framework.ExpectNoError(framework.WaitForAllNodesSchedulable(config.f.Client))
    nodes := framework.GetReadySchedulableNodesOrDie(config.f.Client)

    // Create pods, one for each node.
    createdPods := make([]*api.Pod, 0, len(nodes.Items))
    for i, n := range nodes.Items {
        podName := fmt.Sprintf("%s-%d", podName, i)
        pod := config.createNetShellPodSpec(podName, n.Name)
        pod.ObjectMeta.Labels = selector
        createdPod := config.createPod(pod)
        createdPods = append(createdPods, createdPod)
    }

    // Wait until all of them are up.
    runningPods := make([]*api.Pod, 0, len(nodes.Items))
    for _, p := range createdPods {
        framework.ExpectNoError(config.f.WaitForPodReady(p.Name))
        rp, err := config.getPodClient().Get(p.Name)
        framework.ExpectNoError(err)
        runningPods = append(runningPods, rp)
    }
    return runningPods
}
Example 9: waitForStableCluster
// waitForStableCluster waits until all existing pods are scheduled and returns their number.
func waitForStableCluster(c *client.Client) int {
    timeout := 10 * time.Minute
    startTime := time.Now()

    allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
    framework.ExpectNoError(err)
    // The API server also returns Pods that have succeeded. We need to filter them out.
    currentPods := make([]api.Pod, 0, len(allPods.Items))
    for _, pod := range allPods.Items {
        if pod.Status.Phase != api.PodSucceeded && pod.Status.Phase != api.PodFailed {
            currentPods = append(currentPods, pod)
        }
    }
    allPods.Items = currentPods

    scheduledPods, currentlyNotScheduledPods := getPodsScheduled(allPods)
    for len(currentlyNotScheduledPods) != 0 {
        time.Sleep(2 * time.Second)

        allPods, err := c.Pods(api.NamespaceAll).List(api.ListOptions{})
        framework.ExpectNoError(err)
        scheduledPods, currentlyNotScheduledPods = getPodsScheduled(allPods)

        if startTime.Add(timeout).Before(time.Now()) {
            framework.Failf("Timed out after %v waiting for stable cluster.", timeout)
            break
        }
    }
    return len(scheduledPods)
}
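The getPodsScheduled helper is not shown in this example. A simplified sketch of what it needs to do (an assumption for illustration; the upstream helper is more involved, e.g. it also excludes master nodes):

// Simplified sketch (assumption): split pods by whether the scheduler
// has already bound them to a node.
func getPodsScheduled(allPods *api.PodList) (scheduledPods, notScheduledPods []api.Pod) {
    for _, pod := range allPods.Items {
        if pod.Spec.NodeName != "" {
            scheduledPods = append(scheduledPods, pod)
        } else {
            notScheduledPods = append(notScheduledPods, pod)
        }
    }
    return
}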
Example 10: runAppArmorTest
func runAppArmorTest(f *framework.Framework, profile string) api.PodStatus {
    pod := createPodWithAppArmor(f, profile)
    framework.ExpectNoError(f.WaitForPodNoLongerRunning(pod.Name))
    p, err := f.PodClient().Get(pod.Name)
    framework.ExpectNoError(err)
    return p.Status
}
Example 11: GetUnderlyingFederatedContexts
func (f *Framework) GetUnderlyingFederatedContexts() []E2EContext {
    kubeconfig := framework.KubeConfig{}

    configBytes, err := ioutil.ReadFile(framework.TestContext.KubeConfig)
    framework.ExpectNoError(err)
    err = yaml.Unmarshal(configBytes, &kubeconfig)
    framework.ExpectNoError(err)

    e2eContexts := []E2EContext{}
    for _, context := range kubeconfig.Contexts {
        if strings.HasPrefix(context.Name, "federation") && context.Name != framework.TestContext.FederatedKubeContext {
            user := kubeconfig.FindUser(context.Context.User)
            if user == nil {
                framework.Failf("Could not find user for context %+v", context)
            }
            cluster := kubeconfig.FindCluster(context.Context.Cluster)
            if cluster == nil {
                framework.Failf("Could not find cluster for context %+v", context)
            }
            dnsSubdomainName, err := GetValidDNSSubdomainName(context.Name)
            if err != nil {
                framework.Failf("Could not convert context name %s to a valid DNS subdomain name, error: %s", context.Name, err)
            }
            e2eContexts = append(e2eContexts, E2EContext{
                RawName: context.Name,
                Name:    dnsSubdomainName,
                Cluster: cluster,
                User:    user,
            })
        }
    }
    return e2eContexts
}
Example 12: getPodList
func (p *statefulSetTester) getPodList(ps *apps.StatefulSet) *v1.PodList {
    selector, err := metav1.LabelSelectorAsSelector(ps.Spec.Selector)
    framework.ExpectNoError(err)
    podList, err := p.c.Core().Pods(ps.Namespace).List(v1.ListOptions{LabelSelector: selector.String()})
    framework.ExpectNoError(err)
    return podList
}
Example 13: runAppArmorTest
func runAppArmorTest(f *framework.Framework, shouldRun bool, profile string) v1.PodStatus {
    pod := createPodWithAppArmor(f, profile)
    if shouldRun {
        // The pod needs to start before it stops, so wait for the longer start timeout.
        framework.ExpectNoError(framework.WaitTimeoutForPodNoLongerRunningInNamespace(
            f.ClientSet, pod.Name, f.Namespace.Name, "", framework.PodStartTimeout))
    } else {
        // The pod should remain in the pending state. Wait for the Reason to be set to "AppArmor".
        w, err := f.PodClient().Watch(v1.SingleObject(metav1.ObjectMeta{Name: pod.Name}))
        framework.ExpectNoError(err)
        _, err = watch.Until(framework.PodStartTimeout, w, func(e watch.Event) (bool, error) {
            switch e.Type {
            case watch.Deleted:
                return false, errors.NewNotFound(schema.GroupResource{Resource: "pods"}, pod.Name)
            }
            switch t := e.Object.(type) {
            case *v1.Pod:
                if t.Status.Reason == "AppArmor" {
                    return true, nil
                }
            }
            return false, nil
        })
        framework.ExpectNoError(err)
    }
    p, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
    framework.ExpectNoError(err)
    return p.Status
}
Example 14: runPausePod
func runPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
    pod := createPausePod(f, conf)
    framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.Client, pod))
    pod, err := f.Client.Pods(f.Namespace.Name).Get(conf.Name)
    framework.ExpectNoError(err)
    return pod
}
Example 15: GetReplicas
func (rc *ResourceConsumer) GetReplicas() int {
    switch rc.kind {
    case kindRC:
        replicationController, err := rc.framework.ClientSet.Core().ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
        framework.ExpectNoError(err)
        if replicationController == nil {
            framework.Failf(rcIsNil)
        }
        return int(replicationController.Status.Replicas)
    case kindDeployment:
        deployment, err := rc.framework.ClientSet.Extensions().Deployments(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
        framework.ExpectNoError(err)
        if deployment == nil {
            framework.Failf(deploymentIsNil)
        }
        return int(deployment.Status.Replicas)
    case kindReplicaSet:
        rs, err := rc.framework.ClientSet.Extensions().ReplicaSets(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
        framework.ExpectNoError(err)
        if rs == nil {
            framework.Failf(rsIsNil)
        }
        return int(rs.Status.Replicas)
    default:
        framework.Failf(invalidKind)
    }
    return 0
}
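Callers in the autoscaling tests typically poll GetReplicas until the HorizontalPodAutoscaler converges. A hedged sketch of such a caller (the interval, timeout, and desiredReplicas variable are illustrative, not from the original source):

// Illustrative poll loop: wait until the consumer reaches the desired scale.
err := wait.PollImmediate(10*time.Second, 15*time.Minute, func() (bool, error) {
    return rc.GetReplicas() == desiredReplicas, nil
})
framework.ExpectNoError(err, "timed out waiting for %d replicas", desiredReplicas)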