This article collects typical usage examples of the Golang function framework.Failf from the k8s.io/kubernetes/test/e2e/framework package: what Failf is for, how to call it, and what real-world callers look like. The curated examples below should help you pick it up quickly.
15 code examples of the Failf function are shown, sorted by popularity.
Example 1: testAppArmorNode
func testAppArmorNode() {
	BeforeEach(func() {
		By("Loading AppArmor profiles for testing")
		framework.ExpectNoError(loadTestProfiles(), "Could not load AppArmor test profiles")
	})
	Context("when running with AppArmor", func() {
		f := framework.NewDefaultFramework("apparmor-test")

		It("should reject an unloaded profile", func() {
			status := runAppArmorTest(f, "localhost/"+"non-existant-profile")
			Expect(status.Phase).To(Equal(api.PodFailed), "PodStatus: %+v", status)
			Expect(status.Reason).To(Equal("AppArmor"), "PodStatus: %+v", status)
		})
		It("should enforce a profile blocking writes", func() {
			status := runAppArmorTest(f, "localhost/"+apparmorProfilePrefix+"deny-write")
			if len(status.ContainerStatuses) == 0 {
				framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
				return
			}
			state := status.ContainerStatuses[0].State.Terminated
			Expect(state.ExitCode).To(Not(BeZero()), "ContainerStateTerminated: %+v", state)
		})
		It("should enforce a permissive profile", func() {
			status := runAppArmorTest(f, "localhost/"+apparmorProfilePrefix+"audit-write")
			if len(status.ContainerStatuses) == 0 {
				framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
				return
			}
			state := status.ContainerStatuses[0].State.Terminated
			Expect(state.ExitCode).To(BeZero(), "ContainerStateTerminated: %+v", state)
		})
	})
}
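The function above only defines the specs; it still has to be registered with Ginkgo. A minimal sketch of how that registration might look (the Describe text and the isAppArmorEnabled guard are assumptions, not taken from the example):

var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor]", func() {
	// Only register the node-level AppArmor specs when the host supports
	// them (isAppArmorEnabled is a hypothetical guard).
	if isAppArmorEnabled() {
		testAppArmorNode()
	}
})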
Example 2: updateIngressOrFail
func updateIngressOrFail(clientset *federation_release_1_4.Clientset, namespace string) (newIng *v1beta1.Ingress) {
	var err error
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateIngressOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}
	ingress := &v1beta1.Ingress{
		ObjectMeta: v1.ObjectMeta{
			Name: FederatedIngressName,
		},
		Spec: v1beta1.IngressSpec{
			Backend: &v1beta1.IngressBackend{
				ServiceName: "updated-testingress-service",
				ServicePort: intstr.FromInt(80),
			},
		},
	}
	for retryCount := 0; retryCount < 3; retryCount++ {
		_, err = clientset.Extensions().Ingresses(namespace).Get(FederatedIngressName)
		if err != nil {
			framework.Failf("failed to get ingress %q: %v", FederatedIngressName, err)
		}
		newIng, err = clientset.Extensions().Ingresses(namespace).Update(ingress)
		if err == nil {
			describeIng(namespace)
			return
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			framework.Failf("failed to update ingress %q: %v", FederatedIngressName, err)
		}
	}
	framework.Failf("too many retries updating ingress %q", FederatedIngressName)
	return newIng
}
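Examples 2, 5, 6, and 8 all share the same get/update loop that retries only on conflict or server timeout. The pattern generalizes to a small helper along these lines (an illustrative sketch; updateWithRetries is not part of the e2e framework):

// updateWithRetries retries an update a bounded number of times, tolerating
// only conflicts and server timeouts; any other error fails the test at once.
func updateWithRetries(maxRetries int, what string, update func() error) {
	for i := 0; i < maxRetries; i++ {
		err := update()
		if err == nil {
			return
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			framework.Failf("failed to update %s: %v", what, err)
		}
	}
	framework.Failf("too many retries updating %s", what)
}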
Example 3: GetUnderlyingFederatedContexts
func (f *Framework) GetUnderlyingFederatedContexts() []E2EContext {
	kubeconfig := framework.KubeConfig{}
	configBytes, err := ioutil.ReadFile(framework.TestContext.KubeConfig)
	framework.ExpectNoError(err)
	err = yaml.Unmarshal(configBytes, &kubeconfig)
	framework.ExpectNoError(err)

	e2eContexts := []E2EContext{}
	for _, context := range kubeconfig.Contexts {
		if strings.HasPrefix(context.Name, "federation") && context.Name != framework.TestContext.FederatedKubeContext {
			user := kubeconfig.FindUser(context.Context.User)
			if user == nil {
				framework.Failf("Could not find user for context %+v", context)
			}
			cluster := kubeconfig.FindCluster(context.Context.Cluster)
			if cluster == nil {
				framework.Failf("Could not find cluster for context %+v", context)
			}
			dnsSubdomainName, err := GetValidDNSSubdomainName(context.Name)
			if err != nil {
				framework.Failf("Could not convert context name %s to a valid dns subdomain name, error: %s", context.Name, err)
			}
			e2eContexts = append(e2eContexts, E2EContext{
				RawName: context.Name,
				Name:    dnsSubdomainName,
				Cluster: cluster,
				User:    user,
			})
		}
	}
	return e2eContexts
}
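E2EContext itself is defined elsewhere in the federation e2e code; judging from the fields populated above, its shape is roughly the following (a sketch inferred from usage, not the authoritative definition):

// Approximate shape of E2EContext, inferred from the composite literal above.
type E2EContext struct {
	RawName string                 // context name exactly as it appears in the kubeconfig
	Name    string                 // DNS-subdomain-safe version of RawName
	Cluster *framework.KubeCluster // cluster entry resolved from the kubeconfig
	User    *framework.KubeUser    // user entry resolved from the kubeconfig
}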
Example 4: validateDNSResults
func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string) {
	By("submitting the pod to kubernetes")
	podClient := f.Client.Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	}()
	if _, err := podClient.Create(pod); err != nil {
		framework.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

	By("retrieving the pod")
	pod, err := podClient.Get(pod.Name)
	if err != nil {
		framework.Failf("Failed to get pod %s: %v", pod.Name, err)
	}

	// Try to find results for each expected name.
	By("looking for the results for each expected name from probers")
	assertFilesExist(fileNames, "results", pod, f.Client)

	// TODO: probe from the host, too.

	framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
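A typical caller builds a probe pod whose containers write each successful lookup to a well-known file, then hands both the pod and the expected file names to validateDNSResults. Hypothetically (createDNSPod and the name list are illustrative, not part of the snippet above):

// Hypothetical usage: probe cluster DNS from inside a pod and require a
// result file per name (the file layout is whatever the probe script writes).
namesToResolve := []string{
	"kubernetes.default",
	"kubernetes.default.svc",
}
pod := createDNSPod(f.Namespace.Name, namesToResolve) // assumed helper
validateDNSResults(f, pod, namesToResolve)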
Example 5: updateSecretOrFail
func updateSecretOrFail(clientset *federation_release_1_4.Clientset, namespace string) *v1.Secret {
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateSecretOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}
	var err error
	var newSecret *v1.Secret
	secret := &v1.Secret{
		ObjectMeta: v1.ObjectMeta{
			Name: UpdatedFederatedSecretName,
		},
	}
	for retryCount := 0; retryCount < MaxRetries; retryCount++ {
		_, err = clientset.Core().Secrets(namespace).Get(FederatedSecretName)
		if err != nil {
			framework.Failf("failed to get secret %q: %v", FederatedSecretName, err)
		}
		newSecret, err = clientset.Core().Secrets(namespace).Update(secret)
		if err == nil {
			return newSecret
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			framework.Failf("failed to update secret %q: %v", FederatedSecretName, err)
		}
	}
	framework.Failf("too many retries updating secret %q", FederatedSecretName)
	return newSecret
}
Example 6: updateDaemonSetOrFail
func updateDaemonSetOrFail(clientset *fedclientset.Clientset, namespace string) *v1beta1.DaemonSet {
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateDaemonSetOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}
	var newDaemonSet *v1beta1.DaemonSet
	for retryCount := 0; retryCount < FederatedDaemonSetMaxRetries; retryCount++ {
		daemonset, err := clientset.Extensions().DaemonSets(namespace).Get(FederatedDaemonSetName)
		if err != nil {
			framework.Failf("failed to get daemonset %q: %v", FederatedDaemonSetName, err)
		}
		// Update one of the fields in the daemonset.
		daemonset.Annotations = map[string]string{"ccc": "ddd"}
		newDaemonSet, err = clientset.Extensions().DaemonSets(namespace).Update(daemonset)
		if err == nil {
			return newDaemonSet
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			framework.Failf("failed to update daemonset %q: %v", FederatedDaemonSetName, err)
		}
	}
	framework.Failf("too many retries updating daemonset %q", FederatedDaemonSetName)
	return newDaemonSet
}
Example 7: deleteFederationNs
func (f *Framework) deleteFederationNs() {
	ns := f.FederationNamespace
	By(fmt.Sprintf("Destroying federation namespace %q for this suite.", ns.Name))
	timeout := 5 * time.Minute
	if f.NamespaceDeletionTimeout != 0 {
		timeout = f.NamespaceDeletionTimeout
	}

	clientset := f.FederationClientset
	// First delete the namespace from federation apiserver.
	// Also delete the corresponding namespaces from underlying clusters.
	orphanDependents := false
	if err := clientset.Core().Namespaces().Delete(ns.Name, &v1.DeleteOptions{OrphanDependents: &orphanDependents}); err != nil {
		framework.Failf("Error while deleting federation namespace %s: %s", ns.Name, err)
	}

	// Verify that it got deleted.
	err := wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		if _, err := clientset.Core().Namespaces().Get(ns.Name, metav1.GetOptions{}); err != nil {
			if apierrors.IsNotFound(err) {
				return true, nil
			}
			framework.Logf("Error while waiting for namespace to be terminated: %v", err)
			return false, nil
		}
		return false, nil
	})
	if err != nil {
		if !apierrors.IsNotFound(err) {
			framework.Failf("Couldn't delete ns %q: %s", ns.Name, err)
		} else {
			framework.Logf("Namespace %v was already deleted", ns.Name)
		}
	}
}
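The delete-then-poll-until-NotFound sequence is a recurring e2e idiom. Pulled out on its own, it would look roughly like this (an illustrative helper, not framework API):

// waitForNamespaceGone polls until Get on the namespace returns NotFound,
// treating every other outcome as "still terminating" until the timeout.
func waitForNamespaceGone(clientset *fedclientset.Clientset, name string, timeout time.Duration) error {
	return wait.PollImmediate(5*time.Second, timeout, func() (bool, error) {
		_, err := clientset.Core().Namespaces().Get(name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil
		}
		return false, nil
	})
}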
Example 8: updateSecretOrFail
func updateSecretOrFail(clientset *fedclientset.Clientset, namespace string) *v1.Secret {
	if clientset == nil || len(namespace) == 0 {
		Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateSecretOrFail: clientset: %v, namespace: %v", clientset, namespace))
	}
	var newSecret *v1.Secret
	for retryCount := 0; retryCount < MaxRetries; retryCount++ {
		secret, err := clientset.Core().Secrets(namespace).Get(FederatedSecretName)
		if err != nil {
			framework.Failf("failed to get secret %q: %v", FederatedSecretName, err)
		}
		// Update one of the fields in the secret.
		secret.Data = map[string][]byte{
			"key": []byte("value"),
		}
		newSecret, err = clientset.Core().Secrets(namespace).Update(secret)
		if err == nil {
			return newSecret
		}
		if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
			framework.Failf("failed to update secret %q: %v", FederatedSecretName, err)
		}
	}
	framework.Failf("too many retries updating secret %q", FederatedSecretName)
	return newSecret
}
Example 9: testUnderTemporaryNetworkFailure
// Blocks outgoing network traffic on 'node'. Then runs testFunc and returns its status.
// At the end (even in case of errors), the network traffic is brought back to normal.
// This function executes commands on a node so it will work only for some
// environments.
func testUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *api.Node, testFunc func()) {
	host := framework.GetNodeExternalIP(node)
	master := framework.GetMasterAddress(c)
	By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
	defer func() {
		// This code will execute even if setting the iptables rule failed.
		// It is on purpose because we may have an error even if the new rule
		// had been inserted. (yes, we could look at the error code and ssh error
		// separately, but I prefer to stay on the safe side).
		By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
		framework.UnblockNetwork(host, master)
	}()

	framework.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
	if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, true, resizeNodeReadyTimeout) {
		framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
	}
	framework.BlockNetwork(host, master)

	framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
	if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, false, resizeNodeNotReadyTimeout) {
		framework.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
	}

	testFunc()
	// network traffic is unblocked in a deferred function
}
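In a test, the helper is handed a closure that makes assertions while the node is partitioned; for instance, mirroring example 12 below, one might verify that a pod gets evicted (hypothetical usage; c, ns, node, rcName, and podName are assumed to exist in the caller):

// Hypothetical usage: while the node is cut off from the master, assert that
// the pod running on it disappears (i.e. gets rescheduled elsewhere).
testUnderTemporaryNetworkFailure(c, ns, node, func() {
	framework.Logf("Waiting for pod %s to be removed", podName)
	err := framework.WaitForRCPodToDisappear(c, ns, rcName, podName)
	Expect(err).NotTo(HaveOccurred())
})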
Example 10: startPodAndGetBackOffs
func startPodAndGetBackOffs(podClient *framework.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
	podClient.CreateSync(pod)
	time.Sleep(sleepAmount)
	Expect(pod.Spec.Containers).NotTo(BeEmpty())
	podName := pod.Name
	containerName := pod.Spec.Containers[0].Name

	By("getting restart delay-0")
	_, err := getRestartDelay(podClient, podName, containerName)
	if err != nil {
		framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
	}

	By("getting restart delay-1")
	delay1, err := getRestartDelay(podClient, podName, containerName)
	if err != nil {
		framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
	}

	By("getting restart delay-2")
	delay2, err := getRestartDelay(podClient, podName, containerName)
	if err != nil {
		framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
	}
	return delay1, delay2
}
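getRestartDelay is a helper from the same test file. A simplified sketch of what it plausibly does (the poll interval, timeout, and exact bookkeeping are assumptions): poll the pod until the container is running again after a termination, and report the gap between the previous finish and the new start, which is the kubelet's back-off delay.

// Simplified, assumed sketch of getRestartDelay: watch for the container to
// come back after a termination and measure the restart back-off.
func getRestartDelaySketch(podClient *framework.PodClient, podName, containerName string) (time.Duration, error) {
	begin := time.Now()
	for time.Since(begin) < 10*time.Minute { // assumed timeout
		time.Sleep(time.Second)
		pod, err := podClient.Get(podName, metav1.GetOptions{})
		if err != nil {
			return 0, err
		}
		status, ok := v1.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
		if !ok || status.State.Running == nil || status.LastTerminationState.Terminated == nil {
			continue // no completed restart observed yet
		}
		// Back-off delay = time between the last termination and the new start.
		return status.State.Running.StartedAt.Time.Sub(status.LastTerminationState.Terminated.FinishedAt.Time), nil
	}
	return 0, fmt.Errorf("timed out waiting for container %s/%s to restart", podName, containerName)
}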
Example 11: GetReplicas
func (rc *ResourceConsumer) GetReplicas() int {
	switch rc.kind {
	case kindRC:
		replicationController, err := rc.framework.ClientSet.Core().ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		if replicationController == nil {
			framework.Failf(rcIsNil)
		}
		return int(replicationController.Status.Replicas)
	case kindDeployment:
		deployment, err := rc.framework.ClientSet.Extensions().Deployments(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		if deployment == nil {
			framework.Failf(deploymentIsNil)
		}
		return int(deployment.Status.Replicas)
	case kindReplicaSet:
		rs, err := rc.framework.ClientSet.Extensions().ReplicaSets(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		if rs == nil {
			framework.Failf(rsIsNil)
		}
		return int(rs.Status.Replicas)
	default:
		framework.Failf(invalidKind)
	}
	return 0
}
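The kind markers and the *IsNil messages are package-level constants defined alongside ResourceConsumer; plausible definitions look like the following (assumed values, shown only to make the switch readable):

// Assumed definitions of the constants referenced above.
const (
	kindRC         = "replicationController"
	kindDeployment = "deployment"
	kindReplicaSet = "replicaSet"

	rcIsNil         = "ERROR: replicationController = nil"
	deploymentIsNil = "ERROR: deployment = nil"
	rsIsNil         = "ERROR: replicaset = nil"
	invalidKind     = "ERROR: invalid workload kind for resource consumer"
)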
Example 12: performTemporaryNetworkFailure
// Blocks outgoing network traffic on 'node'. Then verifies that 'podNameToDisappear',
// which belongs to replication controller 'rcName', really disappeared.
// Finally, it checks that the replication controller recreates the
// pods on another node and that the number of replicas is now equal to 'replicas'.
// At the end (even in case of errors), the network traffic is brought back to normal.
// This function executes commands on a node so it will work only for some
// environments.
func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replicas int32, podNameToDisappear string, node *api.Node) {
	host := getNodeExternalIP(node)
	master := getMaster(c)
	By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
	defer func() {
		// This code will execute even if setting the iptables rule failed.
		// It is on purpose because we may have an error even if the new rule
		// had been inserted. (yes, we could look at the error code and ssh error
		// separately, but I prefer to stay on the safe side).
		By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
		framework.UnblockNetwork(host, master)
	}()

	framework.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
	if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, true, resizeNodeReadyTimeout) {
		framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
	}
	framework.BlockNetwork(host, master)

	framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
	if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, false, resizeNodeNotReadyTimeout) {
		framework.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
	}

	framework.Logf("Waiting for pod %s to be removed", podNameToDisappear)
	err := framework.WaitForRCPodToDisappear(c, ns, rcName, podNameToDisappear)
	Expect(err).NotTo(HaveOccurred())

	By("verifying whether the pod from the unreachable node is recreated")
	err = framework.VerifyPods(c, ns, rcName, true, replicas)
	Expect(err).NotTo(HaveOccurred())
	// network traffic is unblocked in a deferred function
}
Example 13: getTestNodeInfo
// getTestNodeInfo fetches the capacity of a node from the API server and returns a map of labels.
func getTestNodeInfo(f *framework.Framework, testName string) map[string]string {
	nodeName := framework.TestContext.NodeName
	node, err := f.ClientSet.Core().Nodes().Get(nodeName)
	Expect(err).NotTo(HaveOccurred())

	cpu, ok := node.Status.Capacity["cpu"]
	if !ok {
		framework.Failf("Failed to fetch CPU capacity value of test node.")
	}
	memory, ok := node.Status.Capacity["memory"]
	if !ok {
		framework.Failf("Failed to fetch Memory capacity value of test node.")
	}
	cpuValue, ok := cpu.AsInt64()
	if !ok {
		framework.Failf("Failed to fetch CPU capacity value as Int64.")
	}
	memoryValue, ok := memory.AsInt64()
	if !ok {
		framework.Failf("Failed to fetch Memory capacity value as Int64.")
	}

	return map[string]string{
		"node":    nodeName,
		"test":    testName,
		"image":   node.Status.NodeInfo.OSImage,
		"machine": fmt.Sprintf("cpu:%dcore,memory:%.1fGB", cpuValue, float32(memoryValue)/(1024*1024*1024)),
	}
}
Example 14: testHostIP
// testHostIP tests that a pod gets a host IP
func testHostIP(c *client.Client, ns string, pod *api.Pod) {
	podClient := c.Pods(ns)
	By("creating pod")
	defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
	if _, err := podClient.Create(pod); err != nil {
		framework.Failf("Failed to create pod: %v", err)
	}
	By("ensuring that pod is running and has a hostIP")
	// Wait for the pods to enter the running state. Waiting loops until the pods
	// are running so non-running pods cause a timeout for this test.
	err := framework.WaitForPodRunningInNamespace(c, pod.Name, ns)
	Expect(err).NotTo(HaveOccurred())
	// Try to make sure we get a hostIP for each pod.
	hostIPTimeout := 2 * time.Minute
	t := time.Now()
	for {
		p, err := podClient.Get(pod.Name)
		Expect(err).NotTo(HaveOccurred())
		if p.Status.HostIP != "" {
			framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
			break
		}
		if time.Since(t) >= hostIPTimeout {
			framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds",
				p.Name, time.Since(t).Seconds())
		}
		framework.Logf("Retrying to get the hostIP of pod %s", p.Name)
		time.Sleep(5 * time.Second)
	}
}
Example 15: validateTargetedProbeOutput
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
	By("submitting the pod to kubernetes")
	podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
	defer func() {
		By("deleting the pod")
		defer GinkgoRecover()
		podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
	}()
	if _, err := podClient.Create(pod); err != nil {
		framework.Failf("Failed to create %s pod: %v", pod.Name, err)
	}

	framework.ExpectNoError(f.WaitForPodRunning(pod.Name))

	By("retrieving the pod")
	pod, err := podClient.Get(pod.Name)
	if err != nil {
		framework.Failf("Failed to get pod %s: %v", pod.Name, err)
	}

	// Try to find the expected value for each expected name.
	By("looking for the results for each expected name from probers")
	assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)

	framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}