

Golang framework.WaitForPersistentVolumeClaimPhase Function Code Examples

This article collects typical usage examples of the Golang function WaitForPersistentVolumeClaimPhase from the package k8s.io/kubernetes/test/e2e/framework. If you are wondering what WaitForPersistentVolumeClaimPhase does, how to call it, or what real-world invocations look like, the curated examples below should help.


A total of 7 code examples of the WaitForPersistentVolumeClaimPhase function are shown below, sorted by popularity by default.
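
Before the examples, here is a minimal sketch of the call pattern they all share: wait for a PVC to reach the Bound phase, polling at a fixed interval until a timeout expires. The wrapper function, namespace, and claim name below are hypothetical, and the import paths are approximate for the Kubernetes release these examples were taken from (the packages have moved between releases):

package e2e // hypothetical; the real tests live under k8s.io/kubernetes/test/e2e

import (
	"time"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForClaimBound is a hypothetical wrapper illustrating the argument order used
// in every example below: (phase, client, namespace, claimName, pollInterval, timeout).
func waitForClaimBound(c clientset.Interface, ns, claimName string) {
	err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claimName, 3*time.Second, 300*time.Second)
	if err != nil {
		framework.Failf("PVC %v/%v never reached the Bound phase: %v", ns, claimName, err)
	}
}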

Example 1: waitOnPVandPVC

// Wait for the pv and pvc to bind to each other.
func waitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {

	// Wait for newly created PVC to bind to the PV
	framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
	err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
	Expect(err).NotTo(HaveOccurred())

	// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
	// since the PVC is already bound.
	err = framework.WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
	Expect(err).NotTo(HaveOccurred())

	// Re-get the pv and pvc objects
	pv, err = c.Core().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	// Re-get the pvc object
	pvc, err = c.Core().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	// The pv and pvc are both bound, but to each other?
	// Check that the PersistentVolume.ClaimRef matches the PVC
	Expect(pv.Spec.ClaimRef).NotTo(BeNil())
	Expect(pv.Spec.ClaimRef.Name).To(Equal(pvc.Name))
	Expect(pvc.Spec.VolumeName).To(Equal(pv.Name))
	Expect(pv.Spec.ClaimRef.UID).To(Equal(pvc.UID))
}
Developer: kubernetes, Project: kubernetes, Lines: 28, Source: persistent_volumes.go
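
A hypothetical invocation of this helper, for orientation only; makePersistentVolume and makePersistentVolumeClaim stand in for the test's own PV/PVC constructors, which are not shown on this page:

	// Create a PV and a PVC, then wait for them to bind to each other.
	pv, err := c.Core().PersistentVolumes().Create(makePersistentVolume()) // hypothetical constructor
	Expect(err).NotTo(HaveOccurred())
	pvc, err := c.Core().PersistentVolumeClaims(ns).Create(makePersistentVolumeClaim(ns)) // hypothetical constructor
	Expect(err).NotTo(HaveOccurred())
	waitOnPVandPVC(c, ns, pv, pvc)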

Example 2: testDynamicProvisioning

func testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim) {
	err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
	Expect(err).NotTo(HaveOccurred())

	By("checking the claim")
	// Get new copy of the claim
	claim, err = client.Core().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	// Get the bound PV
	pv, err := client.Core().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
	Expect(err).NotTo(HaveOccurred())

	// Check sizes
	expectedCapacity := resource.MustParse(expectedSize)
	pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
	Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))

	requestedCapacity := resource.MustParse(requestedSize)
	claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))

	// Check PV properties
	Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(v1.PersistentVolumeReclaimDelete))
	expectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
	Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
	Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
	Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))

	// We start two pods:
	// - The first writes 'hello world' to /mnt/test/data (= the volume).
	// - The second greps for 'hello world' in /mnt/test/data.
	// If both succeed, Kubernetes actually allocated something that is
	// persistent across pods.
	By("checking the created volume is writable")
	runInPodWithVolume(client, claim.Namespace, claim.Name, "echo 'hello world' > /mnt/test/data")

	By("checking the created volume is readable and retains data")
	runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")

	By("deleting the claim")
	framework.ExpectNoError(client.Core().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))

	// Wait for the PV to get deleted. Technically, the first few delete
	// attempts may fail, as the volume is still attached to a node because
	// kubelet is slowly cleaning up a pod, however it should succeed in a
	// couple of minutes. Wait 20 minutes to recover from random cloud hiccups.
	framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
}
Developer: kubernetes, Project: kubernetes, Lines: 49, Source: volume_provisioning.go

Example 3: waitOnPVandPVC

// Wait for the pv and pvc to bind to each other. Fail test on errors.
func waitOnPVandPVC(c *client.Client, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) error {

	// Wait for newly created PVC to bind to the PV
	framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
	err := framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
	if err != nil {
		return fmt.Errorf("PersistentVolumeClaim failed to enter a bound state: %+v", err)
	}

	// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
	// since the PVC is already bound.
	err = framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
	if err != nil {
		return fmt.Errorf("PersistentVolume failed to enter a bound state even though PVC is Bound: %+v", err)
	}

	return nil
}
Developer: AdoHe, Project: kubernetes, Lines: 19, Source: persistent_volumes.go

Example 4: waitAndVerifyBinds

// Search for bound PVs and PVCs by examining pvols for non-nil claimRefs.
// NOTE: Each iteration waits for a maximum of 3 minutes per PV and, if the PV is bound,
//   up to 3 minutes for the PVC. When the number of PVs != number of PVCs, this can lead
//   to situations where the maximum wait times are reached several times in succession,
//   extending test time. Thus, it is recommended to keep the delta between PVs and PVCs
//   small.
func waitAndVerifyBinds(c clientset.Interface, ns string, pvols pvmap, claims pvcmap, testExpected bool) {

	var actualBinds int
	expectedBinds := len(pvols)
	if expectedBinds > len(claims) { // want the min of # pvs or #pvcs
		expectedBinds = len(claims)
	}

	for pvName := range pvols {
		err := framework.WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, 3*time.Second, 180*time.Second)
		if err != nil && len(pvols) > len(claims) {
			framework.Logf("WARN: pv %v is not bound after max wait", pvName)
			framework.Logf("      This may be ok since there are more pvs than pvcs")
			continue
		}
		Expect(err).NotTo(HaveOccurred())

		pv, err := c.Core().PersistentVolumes().Get(pvName, metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred())
		if cr := pv.Spec.ClaimRef; cr != nil && len(cr.Name) > 0 {
			// Assert bound pvc is a test resource. Failing assertion could
			// indicate non-test PVC interference or a bug in the test
			pvcKey := makePvcKey(ns, cr.Name)
			_, found := claims[pvcKey]
			Expect(found).To(BeTrue())

			err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, 3*time.Second, 180*time.Second)
			Expect(err).NotTo(HaveOccurred())
			actualBinds++
		}
	}

	if testExpected {
		Expect(actualBinds).To(Equal(expectedBinds))
	}
}
Developer: kubernetes, Project: kubernetes, Lines: 42, Source: persistent_volumes.go
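
The pvmap, pvcmap, and makePvcKey identifiers above are defined elsewhere in the same test file. A plausible reconstruction of their shape, offered only for orientation (the real definitions may differ):

// Hypothetical reconstruction of the helper types used by waitAndVerifyBinds.
// "types" refers to the Kubernetes types package (k8s.io/apimachinery/pkg/types
// in newer releases, k8s.io/kubernetes/pkg/types in older ones).
type pvmap map[string]struct{}                // keyed by PV name
type pvcmap map[types.NamespacedName]struct{} // keyed by namespace/name

// makePvcKey builds the map key for a claim from its namespace and name.
func makePvcKey(ns, name string) types.NamespacedName {
	return types.NamespacedName{Namespace: ns, Name: name}
}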

Example 5: testDynamicProvisioning

func testDynamicProvisioning(client clientset.Interface, claim *api.PersistentVolumeClaim) {
	err := framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
	Expect(err).NotTo(HaveOccurred())

	By("checking the claim")
	// Get new copy of the claim
	claim, err = client.Core().PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
	Expect(err).NotTo(HaveOccurred())

	// Get the bound PV
	pv, err := client.Core().PersistentVolumes().Get(claim.Spec.VolumeName)
	Expect(err).NotTo(HaveOccurred())

	// Check sizes
	expectedCapacity := resource.MustParse(expectedSize)
	pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
	Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))

	requestedCapacity := resource.MustParse(requestedSize)
	claimCapacity := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
	Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))

	// Check PV properties
	Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(api.PersistentVolumeReclaimDelete))
	expectedAccessModes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
	Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
	Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
	Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))

	// We start two pods:
	// - The first writes 'hello world' to /mnt/test/data (= the volume).
	// - The second greps for 'hello world' in /mnt/test/data.
	// If both succeed, Kubernetes actually allocated something that is
	// persistent across pods.
	By("checking the created volume is writable")
	runInPodWithVolume(client, claim.Namespace, claim.Name, "echo 'hello world' > /mnt/test/data")

	By("checking the created volume is readable and retains data")
	runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")

	// Ugly hack: if we delete the AWS/GCE/OpenStack volume here, it will
	// probably collide with destruction of the pods above - the pods
	// still have the volume attached (kubelet is slow...) and deletion
	// of attached volume is not allowed by AWS/GCE/OpenStack.
	// Kubernetes *will* retry deletion several times in
	// pvclaimbinder-sync-period.
	// So, technically, this sleep is not needed. On the other hand,
	// the sync period is 10 minutes and we really don't want to wait
	// 10 minutes here. There is no way to see whether kubelet has
	// finished cleaning up the volumes. A small sleep here actually
	// speeds up the test!
	// Three minutes should be enough to clean up the pods properly.
	// We've seen GCE PD detach to take more than 1 minute.
	By("Sleeping to let kubelet destroy all pods")
	time.Sleep(3 * time.Minute)

	By("deleting the claim")
	framework.ExpectNoError(client.Core().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))

	// Wait for the PV to get deleted too.
	framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
}
Developer: eljefedelrodeodeljefe, Project: kubernetes, Lines: 62, Source: volume_provisioning.go

Example 6:

		c = f.Client
		ns = f.Namespace.Name
	})

	framework.KubeDescribe("DynamicProvisioner", func() {
		It("should create and delete persistent volumes", func() {
			framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")
			By("creating a claim with a dynamic provisioning annotation")
			claim := createClaim(ns)
			defer func() {
				c.PersistentVolumeClaims(ns).Delete(claim.Name)
			}()
			claim, err := c.PersistentVolumeClaims(ns).Create(claim)
			Expect(err).NotTo(HaveOccurred())

			err = framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
			Expect(err).NotTo(HaveOccurred())

			By("checking the claim")
			// Get new copy of the claim
			claim, err = c.PersistentVolumeClaims(ns).Get(claim.Name)
			Expect(err).NotTo(HaveOccurred())

			// Get the bound PV
			pv, err := c.PersistentVolumes().Get(claim.Spec.VolumeName)
			Expect(err).NotTo(HaveOccurred())

			// Check sizes
			expectedCapacity := resource.MustParse(expectedSize)
			pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
			Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))
Developer: CodeJuan, Project: kubernetes, Lines: 31, Source: volume_provisioning.go

Example 7:

		// defer deletion to clean up the PV should the test fail post-creation.
		framework.Logf("Creating PersistentVolume")
		pv, err := c.PersistentVolumes().Create(pv)
		if err != nil {
			framework.Failf("Create PersistentVolume failed: %v", err)
		}
		defer deletePersistentVolume(c, pv)
		framework.WaitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 1*time.Second, 20*time.Second)

		// Create the PersistentVolumeClaim and wait for Bound phase
		framework.Logf("Creating PersistentVolumeClaim")
		pvc, err = c.PersistentVolumeClaims(ns).Create(pvc)
		if err != nil {
			framework.Failf("Create PersistentVolumeClaim failed: %v", err)
		}
		framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)

		// Wait for PersistentVolume.Status.Phase to be Bound. Can take several minutes.
		err = framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
		if err != nil {
			framework.Failf("PersistentVolume failed to enter a bound state: %+v", err)
		}
		// Check the PersistentVolume.ClaimRef.UID for non-nil value as confirmation of the bound state.
		framework.Logf("Checking PersistentVolume ClaimRef is non-nil")
		pv, err = c.PersistentVolumes().Get(pv.Name)
		if err != nil {
			framework.Failf("Get PersistentVolume failed: %v", err)
		}
		if pv.Spec.ClaimRef == nil || len(pv.Spec.ClaimRef.UID) == 0 {
			pvJson, _ := json.MarshalIndent(pv, "", "  ")
			framework.Failf("Expected PersistentVolume to be bound, but got nil ClaimRef or UID: %+v", string(pvJson))
		}

		// Check the PersistentVolumeClaim.Status.Phase for Bound state
Developer: RyanBinfeng, Project: kubernetes, Lines: 31, Source: persistent_volumes.go


Note: The k8s.io/kubernetes/test/e2e/framework.WaitForPersistentVolumeClaimPhase examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community-contributed open-source projects; copyright in the source code remains with the original authors. For redistribution and reuse, refer to the corresponding project's license; do not repost without permission.