This article collects and summarizes typical usage examples of the Golang method k8s.io/kubernetes/pkg/client/unversioned.Client.PersistentVolumeClaims. If you are wondering what Client.PersistentVolumeClaims does, how to use it, or want sample code, the curated method examples below may help. You can also explore further usage examples of the containing type, k8s.io/kubernetes/pkg/client/unversioned.Client.
Below are 15 code examples of Client.PersistentVolumeClaims, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
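Before diving into the examples, a minimal sketch of the method's shape may help: PersistentVolumeClaims(ns) returns a namespace-scoped PersistentVolumeClaimInterface exposing the usual Get, List, Create, and Delete operations. The helper below is illustrative only and is not taken from any of the examples; the k8sclient and api import aliases follow Example 1.

// listClaims is an illustrative sketch: it lists every PersistentVolumeClaim
// in the given namespace and prints each claim's phase.
func listClaims(c *k8sclient.Client, ns string) error {
    // PersistentVolumeClaims(ns) scopes all subsequent calls to the namespace ns.
    pvcs, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{})
    if err != nil {
        return err
    }
    for _, pvc := range pvcs.Items {
        fmt.Printf("PVC %s: %s\n", pvc.Name, pvc.Status.Phase)
    }
    return nil
}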
Example 1: createMissingPVs
func createMissingPVs(c *k8sclient.Client, ns string) {
    found, pvcs, pendingClaimNames := findPendingPVs(c, ns)
    if found {
        sshCommand := ""
        createPV(c, ns, pendingClaimNames, sshCommand)
        items := pvcs.Items
        for _, item := range items {
            status := item.Status.Phase
            if status == api.ClaimPending || status == api.ClaimLost {
                err := c.PersistentVolumeClaims(ns).Delete(item.ObjectMeta.Name)
                if err != nil {
                    util.Infof("Error deleting PVC %s\n", item.ObjectMeta.Name)
                } else {
                    util.Infof("Recreating PVC %s\n", item.ObjectMeta.Name)
                    c.PersistentVolumeClaims(ns).Create(&api.PersistentVolumeClaim{
                        ObjectMeta: api.ObjectMeta{
                            Name:      item.ObjectMeta.Name,
                            Namespace: ns,
                        },
                        Spec: api.PersistentVolumeClaimSpec{
                            VolumeName:  ns + "-" + item.ObjectMeta.Name,
                            AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
                            Resources: api.ResourceRequirements{
                                Requests: api.ResourceList{
                                    api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"),
                                },
                            },
                        },
                    })
                }
            }
        }
    }
}
Example 2: waitAndValidatePVandPVC
// Waits for the pv and pvc to be bound to each other, then checks that the pv's
// claimRef matches the pvc. Fails test on errors.
func waitAndValidatePVandPVC(c *client.Client, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) {
    var err error

    // Wait for pv and pvc to bind to each other
    if err = waitOnPVandPVC(c, ns, pv, pvc); err != nil {
        return pv, pvc, err
    }

    // Check that the PersistentVolume.ClaimRef is valid and matches the PVC
    framework.Logf("Checking PersistentVolume ClaimRef is non-nil")
    pv, err = c.PersistentVolumes().Get(pv.Name)
    if err != nil {
        return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v: %v", pv.Name, err)
    }
    pvc, err = c.PersistentVolumeClaims(ns).Get(pvc.Name)
    if err != nil {
        return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolumeClaim %v: %v", pvc.Name, err)
    }
    if pv.Spec.ClaimRef == nil || pv.Spec.ClaimRef.UID != pvc.UID {
        pvJSON, _ := json.Marshal(pv.Spec.ClaimRef)
        return pv, pvc, fmt.Errorf("Expected Bound PersistentVolume %v to have valid ClaimRef: %+v", pv.Name, string(pvJSON))
    }
    return pv, pvc, nil
}
Example 3: createPVC
// create the PVC resource. Fails test on error.
func createPVC(c *client.Client, ns string, pvc *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
    pvc, err := c.PersistentVolumeClaims(ns).Create(pvc)
    if err != nil {
        return pvc, fmt.Errorf("Create PersistentVolumeClaim %v failed: %v", pvc.Name, err)
    }
    return pvc, nil
}
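For context, a caller has to build the claim object before handing it to createPVC. The snippet below is a hypothetical usage sketch, not part of the test suite; the claim name and the 1Gi request are made up, and the spec mirrors the one recreated in Example 1.

// Hypothetical usage of createPVC; "my-claim" and the 1Gi request are illustrative.
claim := &api.PersistentVolumeClaim{
    ObjectMeta: api.ObjectMeta{Name: "my-claim", Namespace: ns},
    Spec: api.PersistentVolumeClaimSpec{
        AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
        Resources: api.ResourceRequirements{
            Requests: api.ResourceList{
                api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"),
            },
        },
    },
}
claim, err := createPVC(c, ns, claim)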
Example 4: validatePersistenceVolumeClaims
func validatePersistenceVolumeClaims(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
    ns, _, err := f.DefaultNamespace()
    if err != nil {
        return Failure, err
    }
    rc, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{})
    if err != nil {
        util.Fatalf("Failed to get PersistentVolumeClaims, %s in namespace %s\n", err, ns)
    }
    if rc != nil {
        items := rc.Items
        pendingClaimNames := make([]string, 0, len(items))
        for _, item := range items {
            status := item.Status.Phase
            if status != "Bound" {
                pendingClaimNames = append(pendingClaimNames, item.ObjectMeta.Name)
            }
        }
        if len(pendingClaimNames) > 0 {
            util.Failuref("PersistentVolumeClaim not Bound for: %s. You need to create a PersistentVolume!\n", strings.Join(pendingClaimNames, ", "))
            util.Info(`
You can enable dynamic PersistentVolume creation with Kubernetes 1.4 or later.

Or to get gofabric8 to create HostPath based PersistentVolume resources for you on minikube and minishift, type:

  gofabric8 volumes

For other clusters you could do something like this - though ideally with a persistent volume implementation other than hostPath:

cat <<EOF | oc create -f -
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: fabric8
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 1000Gi
  hostPath:
    path: /opt/fabric8-data
EOF
`)
            return Failure, err
        }
        return Success, err
    }
    return Failure, err
}
Example 5: validatePersistenceVolumeClaims
func validatePersistenceVolumeClaims(c *k8sclient.Client, f *cmdutil.Factory) (Result, error) {
    ns, _, err := f.DefaultNamespace()
    if err != nil {
        return Failure, err
    }
    rc, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{})
    if err != nil {
        util.Fatalf("Failed to get PersistentVolumeClaims, %s in namespace %s\n", err, ns)
    }
    if rc != nil {
        items := rc.Items
        pendingClaimNames := make([]string, 0, len(items))
        for _, item := range items {
            status := item.Status.Phase
            if status != "Bound" {
                pendingClaimNames = append(pendingClaimNames, item.ObjectMeta.Name)
            }
        }
        if len(pendingClaimNames) > 0 {
            util.Failuref("PersistentVolumeClaim not Bound for: %s. You need to create a PersistentVolume!\n", strings.Join(pendingClaimNames, ", "))
            util.Info(`
To generate a single-node PersistentVolume, type something like this:

cat <<EOF | oc create -f -
---
kind: PersistentVolume
apiVersion: v1
metadata:
  name: fabric8
spec:
  accessModes:
    - ReadWriteOnce
  capacity:
    storage: 1000Gi
  hostPath:
    path: /opt/fabric8-data
EOF
`)
            return Failure, err
        }
        return Success, err
    }
    return Failure, err
}
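The YAML in the two messages above can also be expressed through the same client API. The snippet below is a hypothetical Go equivalent, not part of gofabric8; the field names follow the api package used throughout this article, and the capacity value matches the YAML.

// Hypothetical Go equivalent of the hostPath PersistentVolume YAML above.
pv := &api.PersistentVolume{
    ObjectMeta: api.ObjectMeta{Name: "fabric8"},
    Spec: api.PersistentVolumeSpec{
        AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
        Capacity: api.ResourceList{
            api.ResourceName(api.ResourceStorage): resource.MustParse("1000Gi"),
        },
        PersistentVolumeSource: api.PersistentVolumeSource{
            HostPath: &api.HostPathVolumeSource{Path: "/opt/fabric8-data"},
        },
    },
}
_, err := c.PersistentVolumes().Create(pv)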
Example 6: GetPersistentVolumeClaimDetail
// GetPersistentVolumeClaimDetail returns detailed information about a persistent volume claim
func GetPersistentVolumeClaimDetail(client *client.Client, namespace string, name string) (*PersistentVolumeClaimDetail, error) {
    log.Printf("Getting details of %s persistent volume claim", name)

    rawPersistentVolumeClaim, err := client.PersistentVolumeClaims(namespace).Get(name)
    if err != nil {
        return nil, err
    }

    return getPersistentVolumeClaimDetail(rawPersistentVolumeClaim), nil
}
Example 7: findPendingPVS
func findPendingPVS(c *k8sclient.Client, ns string) (bool, []string) {
    pvcs, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{})
    if err != nil {
        util.Infof("Failed to find any PersistentVolumeClaims, %s in namespace %s\n", err, ns)
    }
    if pvcs != nil {
        items := pvcs.Items
        pendingClaimNames := make([]string, 0, len(items))
        for _, item := range items {
            status := item.Status.Phase
            if status == "Pending" || status == "Lost" {
                pendingClaimNames = append(pendingClaimNames, item.ObjectMeta.Name)
            }
        }
        if len(pendingClaimNames) > 0 {
            return true, pendingClaimNames
        }
    }
    return false, nil
}
Example 8: deletePVCandValidatePV
// Delete the PVC and wait for the PV to become Available again.
// Validate that the PV has recycled (assumption here about reclaimPolicy).
func deletePVCandValidatePV(c *client.Client, ns string, pvc *api.PersistentVolumeClaim, pv *api.PersistentVolume) (*api.PersistentVolume, *api.PersistentVolumeClaim, error) {
    By("Deleting PersistentVolumeClaim to trigger PV Recycling")
    framework.Logf("Deleting PersistentVolumeClaim %v to trigger PV Recycling", pvc.Name)
    err := c.PersistentVolumeClaims(ns).Delete(pvc.Name)
    if err != nil {
        return pv, pvc, fmt.Errorf("Delete of PVC %v failed: %v", pvc.Name, err)
    }

    // Check that the PVC is really deleted.
    pvc, err = c.PersistentVolumeClaims(ns).Get(pvc.Name)
    if err == nil {
        return pv, pvc, fmt.Errorf("PVC %v deleted yet still exists", pvc.Name)
    }
    if !apierrs.IsNotFound(err) {
        return pv, pvc, fmt.Errorf("Get on deleted PVC %v failed with error other than \"not found\": %v", pvc.Name, err)
    }

    // Wait for the PV's phase to return to Available
    framework.Logf("Waiting for recycling process to complete.")
    err = framework.WaitForPersistentVolumePhase(api.VolumeAvailable, c, pv.Name, 3*time.Second, 300*time.Second)
    if err != nil {
        return pv, pvc, fmt.Errorf("Recycling failed: %v", err)
    }

    // Examine the pv.ClaimRef and UID. Expect nil values.
    pv, err = c.PersistentVolumes().Get(pv.Name)
    if err != nil {
        return pv, pvc, fmt.Errorf("Cannot re-get PersistentVolume %v: %v", pv.Name, err)
    }
    if pv.Spec.ClaimRef != nil && len(pv.Spec.ClaimRef.UID) > 0 {
        crJSON, _ := json.Marshal(pv.Spec.ClaimRef)
        return pv, pvc, fmt.Errorf("Expected PV %v's ClaimRef to be nil, or the claimRef's UID to be blank. Instead claimRef is: %v", pv.Name, string(crJSON))
    }
    return pv, pvc, nil
}
Example 9: findPendingPVs
func findPendingPVs(c *k8sclient.Client, ns string) (bool, *api.PersistentVolumeClaimList, []string) {
    pvcs, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{})
    if err != nil {
        util.Infof("Failed to find any PersistentVolumeClaims, %s in namespace %s\n", err, ns)
    }
    if pvcs != nil {
        pendingClaims := pvcs.Items
        var pendingClaimNames []string
        for _, item := range pendingClaims {
            status := item.Status.Phase
            if status == api.ClaimPending || status == api.ClaimLost {
                pendingClaimNames = append(pendingClaimNames, item.ObjectMeta.Name)
            }
        }
        if len(pendingClaimNames) > 0 {
            return true, pvcs, pendingClaimNames
        }
    }
    return false, nil, nil
}
Example 10:
// filled in BeforeEach
var c *client.Client
var ns string

BeforeEach(func() {
    c = framework.Client
    ns = framework.Namespace.Name
})

Describe("DynamicProvisioner", func() {
    It("should create and delete persistent volumes", func() {
        SkipUnlessProviderIs("openstack", "gce", "aws", "gke")

        By("creating a claim with a dynamic provisioning annotation")
        claim := createClaim(ns)
        defer func() {
            c.PersistentVolumeClaims(ns).Delete(claim.Name)
        }()
        claim, err := c.PersistentVolumeClaims(ns).Create(claim)
        Expect(err).NotTo(HaveOccurred())

        err = waitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, claim.Name, poll, claimProvisionTimeout)
        Expect(err).NotTo(HaveOccurred())

        By("checking the claim")
        // Get new copy of the claim
        claim, err = c.PersistentVolumeClaims(ns).Get(claim.Name)
        Expect(err).NotTo(HaveOccurred())

        // Get the bound PV
        pv, err := c.PersistentVolumes().Get(claim.Spec.VolumeName)
        Expect(err).NotTo(HaveOccurred())
Example 11: deleteAllPetSets
func deleteAllPetSets(c *client.Client, ns string) {
    pst := &petSetTester{c: c}
    psList, err := c.Apps().PetSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
    ExpectNoError(err)

    // Scale down each petset, then delete it completely.
    // Deleting a pvc without doing this will leak volumes, #25101.
    errList := []string{}
    for _, ps := range psList.Items {
        framework.Logf("Scaling petset %v to 0", ps.Name)
        if err := pst.scale(&ps, 0); err != nil {
            errList = append(errList, fmt.Sprintf("%v", err))
        }
        framework.Logf("Deleting petset %v", ps.Name)
        if err := c.Apps().PetSets(ps.Namespace).Delete(ps.Name, nil); err != nil {
            errList = append(errList, fmt.Sprintf("%v", err))
        }
    }

    // pvs are global, so we need to wait for the exact ones bound to the petset pvcs.
    pvNames := sets.NewString()
    // TODO: Don't assume all pvcs in the ns belong to a petset
    pvcPollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
        pvcList, err := c.PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
        if err != nil {
            framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
            return false, nil
        }
        for _, pvc := range pvcList.Items {
            pvNames.Insert(pvc.Spec.VolumeName)
            // TODO: Double check that there are no pods referencing the pvc
            framework.Logf("Deleting pvc: %v with volume %v", pvc.Name, pvc.Spec.VolumeName)
            if err := c.PersistentVolumeClaims(ns).Delete(pvc.Name); err != nil {
                return false, nil
            }
        }
        return true, nil
    })
    if pvcPollErr != nil {
        errList = append(errList, "Timeout waiting for pvc deletion.")
    }

    pollErr := wait.PollImmediate(petsetPoll, petsetTimeout, func() (bool, error) {
        pvList, err := c.PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()})
        if err != nil {
            framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
            return false, nil
        }
        waitingFor := []string{}
        for _, pv := range pvList.Items {
            if pvNames.Has(pv.Name) {
                waitingFor = append(waitingFor, fmt.Sprintf("%v: %+v", pv.Name, pv.Status))
            }
        }
        if len(waitingFor) == 0 {
            return true, nil
        }
        framework.Logf("Still waiting for pvs of petset to disappear:\n%v", strings.Join(waitingFor, "\n"))
        return false, nil
    })
    if pollErr != nil {
        errList = append(errList, "Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs.")
    }
    if len(errList) != 0 {
        ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
    }
}
Example 12:
    volumeTestCleanup(c, config)
}()

pod := startVolumeServer(c, config)
serverIP := pod.Status.PodIP
framework.Logf("NFS server IP address: %v", serverIP)

pv := makePersistentVolume(serverIP)
pvc := makePersistentVolumeClaim(ns)

framework.Logf("Creating PersistentVolume using NFS")
pv, err := c.PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred())

framework.Logf("Creating PersistentVolumeClaim")
pvc, err = c.PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred())

// allow the binder a chance to catch up. should not be more than 20s.
framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 1*time.Second, 30*time.Second)

pv, err = c.PersistentVolumes().Get(pv.Name)
Expect(err).NotTo(HaveOccurred())
if pv.Spec.ClaimRef == nil {
    framework.Failf("Expected PersistentVolume to be bound, but got nil ClaimRef: %+v", pv)
}

framework.Logf("Deleting PersistentVolumeClaim to trigger PV Recycling")
err = c.PersistentVolumeClaims(ns).Delete(pvc.Name)
Expect(err).NotTo(HaveOccurred())
Example 13: claimClient
// claimClient returns the pvcClient for the given kubeClient/ns.
func claimClient(kubeClient *client.Client, ns string) client.PersistentVolumeClaimInterface {
    return kubeClient.PersistentVolumeClaims(ns)
}
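As a hypothetical usage note, the helper simply fixes the namespace once so call sites stay short:

// Hypothetical usage of claimClient; "default" and "my-claim" are illustrative.
claims := claimClient(kubeClient, "default")
pvc, err := claims.Get("my-claim")
if err == nil {
    fmt.Printf("claim %s is %s\n", pvc.Name, pvc.Status.Phase)
}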
Example 14:
    // If it doesn't exist, create the nfs server pod in "default" ns.
    // The "default" ns is used so that individual tests can delete
    // their ns without impacting the nfs-server pod.
    if nfsServerPod == nil {
        nfsServerPod = startVolumeServer(c, NFSconfig)
        serverIP = nfsServerPod.Status.PodIP
        framework.Logf("NFS server IP address: %v", serverIP)
    }
})

AfterEach(func() {
    if c != nil && len(ns) > 0 { // still have client and namespace
        if pvc != nil && len(pvc.Name) > 0 {
            // Delete the PersistentVolumeClaim
            framework.Logf("AfterEach: PVC %v is non-nil, deleting claim", pvc.Name)
            err := c.PersistentVolumeClaims(ns).Delete(pvc.Name)
            if err != nil && !apierrs.IsNotFound(err) {
                framework.Logf("AfterEach: delete of PersistentVolumeClaim %v error: %v", pvc.Name, err)
            }
            pvc = nil
        }
        if pv != nil && len(pv.Name) > 0 {
            framework.Logf("AfterEach: PV %v is non-nil, deleting pv", pv.Name)
            err := c.PersistentVolumes().Delete(pv.Name)
            if err != nil && !apierrs.IsNotFound(err) {
                framework.Logf("AfterEach: delete of PersistentVolume %v error: %v", pv.Name, err)
            }
            pv = nil
        }
    }
})
Example 15: testDynamicProvisioning
func testDynamicProvisioning(client *client.Client, claim *api.PersistentVolumeClaim) {
    err := framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
    Expect(err).NotTo(HaveOccurred())

    By("checking the claim")
    // Get new copy of the claim
    claim, err = client.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
    Expect(err).NotTo(HaveOccurred())

    // Get the bound PV
    pv, err := client.PersistentVolumes().Get(claim.Spec.VolumeName)
    Expect(err).NotTo(HaveOccurred())

    // Check sizes
    expectedCapacity := resource.MustParse(expectedSize)
    pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
    Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))

    requestedCapacity := resource.MustParse(requestedSize)
    claimCapacity := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
    Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))

    // Check PV properties
    Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(api.PersistentVolumeReclaimDelete))
    expectedAccessModes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
    Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
    Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
    Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))

    // We start two pods:
    // - The first writes 'hello world' to /mnt/test/data (= the volume).
    // - The second one runs grep 'hello world' on /mnt/test/data.
    // If both succeed, Kubernetes actually allocated something that is
    // persistent across pods.
    By("checking the created volume is writable")
    runInPodWithVolume(client, claim.Namespace, claim.Name, "echo 'hello world' > /mnt/test/data")

    By("checking the created volume is readable and retains data")
    runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")

    // Ugly hack: if we delete the AWS/GCE/OpenStack volume here, it will
    // probably collide with destruction of the pods above - the pods
    // still have the volume attached (kubelet is slow...) and deletion
    // of an attached volume is not allowed by AWS/GCE/OpenStack.
    // Kubernetes *will* retry deletion several times within
    // pvclaimbinder-sync-period.
    // So, technically, this sleep is not needed. On the other hand,
    // the sync period is 10 minutes and we really don't want to wait
    // 10 minutes here. There is no way to see whether kubelet has
    // finished cleaning up volumes. A small sleep here actually
    // speeds up the test!
    // Three minutes should be enough to clean up the pods properly.
    // We've seen GCE PD detach take more than 1 minute.
    By("Sleeping to let kubelet destroy all pods")
    time.Sleep(3 * time.Minute)

    By("deleting the claim")
    framework.ExpectNoError(client.PersistentVolumeClaims(claim.Namespace).Delete(claim.Name))

    // Wait for the PV to get deleted too.
    framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
}