This page collects typical usage examples of the Golang method Client.Storage from k8s.io/kubernetes/pkg/client/unversioned. If you are wondering what Client.Storage does, how to call it, or what it looks like in practice, the curated method examples below should help. You can also browse further usage examples of its containing type, k8s.io/kubernetes/pkg/client/unversioned.Client.
Two code examples of Client.Storage are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang code examples.
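Before the examples, here is a minimal, self-contained sketch of how Client.Storage() is typically reached from a freshly constructed unversioned client. The API server address and the List call are illustrative assumptions, not taken from the examples below:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/restclient"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

func main() {
	// Assumes an API server reachable at this address.
	c, err := client.New(&restclient.Config{Host: "http://localhost:8080"})
	if err != nil {
		panic(err)
	}
	// Client.Storage() returns the typed client for the storage.k8s.io
	// API group; StorageClasses() is one of its interfaces.
	classes, err := c.Storage().StorageClasses().List(api.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, sc := range classes.Items {
		fmt.Println(sc.Name)
	}
}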
Example 1:
// filled in BeforeEach
var c *client.Client
var ns string

BeforeEach(func() {
	c = f.Client
	ns = f.Namespace.Name
})

framework.KubeDescribe("DynamicProvisioner", func() {
	It("should create and delete persistent volumes [Slow]", func() {
		framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke")

		By("creating a StorageClass")
		class := newStorageClass()
		_, err := c.Storage().StorageClasses().Create(class)
		defer c.Storage().StorageClasses().Delete(class.Name)
		Expect(err).NotTo(HaveOccurred())

		By("creating a claim with a dynamic provisioning annotation")
		claim := newClaim(ns, false)
		defer func() {
			c.PersistentVolumeClaims(ns).Delete(claim.Name)
		}()
		claim, err = c.PersistentVolumeClaims(ns).Create(claim)
		Expect(err).NotTo(HaveOccurred())

		testDynamicProvisioning(c, claim)
	})
})
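The helpers newStorageClass and newClaim used above are not shown on this page. A plausible sketch of what they might return (the provisioner, annotation key, names, and sizes are illustrative assumptions, not the actual test fixtures):

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/apis/storage"
)

// Hypothetical fixture: a StorageClass with a cloud-specific provisioner.
func newStorageClass() *storage.StorageClass {
	return &storage.StorageClass{
		ObjectMeta:  api.ObjectMeta{Name: "fast"},
		Provisioner: "kubernetes.io/gce-pd", // assumed; depends on the provider
	}
}

// Hypothetical fixture: a PVC carrying the dynamic-provisioning annotation.
// The bool is assumed to select the alpha vs. beta annotation key.
func newClaim(ns string, alpha bool) *api.PersistentVolumeClaim {
	key := "volume.beta.kubernetes.io/storage-class"
	if alpha {
		key = "volume.alpha.kubernetes.io/storage-class"
	}
	return &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    ns,
			Annotations:  map[string]string{key: "fast"},
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"),
				},
			},
		},
	}
}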
Example 2: testDynamicProvisioning
func testDynamicProvisioning(t storageClassTest, client *client.Client, claim *api.PersistentVolumeClaim, class *storage.StorageClass) {
	if class != nil {
		By("creating a StorageClass " + class.Name)
		class, err := client.Storage().StorageClasses().Create(class)
		defer func() {
			framework.Logf("deleting storage class %s", class.Name)
			client.Storage().StorageClasses().Delete(class.Name)
		}()
		Expect(err).NotTo(HaveOccurred())
	}

	By("creating a claim")
	claim, err := client.PersistentVolumeClaims(claim.Namespace).Create(claim)
	defer func() {
		framework.Logf("deleting claim %s/%s", claim.Namespace, claim.Name)
		client.PersistentVolumeClaims(claim.Namespace).Delete(claim.Name)
	}()
	Expect(err).NotTo(HaveOccurred())
	err = framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
	Expect(err).NotTo(HaveOccurred())

	By("checking the claim")
	// Get new copy of the claim
	claim, err = client.PersistentVolumeClaims(claim.Namespace).Get(claim.Name)
	Expect(err).NotTo(HaveOccurred())

	// Get the bound PV
	pv, err := client.PersistentVolumes().Get(claim.Spec.VolumeName)
	Expect(err).NotTo(HaveOccurred())

	// Check sizes
	expectedCapacity := resource.MustParse(t.expectedSize)
	pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
	Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))

	requestedCapacity := resource.MustParse(t.claimSize)
	claimCapacity := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
	Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))

	// Check PV properties
	By("checking the PV")
	Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(api.PersistentVolumeReclaimDelete))
	expectedAccessModes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
	Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
	Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
	Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))

	// Run the checker
	if t.pvCheck != nil {
		err = t.pvCheck(pv)
		Expect(err).NotTo(HaveOccurred())
	}

	// We start two pods:
	// - The first writes 'hello world' to /mnt/test (= the volume).
	// - The second one runs grep 'hello world' on /mnt/test.
	// If both succeed, Kubernetes actually allocated something that is
	// persistent across pods.
	By("checking the created volume is writable")
	runInPodWithVolume(client, claim.Namespace, claim.Name, "echo 'hello world' > /mnt/test/data")

	By("checking the created volume is readable and retains data")
	runInPodWithVolume(client, claim.Namespace, claim.Name, "grep 'hello world' /mnt/test/data")

	// Ugly hack: if we delete the AWS/GCE/OpenStack volume here, it will
	// probably collide with destruction of the pods above - the pods
	// still have the volume attached (kubelet is slow...) and deletion
	// of an attached volume is not allowed by AWS/GCE/OpenStack.
	// Kubernetes *will* retry the deletion several times within
	// pvclaimbinder-sync-period.
	// So, technically, this sleep is not needed. On the other hand,
	// the sync period is 10 minutes and we really don't want to wait
	// 10 minutes here. There is no way to see whether kubelet has
	// finished cleaning up the volumes, and a small sleep here actually
	// speeds up the test!
	// Three minutes should be enough to clean up the pods properly.
	// We've seen a GCE PD detach take more than 1 minute.
	By("Sleeping to let kubelet destroy all pods")
	time.Sleep(3 * time.Minute)

	By("deleting the claim")
	framework.ExpectNoError(client.PersistentVolumeClaims(claim.Namespace).Delete(claim.Name))

	// Wait for the PV to get deleted too.
	framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
}
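The storageClassTest parameter and the runInPodWithVolume helper are likewise referenced above without being shown. A simplified, hypothetical sketch, under the assumption that the helper starts a busybox pod with the claim mounted at /mnt/test and waits for it to succeed:

import (
	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/test/e2e/framework"
)

// Hypothetical shape of the test descriptor consumed above.
type storageClassTest struct {
	claimSize    string
	expectedSize string
	pvCheck      func(volume *api.PersistentVolume) error
}

// Hypothetical helper: runs a single shell command in a pod that mounts
// the given claim at /mnt/test, then waits for the pod to succeed.
func runInPodWithVolume(c *client.Client, ns, claimName, command string) {
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{GenerateName: "pvc-volume-tester-"},
		Spec: api.PodSpec{
			RestartPolicy: api.RestartPolicyNever,
			Containers: []api.Container{
				{
					Name:    "volume-tester",
					Image:   "gcr.io/google_containers/busybox:1.24", // assumed image
					Command: []string{"/bin/sh", "-c", command},
					VolumeMounts: []api.VolumeMount{
						{Name: "my-volume", MountPath: "/mnt/test"},
					},
				},
			},
			Volumes: []api.Volume{
				{
					Name: "my-volume",
					VolumeSource: api.VolumeSource{
						PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
							ClaimName: claimName,
						},
					},
				},
			},
		},
	}
	pod, err := c.Pods(ns).Create(pod)
	framework.ExpectNoError(err)
	defer func() {
		// Best-effort cleanup; the delete error is deliberately ignored.
		c.Pods(ns).Delete(pod.Name, nil)
	}()
	// Assumed framework helper; waits until the container exits 0.
	framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Spec.Containers[0].Name, pod.Namespace))
}

With fixtures like these, Example 2 could be driven by something like testDynamicProvisioning(storageClassTest{claimSize: "1Gi", expectedSize: "1Gi"}, c, claim, class); the exact values depend on the provider under test.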