This article collects typical usage examples of the NewSimpleClientset function from the Go package k8s.io/kubernetes/pkg/client/testing/fake. If you have been wondering what NewSimpleClientset does and how to call it, the curated code samples below should help.
The sections that follow show 15 code examples of NewSimpleClientset, ordered by popularity by default.
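Before the examples, here is a minimal, hedged sketch of the usual pattern (not taken from the examples below; the pod name, namespace, and the Core() accessor are assumptions about this Kubernetes vintage): seed the fake clientset with objects, call it the way production code would, then assert on the actions it recorded. It assumes imports of the fake, api, and testing packages, as in the examples that follow.
func TestFakeClientsetSketch(t *testing.T) {
	// Seed the fake with an existing Pod; the fake serves reads from these
	// seeded objects and records every call made against it.
	kubeClient := fake.NewSimpleClientset(&api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "mypod", Namespace: "default"},
	})
	// Assumption: the internal clientset of this era exposes the core group as Core().
	if _, err := kubeClient.Core().Pods("default").Get("mypod"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Every call is recorded; tests typically assert on the verb/resource pairs.
	for _, action := range kubeClient.Actions() {
		t.Logf("verb=%s resource=%s", action.GetVerb(), action.GetResource())
	}
}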
Example 1: TestSyncResourceQuotaNoChange
func TestSyncResourceQuotaNoChange(t *testing.T) {
quota := api.ResourceQuota{
Spec: api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
},
},
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("0"),
},
},
}
kubeClient := fake.NewSimpleClientset(&api.PodList{}, "a)
ResourceQuotaController := NewResourceQuotaController(kubeClient, controller.StaticResyncPeriodFunc(time.Second))
err := ResourceQuotaController.syncResourceQuota(quota)
if err != nil {
t.Fatalf("Unexpected error %v", err)
}
actions := kubeClient.Actions()
if len(actions) != 1 && !actions[0].Matches("list", "pods") {
t.Errorf("SyncResourceQuota made an unexpected client action when state was not dirty: %v", kubeClient.Actions)
}
}
Example 2: TestIncrementUsageReplicationControllers
func TestIncrementUsageReplicationControllers(t *testing.T) {
namespace := "default"
client := fake.NewSimpleClientset(&api.ReplicationControllerList{
Items: []api.ReplicationController{
{
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
},
},
})
status := &api.ResourceQuotaStatus{
Hard: api.ResourceList{},
Used: api.ResourceList{},
}
r := api.ResourceReplicationControllers
status.Hard[r] = resource.MustParse("2")
status.Used[r] = resource.MustParse("1")
dirty, err := IncrementUsage(admission.NewAttributesRecord(&api.ReplicationController{}, api.Kind("ReplicationController"), namespace, "name", api.Resource("replicationcontrollers"), "", admission.Create, nil), status, client)
if err != nil {
t.Errorf("Unexpected error: %v", err)
}
if !dirty {
t.Errorf("Expected the status to get incremented, therefore should have been dirty")
}
quantity := status.Used[r]
if quantity.Value() != int64(2) {
t.Errorf("Expected new item count to be 2, but was %s", quantity.String())
}
}
Example 3: TestSAR
func TestSAR(t *testing.T) {
store := projectcache.NewCacheStore(cache.IndexFuncToKeyFuncAdapter(cache.MetaNamespaceIndexFunc))
mockClient := &testclient.Fake{}
mockClient.AddReactor("get", "namespaces", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
return true, nil, fmt.Errorf("shouldn't get here")
})
cache := projectcache.NewFake(mockClient.Namespaces(), store, "")
mockClientset := clientsetfake.NewSimpleClientset()
handler := &lifecycle{client: mockClientset}
handler.SetProjectCache(cache)
tests := map[string]struct {
kind string
resource string
}{
"subject access review": {
kind: "SubjectAccessReview",
resource: "subjectaccessreviews",
},
"local subject access review": {
kind: "LocalSubjectAccessReview",
resource: "localsubjectaccessreviews",
},
}
for k, v := range tests {
err := handler.Admit(admission.NewAttributesRecord(nil, kapi.Kind(v.kind), "foo", "name", kapi.Resource(v.resource), "", "CREATE", nil))
if err != nil {
t.Errorf("Unexpected error for %s returned from admission handler: %v", k, err)
}
}
}
Example 4: TestPlugin
func TestPlugin(t *testing.T) {
var (
testPodUID = types.UID("test_pod_uid")
testVolumeName = "test_volume_name"
testNamespace = "test_secret_namespace"
testName = "test_secret_name"
volumeSpec = volumeSpec(testVolumeName, testName)
secret = secret(testNamespace, testName)
client = fake.NewSimpleClientset(&secret)
pluginMgr = volume.VolumePluginMgr{}
rootDir, host = newTestHost(t, client)
)
pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
plugin, err := pluginMgr.FindPluginByName(secretPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Builder: %v", err)
}
if builder == nil {
t.Errorf("Got a nil Builder")
}
volumePath := builder.GetPath()
if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~secret/test_volume_name")) {
t.Errorf("Got unexpected path: %s", volumePath)
}
err = builder.SetUp(nil)
if err != nil {
t.Errorf("Failed to setup volume: %v", err)
}
if _, err := os.Stat(volumePath); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
// secret volume should create its own empty wrapper path
podWrapperMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid/plugins/kubernetes.io~empty-dir/wrapped_test_volume_name", rootDir)
if _, err := os.Stat(podWrapperMetadataDir); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, empty-dir wrapper path is not created: %s", podWrapperMetadataDir)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
doTestSecretDataInVolume(volumePath, secret, t)
doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
Example 5: TestDoNotDeleteMirrorPods
func TestDoNotDeleteMirrorPods(t *testing.T) {
staticPod := getTestPod()
staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
mirrorPod := getTestPod()
mirrorPod.UID = "mirror-12345678"
mirrorPod.Annotations = map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
}
// Set the deletion timestamp.
mirrorPod.DeletionTimestamp = new(unversioned.Time)
client := fake.NewSimpleClientset(mirrorPod)
m := newTestManager(client)
m.podManager.AddPod(staticPod)
m.podManager.AddPod(mirrorPod)
// Verify setup.
assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)
status := getRandomPodStatus()
now := unversioned.Now()
status.StartTime = &now
m.SetPodStatus(staticPod, status)
m.testSyncBatch()
// Expect not to see a delete action.
verifyActions(t, m.kubeClient, []core.Action{
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
})
}
Example 6: TestCasting
func TestCasting(t *testing.T) {
clientset := fake.NewSimpleClientset()
binder := NewPersistentVolumeClaimBinder(clientset, 1*time.Second)
pv := &api.PersistentVolume{}
unk := cache.DeletedFinalStateUnknown{}
pvc := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{Name: "foo"},
Status: api.PersistentVolumeClaimStatus{Phase: api.ClaimBound},
}
// Inject mockClient into the binder. This prevents weird errors on stderr
// as the binder wants to load PV/PVC from API server.
mockClient := &mockBinderClient{
volume: pv,
claim: pvc,
}
binder.client = mockClient
// none of these should fail casting.
// the real test is not failing when passed DeletedFinalStateUnknown in the deleteHandler
binder.addVolume(pv)
binder.updateVolume(pv, pv)
binder.deleteVolume(pv)
binder.deleteVolume(unk)
binder.addClaim(pvc)
binder.updateClaim(pvc, pvc)
}
Example 7: TestLimitRangerCacheAndLRUExpiredMisses
func TestLimitRangerCacheAndLRUExpiredMisses(t *testing.T) {
liveLookupCache, err := lru.New(10000)
if err != nil {
t.Fatal(err)
}
limitRange := validLimitRangeNoDefaults()
client := fake.NewSimpleClientset(&limitRange)
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
handler := &limitRanger{
Handler: admission.NewHandler(admission.Create, admission.Update),
client: client,
limitFunc: Limit,
indexer: indexer,
liveLookupCache: liveLookupCache,
}
testPod := validPod("testPod", 1, api.ResourceRequirements{})
// add to the lru cache
liveLookupCache.Add(limitRange.Namespace, liveLookupEntry{expiry: time.Now().Add(time.Duration(-30 * time.Second)), items: []*api.LimitRange{}})
err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod"), limitRange.Namespace, "testPod", api.Resource("pods"), "", admission.Update, nil))
if err == nil {
t.Errorf("Expected an error since the pod did not specify resource limits in its update call")
}
err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod"), limitRange.Namespace, "testPod", api.Resource("pods"), "status", admission.Update, nil))
if err != nil {
t.Errorf("Should have ignored calls to any subresource of pod %v", err)
}
}
Example 8: TestLimitRangerIgnoresSubresource
func TestLimitRangerIgnoresSubresource(t *testing.T) {
client := fake.NewSimpleClientset()
indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
handler := &limitRanger{
Handler: admission.NewHandler(admission.Create, admission.Update),
client: client,
limitFunc: Limit,
indexer: indexer,
}
limitRange := validLimitRangeNoDefaults()
testPod := validPod("testPod", 1, api.ResourceRequirements{})
indexer.Add(&limitRange)
err := handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod"), limitRange.Namespace, "testPod", api.Resource("pods"), "", admission.Update, nil))
if err == nil {
t.Errorf("Expected an error since the pod did not specify resource limits in its update call")
}
err = handler.Admit(admission.NewAttributesRecord(&testPod, api.Kind("Pod"), limitRange.Namespace, "testPod", api.Resource("pods"), "status", admission.Update, nil))
if err != nil {
t.Errorf("Should have ignored calls to any subresource of pod %v", err)
}
}
Example 9: TestPersistentClaimReadOnlyFlag
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
Glusterfs: &api.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
},
ClaimRef: &api.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
},
}
ep := &api.Endpoints{
ObjectMeta: api.ObjectMeta{
Name: "ep",
},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "127.0.0.1"}},
Ports: []api.EndpointPort{{"foo", 80, api.ProtocolTCP}},
}},
}
client := fake.NewSimpleClientset(pv, claim, ep)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)
// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
if !builder.GetAttributes().ReadOnly {
t.Errorf("Expected true for builder.IsReadOnly")
}
}
Example 10: TestStaticPodStatus
func TestStaticPodStatus(t *testing.T) {
staticPod := getTestPod()
staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
mirrorPod := getTestPod()
mirrorPod.UID = "mirror-12345678"
mirrorPod.Annotations = map[string]string{
kubetypes.ConfigSourceAnnotationKey: "api",
kubetypes.ConfigMirrorAnnotationKey: "mirror",
}
client := fake.NewSimpleClientset(mirrorPod)
m := newTestManager(client)
m.podManager.AddPod(staticPod)
m.podManager.AddPod(mirrorPod)
// Verify setup.
assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)
status := getRandomPodStatus()
now := unversioned.Now()
status.StartTime = &now
m.SetPodStatus(staticPod, status)
retrievedStatus := expectPodStatus(t, m, staticPod)
assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
retrievedStatus, _ = m.GetPodStatus(mirrorPod.UID)
assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
// Should translate mirrorPod / staticPod UID.
m.testSyncBatch()
verifyActions(t, m.kubeClient, []core.Action{
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
})
updateAction := client.Actions()[1].(core.UpdateActionImpl)
updatedPod := updateAction.Object.(*api.Pod)
assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID)
assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status)
client.ClearActions()
// No changes.
m.testSyncBatch()
verifyActions(t, m.kubeClient, []core.Action{})
// Mirror pod identity changes.
m.podManager.DeletePod(mirrorPod)
mirrorPod.UID = "new-mirror-pod"
mirrorPod.Status = api.PodStatus{}
m.podManager.AddPod(mirrorPod)
// Expect update to new mirrorPod.
m.testSyncBatch()
verifyActions(t, m.kubeClient, []core.Action{
core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: "pods"}},
core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: "pods", Subresource: "status"}},
})
updateAction = client.Actions()[1].(core.UpdateActionImpl)
updatedPod = updateAction.Object.(*api.Pod)
assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID)
assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status)
}
Example 11: TestPluginIdempotent
// Test the case where the 'ready' file has been created and the pod volume dir
// is a mountpoint. Mount should not be called.
func TestPluginIdempotent(t *testing.T) {
var (
testPodUID = types.UID("test_pod_uid2")
testVolumeName = "test_volume_name"
testNamespace = "test_secret_namespace"
testName = "test_secret_name"
volumeSpec = volumeSpec(testVolumeName, testName)
secret = secret(testNamespace, testName)
client = fake.NewSimpleClientset(&secret)
pluginMgr = volume.VolumePluginMgr{}
rootDir, host = newTestHost(t, client)
)
pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
plugin, err := pluginMgr.FindPluginByName(secretPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
podVolumeDir := fmt.Sprintf("%v/pods/test_pod_uid2/volumes/kubernetes.io~secret/test_volume_name", rootDir)
podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid2/plugins/kubernetes.io~secret/test_volume_name", rootDir)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
mounter := host.GetMounter().(*mount.FakeMounter)
mounter.MountPoints = []mount.MountPoint{
{
Path: podVolumeDir,
},
}
util.SetReady(podMetadataDir)
builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil {
t.Errorf("Failed to make a new Builder: %v", err)
}
if builder == nil {
t.Errorf("Got a nil Builder")
}
volumePath := builder.GetPath()
err = builder.SetUp(nil)
if err != nil {
t.Errorf("Failed to setup volume: %v", err)
}
if len(mounter.Log) != 0 {
t.Errorf("Unexpected calls made to mounter: %v", mounter.Log)
}
if _, err := os.Stat(volumePath); err != nil {
if !os.IsNotExist(err) {
t.Errorf("SetUp() failed unexpectedly: %v", err)
}
} else {
t.Errorf("volume path should not exist: %v", volumePath)
}
}
Example 12: TestSyncResourceQuotaSpecChange
func TestSyncResourceQuotaSpecChange(t *testing.T) {
quota := api.ResourceQuota{
Spec: api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
},
},
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("3"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("0"),
},
},
}
expectedUsage := api.ResourceQuota{
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("0"),
},
},
}
kubeClient := fake.NewSimpleClientset("a)
ResourceQuotaController := NewResourceQuotaController(kubeClient, controller.StaticResyncPeriodFunc(time.Second))
err := ResourceQuotaController.syncResourceQuota(quota)
if err != nil {
t.Fatalf("Unexpected error %v", err)
}
usage := kubeClient.Actions()[1].(testclient.UpdateAction).GetObject().(*api.ResourceQuota)
// ensure hard and used limits are what we expected
for k, v := range expectedUsage.Status.Hard {
actual := usage.Status.Hard[k]
actualValue := actual.String()
expectedValue := v.String()
if expectedValue != actualValue {
t.Errorf("Usage Hard: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
}
}
for k, v := range expectedUsage.Status.Used {
actual := usage.Status.Used[k]
actualValue := actual.String()
expectedValue := v.String()
if expectedValue != actualValue {
t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
}
}
}
Example 13: TestFailedRecycling
func TestFailedRecycling(t *testing.T) {
pv := &api.PersistentVolume{
Spec: api.PersistentVolumeSpec{
AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
Capacity: api.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("8Gi"),
},
PersistentVolumeSource: api.PersistentVolumeSource{
HostPath: &api.HostPathVolumeSource{
Path: "/somepath/data02",
},
},
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
ClaimRef: &api.ObjectReference{
Name: "foo",
Namespace: "bar",
},
},
Status: api.PersistentVolumeStatus{
Phase: api.VolumeReleased,
},
}
mockClient := &mockBinderClient{
volume: pv,
}
// no Init called for pluginMgr and no plugins are available. Volume should fail recycling.
plugMgr := volume.VolumePluginMgr{}
recycler := &PersistentVolumeRecycler{
kubeClient: fake.NewSimpleClientset(),
client: mockClient,
pluginMgr: plugMgr,
}
err := recycler.reclaimVolume(pv)
if err != nil {
t.Errorf("Unexpected non-nil error: %v", err)
}
if mockClient.volume.Status.Phase != api.VolumeFailed {
t.Errorf("Expected %s but got %s", api.VolumeFailed, mockClient.volume.Status.Phase)
}
pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimDelete
err = recycler.reclaimVolume(pv)
if err != nil {
t.Errorf("Unexpected non-nil error: %v", err)
}
if mockClient.volume.Status.Phase != api.VolumeFailed {
t.Errorf("Expected %s but got %s", api.VolumeFailed, mockClient.volume.Status.Phase)
}
}
Example 14: TestIncrementUsageOnUpdateIgnoresNonPodResources
func TestIncrementUsageOnUpdateIgnoresNonPodResources(t *testing.T) {
testCase := []struct {
kind unversioned.GroupKind
resource unversioned.GroupResource
subresource string
object runtime.Object
}{
{
kind: api.Kind("Service"),
resource: api.Resource("services"),
object: &api.Service{},
},
{
kind: api.Kind("ReplicationController"),
resource: api.Resource("replicationcontrollers"),
object: &api.ReplicationController{},
},
{
kind: api.Kind("ResourceQuota"),
resource: api.Resource("resourcequotas"),
object: &api.ResourceQuota{},
},
{
kind: api.Kind("Secret"),
resource: api.Resource("secrets"),
object: &api.Secret{},
},
{
kind: api.Kind("PersistentVolumeClaim"),
resource: api.Resource("persistentvolumeclaims"),
object: &api.PersistentVolumeClaim{},
},
}
for _, testCase := range testCase {
client := fake.NewSimpleClientset()
status := &api.ResourceQuotaStatus{
Hard: api.ResourceList{},
Used: api.ResourceList{},
}
r := resourceToResourceName[testCase.resource]
status.Hard[r] = resource.MustParse("2")
status.Used[r] = resource.MustParse("1")
attributesRecord := admission.NewAttributesRecord(testCase.object, testCase.kind, "my-ns", "new-thing",
testCase.resource, testCase.subresource, admission.Update, nil)
dirty, err := IncrementUsage(attributesRecord, status, client)
if err != nil {
t.Errorf("Increment usage of resource %v had unexpected error: %v", testCase.resource, err)
}
if dirty {
t.Errorf("Increment usage of resource %v should not result in a dirty quota on update", testCase.resource)
}
}
}
Example 15: TestPersistentClaimReadOnlyFlag
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("fc_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
lun := 0
pv := &api.PersistentVolume{
ObjectMeta: api.ObjectMeta{
Name: "pvA",
},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
FC: &api.FCVolumeSource{
TargetWWNs: []string{"some_wwn"},
FSType: "ext4",
Lun: &lun,
},
},
ClaimRef: &api.ObjectReference{
Name: "claimA",
},
},
}
claim := &api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: api.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: api.PersistentVolumeClaimStatus{
Phase: api.ClaimBound,
},
}
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(fcPluginName)
// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
if !builder.GetAttributes().ReadOnly {
t.Errorf("Expected true for builder.IsReadOnly")
}
}
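The fake clientset can also be made to fail on demand, which is handy for exercising error paths. Below is a hedged sketch, not taken from the examples above: the error text is invented, and it assumes the core.Fake embedded in the Clientset exposes PrependReactor (from the testing/core package) so the injected reactor runs before the default object-tracker reactor that NewSimpleClientset installs, and that the core group is reached via Core() in this tree.
func TestFakeClientsetErrorInjectionSketch(t *testing.T) {
	kubeClient := fake.NewSimpleClientset()
	// Hypothetical failure injection: any "create pods" call now returns the injected error.
	// Assumption: PrependReactor exists on the embedded Fake in this vintage.
	kubeClient.PrependReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("injected create failure")
	})
	// Assumption: Core() is the accessor for the core API group in this tree.
	_, err := kubeClient.Core().Pods("default").Create(&api.Pod{
		ObjectMeta: api.ObjectMeta{Name: "mypod"},
	})
	if err == nil {
		t.Fatalf("expected the injected error, got nil")
	}
}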