This article collects typical usage examples of the Go function fake.NewSimpleClientset from k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake. If you are unsure what NewSimpleClientset does, how to call it, or what real-world usage looks like, the curated examples below should help.
A total of 15 NewSimpleClientset code examples are shown, ordered by popularity by default.
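Before the individual examples, a minimal sketch of the common pattern may help: fake.NewSimpleClientset seeds an in-memory object tracker with the given API objects, the returned clientset serves reads and writes from that tracker, and every call is recorded as an action the test can assert on afterwards. The sketch assumes the same package aliases the examples use (fake for the generated fake clientset, v1 for pkg/api/v1); the pod name "demo-pod" and the test function itself are illustrative and not taken from any example on this page.
func TestNewSimpleClientsetSketch(t *testing.T) {
    // Seed the fake clientset with one pod; no API server is involved.
    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "demo-pod", Namespace: "default"}}
    client := fake.NewSimpleClientset(pod)
    // Reads are served from the in-memory object tracker.
    pods, err := client.Core().Pods("default").List(v1.ListOptions{})
    if err != nil {
        t.Fatalf("unexpected error listing pods: %v", err)
    }
    if len(pods.Items) != 1 {
        t.Errorf("expected 1 pod, got %d", len(pods.Items))
    }
    // Every call against the fake clientset is recorded and can be inspected.
    for _, action := range client.Actions() {
        t.Logf("recorded action: %s %s", action.GetVerb(), action.GetResource().Resource)
    }
}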
Example 1: TestPlugin
func TestPlugin(t *testing.T) {
    var (
        testPodUID     = types.UID("test_pod_uid")
        testVolumeName = "test_volume_name"
        testNamespace  = "test_configmap_namespace"
        testName       = "test_configmap_name"
        volumeSpec     = volumeSpec(testVolumeName, testName, 0644)
        configMap      = configMap(testNamespace, testName)
        client         = fake.NewSimpleClientset(&configMap)
        pluginMgr      = volume.VolumePluginMgr{}
        tempDir, host  = newTestHost(t, client)
    )
    defer os.RemoveAll(tempDir)
    pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
    plugin, err := pluginMgr.FindPluginByName(configMapPluginName)
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
    mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Errorf("Got a nil Mounter")
    }
    vName, err := plugin.GetVolumeName(volume.NewSpecFromVolume(volumeSpec))
    if err != nil {
        t.Errorf("Failed to GetVolumeName: %v", err)
    }
    if vName != "test_volume_name/test_configmap_name" {
        t.Errorf("Got unexpected VolumeName %v", vName)
    }
    volumePath := mounter.GetPath()
    if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) {
        t.Errorf("Got unexpected path: %s", volumePath)
    }
    fsGroup := int64(1001)
    err = mounter.SetUp(&fsGroup)
    if err != nil {
        t.Errorf("Failed to setup volume: %v", err)
    }
    if _, err := os.Stat(volumePath); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }
    doTestConfigMapDataInVolume(volumePath, configMap, t)
    doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
Example 2: TestDoNotDeleteMirrorPods
func TestDoNotDeleteMirrorPods(t *testing.T) {
    staticPod := getTestPod()
    staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
    mirrorPod := getTestPod()
    mirrorPod.UID = "mirror-12345678"
    mirrorPod.Annotations = map[string]string{
        kubetypes.ConfigSourceAnnotationKey: "api",
        kubetypes.ConfigMirrorAnnotationKey: "mirror",
    }
    // Set the deletion timestamp.
    mirrorPod.DeletionTimestamp = new(metav1.Time)
    client := fake.NewSimpleClientset(mirrorPod)
    m := newTestManager(client)
    m.podManager.AddPod(staticPod)
    m.podManager.AddPod(mirrorPod)
    // Verify setup.
    assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
    assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
    assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)
    status := getRandomPodStatus()
    now := metav1.Now()
    status.StartTime = &now
    m.SetPodStatus(staticPod, status)
    m.testSyncBatch()
    // Expect not to see a delete action.
    verifyActions(t, m.kubeClient, []core.Action{
        core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
        core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
    })
}
Example 3: TestGetNodeAddresses
// TestGetNodeAddresses verifies that proper results are returned
// when requesting node addresses.
func TestGetNodeAddresses(t *testing.T) {
    assert := assert.New(t)
    fakeNodeClient := fake.NewSimpleClientset(makeNodeList([]string{"node1", "node2"}, apiv1.NodeResources{})).Core().Nodes()
    addressProvider := nodeAddressProvider{fakeNodeClient}
    // Fail case (no addresses associated with nodes)
    nodes, _ := fakeNodeClient.List(apiv1.ListOptions{})
    addrs, err := addressProvider.externalAddresses()
    assert.Error(err, "addresses should have caused an error as there are no addresses.")
    assert.Equal([]string(nil), addrs)
    // Pass case with External type IP
    nodes, _ = fakeNodeClient.List(apiv1.ListOptions{})
    for index := range nodes.Items {
        nodes.Items[index].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeExternalIP, Address: "127.0.0.1"}}
        fakeNodeClient.Update(&nodes.Items[index])
    }
    addrs, err = addressProvider.externalAddresses()
    assert.NoError(err, "addresses should not have returned an error.")
    assert.Equal([]string{"127.0.0.1", "127.0.0.1"}, addrs)
    // Pass case with LegacyHost type IP
    nodes, _ = fakeNodeClient.List(apiv1.ListOptions{})
    for index := range nodes.Items {
        nodes.Items[index].Status.Addresses = []apiv1.NodeAddress{{Type: apiv1.NodeLegacyHostIP, Address: "127.0.0.2"}}
        fakeNodeClient.Update(&nodes.Items[index])
    }
    addrs, err = addressProvider.externalAddresses()
    assert.NoError(err, "addresses failback should not have returned an error.")
    assert.Equal([]string{"127.0.0.2", "127.0.0.2"}, addrs)
}
Example 4: TestPersistentClaimReadOnlyFlag
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
    if err != nil {
        t.Fatalf("error creating temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    pv := &v1.PersistentVolume{
        ObjectMeta: v1.ObjectMeta{
            Name: "pvA",
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
            },
            ClaimRef: &v1.ObjectReference{
                Name: "claimA",
            },
        },
    }
    claim := &v1.PersistentVolumeClaim{
        ObjectMeta: v1.ObjectMeta{
            Name:      "claimA",
            Namespace: "nsA",
        },
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "pvA",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }
    ep := &v1.Endpoints{
        ObjectMeta: v1.ObjectMeta{
            Namespace: "nsA",
            Name:      "ep",
        },
        Subsets: []v1.EndpointSubset{{
            Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
            Ports:     []v1.EndpointPort{{Name: "foo", Port: 80, Protocol: v1.ProtocolTCP}},
        }},
    }
    client := fake.NewSimpleClientset(pv, claim, ep)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
    plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)
    // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
    spec := volume.NewSpecFromPersistentVolume(pv, true)
    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
    mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
    if !mounter.GetAttributes().ReadOnly {
        t.Errorf("Expected true for mounter.IsReadOnly")
    }
}
Example 5: TestPersistentClaimReadOnlyFlag
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("iscsi_test")
    if err != nil {
        t.Fatalf("error creating temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    pv := &v1.PersistentVolume{
        ObjectMeta: v1.ObjectMeta{
            Name: "pvA",
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                ISCSI: &v1.ISCSIVolumeSource{
                    TargetPortal: "127.0.0.1:3260",
                    IQN:          "iqn.2014-12.server:storage.target01",
                    FSType:       "ext4",
                    Lun:          0,
                },
            },
            ClaimRef: &v1.ObjectReference{
                Name: "claimA",
            },
        },
    }
    claim := &v1.PersistentVolumeClaim{
        ObjectMeta: v1.ObjectMeta{
            Name:      "claimA",
            Namespace: "nsA",
        },
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "pvA",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }
    client := fake.NewSimpleClientset(pv, claim)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
    plug, _ := plugMgr.FindPluginByName(iscsiPluginName)
    // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
    spec := volume.NewSpecFromPersistentVolume(pv, true)
    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
    mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
    if !mounter.GetAttributes().ReadOnly {
        t.Errorf("Expected true for mounter.IsReadOnly")
    }
}
Example 6: TestPersistentClaimReadOnlyFlag
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("fc_test")
    if err != nil {
        t.Fatalf("error creating temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    lun := int32(0)
    pv := &v1.PersistentVolume{
        ObjectMeta: v1.ObjectMeta{
            Name: "pvA",
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                FC: &v1.FCVolumeSource{
                    TargetWWNs: []string{"some_wwn"},
                    FSType:     "ext4",
                    Lun:        &lun,
                },
            },
            ClaimRef: &v1.ObjectReference{
                Name: "claimA",
            },
        },
    }
    claim := &v1.PersistentVolumeClaim{
        ObjectMeta: v1.ObjectMeta{
            Name:      "claimA",
            Namespace: "nsA",
        },
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "pvA",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }
    client := fake.NewSimpleClientset(pv, claim)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
    plug, _ := plugMgr.FindPluginByName(fcPluginName)
    // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
    spec := volume.NewSpecFromPersistentVolume(pv, true)
    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
    mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
    if !mounter.GetAttributes().ReadOnly {
        t.Errorf("Expected true for mounter.IsReadOnly")
    }
}
Example 7: TestFederationQueryWithoutCache
func TestFederationQueryWithoutCache(t *testing.T) {
    kd := newKubeDNS()
    kd.config.Federations = map[string]string{
        "myfederation":     "example.com",
        "secondfederation": "second.example.com",
    }
    kd.kubeClient = fake.NewSimpleClientset(newNodes())
    testValidFederationQueries(t, kd)
    testInvalidFederationQueries(t, kd)
}
Example 8: TestSyncBatch
func TestSyncBatch(t *testing.T) {
    syncer := newTestManager(&fake.Clientset{})
    testPod := getTestPod()
    syncer.kubeClient = fake.NewSimpleClientset(testPod)
    syncer.SetPodStatus(testPod, getRandomPodStatus())
    syncer.testSyncBatch()
    verifyActions(t, syncer.kubeClient, []core.Action{
        core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
        core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
    },
    )
}
Example 9: TestPluginReboot
// Test the case where the plugin's ready file exists, but the volume dir is not a
// mountpoint, which is the state the system will be in after reboot. The dir
// should be mounted and the secret data written to it.
func TestPluginReboot(t *testing.T) {
    var (
        testPodUID     = types.UID("test_pod_uid3")
        testVolumeName = "test_volume_name"
        testNamespace  = "test_secret_namespace"
        testName       = "test_secret_name"
        volumeSpec     = volumeSpec(testVolumeName, testName, 0644)
        secret         = secret(testNamespace, testName)
        client         = fake.NewSimpleClientset(&secret)
        pluginMgr      = volume.VolumePluginMgr{}
        rootDir, host  = newTestHost(t, client)
    )
    defer os.RemoveAll(rootDir)
    pluginMgr.InitPlugins(ProbeVolumePlugins(), host)
    plugin, err := pluginMgr.FindPluginByName(secretPluginName)
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Namespace: testNamespace, UID: testPodUID}}
    mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        t.Errorf("Got a nil Mounter")
    }
    podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~secret/test_volume_name", rootDir)
    util.SetReady(podMetadataDir)
    volumePath := mounter.GetPath()
    if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid3/volumes/kubernetes.io~secret/test_volume_name")) {
        t.Errorf("Got unexpected path: %s", volumePath)
    }
    err = mounter.SetUp(nil)
    if err != nil {
        t.Errorf("Failed to setup volume: %v", err)
    }
    if _, err := os.Stat(volumePath); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }
    doTestSecretDataInVolume(volumePath, secret, t)
    doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
}
Example 10: TestUpdateNodeStatusError
func TestUpdateNodeStatusError(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    kubelet := testKubelet.kubelet
    // No matching node for the kubelet
    testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
    if err := kubelet.updateNodeStatus(); err == nil {
        t.Errorf("unexpected non error: %v", err)
    }
    if len(testKubelet.fakeKubeClient.Actions()) != nodeStatusUpdateRetry {
        t.Errorf("unexpected actions: %v", testKubelet.fakeKubeClient.Actions())
    }
}
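Example 10 replaces the fake client's entire ReactionChain to simulate an empty node list. A lighter-weight alternative, shown as a hedged sketch below, is to register a reactor on a freshly created fake clientset. This assumes core refers to the same testing core package aliased as core in the other examples and runtime to k8s.io/kubernetes/pkg/runtime (neither import is shown in the snippets above); the injected error message is purely illustrative.
client := fake.NewSimpleClientset()
// Intercept every update to the "nodes" resource and return an injected error,
// so the caller's error-handling and retry paths can be exercised.
client.PrependReactor("update", "nodes", func(action core.Action) (bool, runtime.Object, error) {
    return true, nil, fmt.Errorf("injected failure: %s %s", action.GetVerb(), action.GetResource().Resource)
})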
Example 11: TestServiceEvaluatorMatchesResources
func TestServiceEvaluatorMatchesResources(t *testing.T) {
    kubeClient := fake.NewSimpleClientset()
    evaluator := NewServiceEvaluator(kubeClient)
    expected := quota.ToSet([]api.ResourceName{
        api.ResourceServices,
        api.ResourceServicesNodePorts,
        api.ResourceServicesLoadBalancers,
    })
    actual := quota.ToSet(evaluator.MatchesResources())
    if !expected.Equal(actual) {
        t.Errorf("expected: %v, actual: %v", expected, actual)
    }
}
Example 12: TestReconcilePodStatus
func TestReconcilePodStatus(t *testing.T) {
    testPod := getTestPod()
    client := fake.NewSimpleClientset(testPod)
    syncer := newTestManager(client)
    syncer.SetPodStatus(testPod, getRandomPodStatus())
    // Call syncBatch directly to test reconcile
    syncer.syncBatch() // The apiStatusVersions should be set now
    podStatus, ok := syncer.GetPodStatus(testPod.UID)
    if !ok {
        t.Fatalf("Should find pod status for pod: %#v", testPod)
    }
    testPod.Status = podStatus
    // If the pod status is the same, a reconciliation is not needed and
    // syncBatch should do nothing.
    syncer.podManager.UpdatePod(testPod)
    if syncer.needsReconcile(testPod.UID, podStatus) {
        t.Errorf("Pod status is the same, a reconciliation is not needed")
    }
    client.ClearActions()
    syncer.syncBatch()
    verifyActions(t, client, []core.Action{})
    // If the pod status is the same and only the timestamp differs because it is
    // in RFC3339 format (lower precision, without nanoseconds), a reconciliation
    // is not needed and syncBatch should do nothing.
    // The StartTime should have been set in SetPodStatus().
    // TODO(random-liu): Remove this later when api becomes consistent for timestamp.
    normalizedStartTime := testPod.Status.StartTime.Rfc3339Copy()
    testPod.Status.StartTime = &normalizedStartTime
    syncer.podManager.UpdatePod(testPod)
    if syncer.needsReconcile(testPod.UID, podStatus) {
        t.Errorf("Pod status only differs for timestamp format, a reconciliation is not needed")
    }
    client.ClearActions()
    syncer.syncBatch()
    verifyActions(t, client, []core.Action{})
    // If the pod status is different, a reconciliation is needed and syncBatch
    // should trigger an update.
    testPod.Status = getRandomPodStatus()
    syncer.podManager.UpdatePod(testPod)
    if !syncer.needsReconcile(testPod.UID, podStatus) {
        t.Errorf("Pod status is different, a reconciliation is needed")
    }
    client.ClearActions()
    syncer.syncBatch()
    verifyActions(t, client, []core.Action{
        core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
        core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
    })
}
Example 13: TestSyncResourceQuotaNoChange
func TestSyncResourceQuotaNoChange(t *testing.T) {
    resourceQuota := v1.ResourceQuota{
        ObjectMeta: v1.ObjectMeta{
            Namespace: "default",
            Name:      "rq",
        },
        Spec: v1.ResourceQuotaSpec{
            Hard: v1.ResourceList{
                v1.ResourceCPU: resource.MustParse("4"),
            },
        },
        Status: v1.ResourceQuotaStatus{
            Hard: v1.ResourceList{
                v1.ResourceCPU: resource.MustParse("4"),
            },
            Used: v1.ResourceList{
                v1.ResourceCPU: resource.MustParse("0"),
            },
        },
    }
    kubeClient := fake.NewSimpleClientset(&v1.PodList{}, &resourceQuota)
    resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
        KubeClient:   kubeClient,
        ResyncPeriod: controller.NoResyncPeriodFunc,
        Registry:     install.NewRegistry(kubeClient, nil),
        GroupKindsToReplenish: []schema.GroupKind{
            api.Kind("Pod"),
            api.Kind("Service"),
            api.Kind("ReplicationController"),
            api.Kind("PersistentVolumeClaim"),
        },
        ControllerFactory:         NewReplenishmentControllerFactoryFromClient(kubeClient),
        ReplenishmentResyncPeriod: controller.NoResyncPeriodFunc,
    }
    quotaController := NewResourceQuotaController(resourceQuotaControllerOptions)
    err := quotaController.syncResourceQuota(resourceQuota)
    if err != nil {
        t.Fatalf("Unexpected error %v", err)
    }
    expectedActionSet := sets.NewString(
        strings.Join([]string{"list", "pods", ""}, "-"),
    )
    actionSet := sets.NewString()
    for _, action := range kubeClient.Actions() {
        actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
    }
    if !actionSet.HasAll(expectedActionSet.List()...) {
        t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
    }
}
Example 14: TestSyncBatchChecksMismatchedUID
func TestSyncBatchChecksMismatchedUID(t *testing.T) {
    syncer := newTestManager(&fake.Clientset{})
    pod := getTestPod()
    pod.UID = "first"
    syncer.podManager.AddPod(pod)
    differentPod := getTestPod()
    differentPod.UID = "second"
    syncer.podManager.AddPod(differentPod)
    syncer.kubeClient = fake.NewSimpleClientset(pod)
    syncer.SetPodStatus(differentPod, getRandomPodStatus())
    syncer.testSyncBatch()
    verifyActions(t, syncer.kubeClient, []core.Action{
        core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
    })
}
Example 15: TestPersistentClaimReadOnlyFlag
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
    pv := &v1.PersistentVolume{
        ObjectMeta: v1.ObjectMeta{
            Name: "pvA",
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                Quobyte: &v1.QuobyteVolumeSource{Registry: "reg:7861", Volume: "vol", ReadOnly: false, User: "root", Group: "root"},
            },
            ClaimRef: &v1.ObjectReference{
                Name: "claimA",
            },
        },
    }
    claim := &v1.PersistentVolumeClaim{
        ObjectMeta: v1.ObjectMeta{
            Name:      "claimA",
            Namespace: "nsA",
        },
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "pvA",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }
    tmpDir, err := utiltesting.MkTmpdir("quobyte_test")
    if err != nil {
        t.Fatalf("error creating temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    client := fake.NewSimpleClientset(pv, claim)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
    plug, _ := plugMgr.FindPluginByName(quobytePluginName)
    // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
    spec := volume.NewSpecFromPersistentVolume(pv, true)
    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{UID: types.UID("poduid")}}
    mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
    if !mounter.GetAttributes().ReadOnly {
        t.Errorf("Expected true for mounter.IsReadOnly")
    }
}