This article collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset.NewForConfigOrDie. If you are wondering what NewForConfigOrDie does, how to call it, and what idiomatic uses look like, the curated examples below should help.
The following 15 code examples of NewForConfigOrDie are shown, ordered by popularity by default.
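Before diving in, here is a minimal sketch of what the function does, using a placeholder API server address: NewForConfigOrDie builds the same internal Clientset as NewForConfig but panics instead of returning an error, which keeps one-off setup and test code short. The examples below rely on exactly this behavior.
import (
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/restclient"
)

func newClients() {
	config := &restclient.Config{Host: "http://127.0.0.1:8080"} // placeholder address

	// NewForConfig returns (*Clientset, error) and leaves error handling
	// to the caller.
	cs, err := internalclientset.NewForConfig(config)
	if err != nil {
		panic(err) // e.g. t.Fatalf(...) in a test
	}
	_ = cs

	// NewForConfigOrDie performs the same construction but panics on a bad
	// config itself, so test fixtures can build clients in one expression.
	cs2 := internalclientset.NewForConfigOrDie(config)
	_ = cs2
}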
Example 1: rmSetup
func rmSetup(t *testing.T, enableGarbageCollector bool) (*httptest.Server, *replication.ReplicationManager, cache.SharedIndexInformer, clientset.Interface) {
masterConfig := framework.NewIntegrationTestMasterConfig()
_, s := framework.RunAMaster(masterConfig)
config := restclient.Config{Host: s.URL}
clientSet, err := clientset.NewForConfig(&config)
if err != nil {
t.Fatalf("Error in create clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
resyncPeriodFunc := func() time.Duration {
return resyncPeriod
}
podInformer := informers.NewPodInformer(internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer")), resyncPeriod)
rm := replication.NewReplicationManager(
podInformer,
internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replication-controller")),
resyncPeriodFunc,
replication.BurstReplicas,
4096,
enableGarbageCollector,
)
return s, rm, podInformer, clientSet
}
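Note the restclient.AddUserAgent calls in rmSetup: each clientset is built from a config tagged with a component-specific user agent, so requests from the pod informer and the replication controller can be told apart in apiserver logs. A condensed sketch of the pattern, again with a placeholder host:
config := restclient.Config{Host: "http://127.0.0.1:8080"} // placeholder address
// AddUserAgent tags the config and returns it; each clientset captures the
// config values when it is constructed, so the second tag does not affect
// the client that was already built.
podClient := internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "pod-informer"))
rcClient := internalclientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "replication-controller"))
_, _ = podClient, rcClient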
Example 2: createClients
func createClients(t *testing.T, s *httptest.Server) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
// Use a higher QPS and Burst: one test exercises race conditions by
// creating many objects, and the default limits were too low for it.
binderClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000000, Burst: 1000000})
testClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000000, Burst: 1000000})
host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
plugins := []volume.VolumePlugin{&volumetest.FakeVolumePlugin{
PluginName: "plugin-name",
Host: host,
Config: volume.VolumeConfig{},
LastProvisionerOptions: volume.VolumeOptions{},
NewAttacherCallCount: 0,
NewDetacherCallCount: 0,
Mounters: nil,
Unmounters: nil,
Attachers: nil,
Detachers: nil,
}}
cloud := &fake_cloud.FakeCloud{}
syncPeriod := getSyncPeriod()
ctrl := persistentvolumecontroller.NewPersistentVolumeController(binderClient, syncPeriod, nil, plugins, cloud, "", nil, nil, nil)
watchPV, err := testClient.PersistentVolumes().Watch(api.ListOptions{})
if err != nil {
t.Fatalf("Failed to watch PersistentVolumes: %v", err)
}
watchPVC, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Watch(api.ListOptions{})
if err != nil {
t.Fatalf("Failed to watch PersistentVolumeClaimss: %v", err)
}
return testClient, ctrl, watchPV, watchPVC
}
Example 3: createClients
func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
// Use a higher QPS and Burst: one test exercises race conditions by
// creating many objects, and the default limits were too low for it.
binderClient := clientset.NewForConfigOrDie(&restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
QPS: 1000000,
Burst: 1000000,
})
testClient := clientset.NewForConfigOrDie(&restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
QPS: 1000000,
Burst: 1000000,
})
host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil, "" /* rootContext */)
plugin := &volumetest.FakeVolumePlugin{
PluginName: provisionerPluginName,
Host: host,
Config: volume.VolumeConfig{},
LastProvisionerOptions: volume.VolumeOptions{},
NewAttacherCallCount: 0,
NewDetacherCallCount: 0,
Mounters: nil,
Unmounters: nil,
Attachers: nil,
Detachers: nil,
}
plugins := []volume.VolumePlugin{plugin}
cloud := &fake_cloud.FakeCloud{}
syncPeriod = getSyncPeriod(syncPeriod)
ctrl := persistentvolumecontroller.NewPersistentVolumeController(
binderClient,
syncPeriod,
nil, // alpha provisioner
plugins,
cloud,
"", // cluster name
nil, // volumeSource
nil, // claimSource
nil, // classSource
nil, // eventRecorder
true) // enableDynamicProvisioning
watchPV, err := testClient.PersistentVolumes().Watch(api.ListOptions{})
if err != nil {
t.Fatalf("Failed to watch PersistentVolumes: %v", err)
}
watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(api.ListOptions{})
if err != nil {
t.Fatalf("Failed to watch PersistentVolumeClaimss: %v", err)
}
return testClient, ctrl, watchPV, watchPVC
}
Example 4: createClients
func createClients(ns *api.Namespace, t *testing.T, s *httptest.Server, syncPeriod time.Duration) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
// Use a higher QPS and Burst: one test exercises race conditions by
// creating many objects, and the default limits were too low for it.
binderClient := clientset.NewForConfigOrDie(&restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion},
QPS: 1000000,
Burst: 1000000,
})
testClient := clientset.NewForConfigOrDie(&restclient.Config{
Host: s.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion},
QPS: 1000000,
Burst: 1000000,
})
host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
plugin := &volumetest.FakeVolumePlugin{
PluginName: provisionerPluginName,
Host: host,
Config: volume.VolumeConfig{},
LastProvisionerOptions: volume.VolumeOptions{},
NewAttacherCallCount: 0,
NewDetacherCallCount: 0,
Mounters: nil,
Unmounters: nil,
Attachers: nil,
Detachers: nil,
}
plugins := []volume.VolumePlugin{plugin}
cloud := &fake_cloud.FakeCloud{}
ctrl := persistentvolumecontroller.NewController(
persistentvolumecontroller.ControllerParameters{
KubeClient: binderClient,
SyncPeriod: getSyncPeriod(syncPeriod),
VolumePlugins: plugins,
Cloud: cloud,
EnableDynamicProvisioning: true,
})
watchPV, err := testClient.PersistentVolumes().Watch(api.ListOptions{})
if err != nil {
t.Fatalf("Failed to watch PersistentVolumes: %v", err)
}
watchPVC, err := testClient.PersistentVolumeClaims(ns.Name).Watch(api.ListOptions{})
if err != nil {
t.Fatalf("Failed to watch PersistentVolumeClaimss: %v", err)
}
return testClient, ctrl, watchPV, watchPVC
}
Example 5: TestDSManagerInit
func TestDSManagerInit(t *testing.T) {
// Insert a stable daemon set and, after a simulated restart, make sure we
// don't create an extra pod for the one node that already runs a daemon.
ds := newDaemonSet("test")
ds.Status = extensions.DaemonSetStatus{
CurrentNumberScheduled: 1,
NumberMisscheduled: 0,
DesiredNumberScheduled: 1,
}
nodeName := "only-node"
podList := &api.PodList{
Items: []api.Pod{
*newPod("podname", nodeName, simpleDaemonSetLabel),
}}
response := runtime.EncodeOrDie(testapi.Default.Codec(), podList)
fakeHandler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: response,
}
testServer := httptest.NewServer(&fakeHandler)
// TODO: Uncomment once #19254 is fixed.
// defer testServer.Close()
clientset := clientset.NewForConfigOrDie(&client.Config{Host: testServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
manager := NewDaemonSetsController(clientset, controller.NoResyncPeriodFunc)
manager.dsStore.Add(ds)
manager.nodeStore.Add(newNode(nodeName, nil))
manager.podStoreSynced = alwaysReady
controller.SyncAllPodsWithStore(manager.kubeClient, manager.podStore.Store)
fakePodControl := &controller.FakePodControl{}
manager.podControl = fakePodControl
manager.syncHandler(getKey(ds, t))
validateSyncDaemonSets(t, fakePodControl, 0, 0)
}
Example 6: Setup
func (c *Client) Setup(t *testing.T) *Client {
c.handler = &utiltesting.FakeHandler{
StatusCode: c.Response.StatusCode,
}
if responseBody := body(t, c.Response.Body, c.Response.RawBody); responseBody != nil {
c.handler.ResponseBody = *responseBody
}
c.server = httptest.NewServer(c.handler)
if c.Client == nil {
c.Client = client.NewOrDie(&client.Config{
Host: c.server.URL,
ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
})
// TODO: caesarxuchao: hacky way to specify version of Experimental client.
// We will fix this by supporting multiple group versions in Config
c.ExtensionsClient = client.NewExtensionsOrDie(&client.Config{
Host: c.server.URL,
ContentConfig: client.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()},
})
c.Clientset = clientset.NewForConfigOrDie(&client.Config{Host: c.server.URL})
}
c.QueryValidator = map[string]func(string, string) bool{}
return c
}
Example 7: TestSyncJobExpectations
// TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestSyncJobExpectations(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
manager.updateHandler = func(job *extensions.Job) error { return nil }
job := newJob(2, 2)
manager.jobStore.Store.Add(job)
pods := newPodList(2, api.PodPending, job)
manager.podStore.Store.Add(&pods[0])
manager.expectations = FakeJobExpectations{
controller.NewControllerExpectations(), true, func() {
// If we check active pods before checking expectations, the job
// will create a new replica because it doesn't see this pod, but
// has fulfilled its expectations.
manager.podStore.Store.Add(&pods[1])
},
}
manager.syncJob(getKey(job, t))
if len(fakePodControl.Templates) != 0 {
t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
}
if len(fakePodControl.DeletePodName) != 0 {
t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
}
}
Example 8: TestSyncPastDeadlineJobFinished
func TestSyncPastDeadlineJobFinished(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
manager := NewJobController(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
var actual *extensions.Job
manager.updateHandler = func(job *extensions.Job) error {
actual = job
return nil
}
job := newJob(1, 1)
activeDeadlineSeconds := int64(10)
job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
start := unversioned.Unix(unversioned.Now().Time.Unix()-15, 0)
job.Status.StartTime = &start
job.Status.Conditions = append(job.Status.Conditions, newCondition(extensions.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
manager.jobStore.Store.Add(job)
err := manager.syncJob(getKey(job, t))
if err != nil {
t.Errorf("Unexpected error when syncing jobs %v", err)
}
if len(fakePodControl.Templates) != 0 {
t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
}
if len(fakePodControl.DeletePodName) != 0 {
t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
}
if actual != nil {
t.Error("Unexpected job modification")
}
}
Example 9: TestSyncJobUpdateRequeue
func TestSyncJobUpdateRequeue(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
updateError := fmt.Errorf("Update error")
manager.updateHandler = func(job *batch.Job) error {
manager.queue.AddRateLimited(getKey(job, t))
return updateError
}
job := newJob(2, 2)
sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
err := manager.syncJob(getKey(job, t))
if err == nil || err != updateError {
t.Errorf("Expected error %v when syncing jobs, got %v", updateError, err)
}
t.Log("Waiting for a job in the queue")
key, _ := manager.queue.Get()
expectedKey := getKey(job, t)
if key != expectedKey {
t.Errorf("Expected requeue of job with key %s got %s", expectedKey, key)
}
}
Example 10: TestBind
func TestBind(t *testing.T) {
table := []struct {
binding *api.Binding
}{
{binding: &api.Binding{
ObjectMeta: api.ObjectMeta{
Namespace: api.NamespaceDefault,
Name: "foo",
},
Target: api.ObjectReference{
Name: "foohost.kubernetes.mydomain.com",
},
}},
}
for _, item := range table {
handler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: "",
T: t,
}
server := httptest.NewServer(&handler)
defer server.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
b := binder{client}
if err := b.Bind(item.binding); err != nil {
t.Errorf("Unexpected error: %v", err)
continue
}
expectedBody := runtime.EncodeOrDie(testapi.Default.Codec(), item.binding)
handler.ValidateRequest(t, testapi.Default.ResourcePath("bindings", api.NamespaceDefault, ""), "POST", &expectedBody)
}
}
Example 11: TestRCManagerNotReady
func TestRCManagerNotReady(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
fakePodControl := controller.FakePodControl{}
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0)
manager.podControl = &fakePodControl
manager.podStoreSynced = func() bool { return false }
// Simulates the rc reflector running before the pod reflector. We don't
// want to end up creating replicas in this case until the pod reflector
// has synced, so the rc manager should just requeue the rc.
controllerSpec := newReplicationController(1)
manager.rcStore.Indexer.Add(controllerSpec)
rcKey := getKey(controllerSpec, t)
manager.syncReplicationController(rcKey)
validateSyncReplication(t, &fakePodControl, 0, 0, 0)
queueRC, _ := manager.queue.Get()
if queueRC != rcKey {
t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
}
manager.podStoreSynced = alwaysReady
manager.syncReplicationController(rcKey)
validateSyncReplication(t, &fakePodControl, 1, 0, 0)
}
Example 12: TestOverlappingRSs
func TestOverlappingRSs(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
labelMap := map[string]string{"foo": "bar"}
for i := 0; i < 5; i++ {
manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, 10, 0)
manager.podStoreSynced = alwaysReady
// Create ReplicaSets with staggered creation timestamps, shuffle them randomly, and insert them into the ReplicaSet controller's store
var controllers []*extensions.ReplicaSet
for j := 1; j < 10; j++ {
rsSpec := newReplicaSet(1, labelMap)
rsSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
rsSpec.Name = string(uuid.NewUUID())
controllers = append(controllers, rsSpec)
}
shuffledControllers := shuffle(controllers)
for j := range shuffledControllers {
manager.rsStore.Store.Add(shuffledControllers[j])
}
// Add a pod and make sure only the oldest ReplicaSet is synced
pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0], "pod")
rsKey := getKey(controllers[0], t)
manager.addPod(&pods.Items[0])
queueRS, _ := manager.queue.Get()
if queueRS != rsKey {
t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
}
}
}
Example 13: TestSyncEndpointsItemsPreexistingIdentical
func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
ns := api.NamespaceDefault
testServer, endpointsHandler := makeTestServer(t, api.NamespaceDefault,
serverResponse{http.StatusOK, &api.Endpoints{
ObjectMeta: api.ObjectMeta{
ResourceVersion: "1",
Name: "foo",
Namespace: ns,
},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4", TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
}},
}})
defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
addPods(endpoints.podStore.Store, api.NamespaceDefault, 1, 1, 0)
endpoints.serviceStore.Store.Add(&api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
Spec: api.ServiceSpec{
Selector: map[string]string{"foo": "bar"},
Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
},
})
endpoints.syncService(ns + "/foo")
endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", api.NamespaceDefault, "foo"), "GET", nil)
}
Example 14: TestSyncEndpointsItemsPreserveNoSelector
func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
ns := api.NamespaceDefault
testServer, endpointsHandler := makeTestServer(t, ns,
serverResponse{http.StatusOK, &api.Endpoints{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Namespace: ns,
ResourceVersion: "1",
},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
Ports: []api.EndpointPort{{Port: 1000}},
}},
}})
defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
endpoints.serviceStore.Store.Add(&api.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Port: 80}}},
})
endpoints.syncService(ns + "/foo")
endpointsHandler.ValidateRequestCount(t, 0)
}
Example 15: TestCheckLeftoverEndpoints
func TestCheckLeftoverEndpoints(t *testing.T) {
ns := api.NamespaceDefault
// Note that this requests *all* endpoints, therefore the NamespaceAll
// below.
testServer, _ := makeTestServer(t, api.NamespaceAll,
serverResponse{http.StatusOK, &api.EndpointsList{
ListMeta: unversioned.ListMeta{
ResourceVersion: "1",
},
Items: []api.Endpoints{{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Namespace: ns,
ResourceVersion: "1",
},
Subsets: []api.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
Ports: []api.EndpointPort{{Port: 1000}},
}},
}},
}})
defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady
endpoints.checkLeftoverEndpoints()
if e, a := 1, endpoints.queue.Len(); e != a {
t.Fatalf("Expected %v, got %v", e, a)
}
got, _ := endpoints.queue.Get()
if e, a := ns+"/foo", got; e != a {
t.Errorf("Expected %v, got %v", e, a)
}
}