This article collects typical usage examples of the Golang function NewOrDie from k8s.io/kubernetes/pkg/client/unversioned. If you are wondering what NewOrDie does, how to call it, or what real uses of it look like, the curated function examples below should help.
15 code examples of the NewOrDie function are shown below, sorted by popularity by default.
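For orientation before the examples: NewOrDie constructs a client from a config and panics if the config is invalid (hence the name), which is why the test and bootstrap code below calls it without error handling. The following is a minimal, self-contained sketch of that shared pattern, illustrative only: the import paths and the restclient.Config/ContentConfig form follow the newer examples on this page (Examples 7, 8, and 9; older releases instead set Version directly on client.Config, as in Examples 1 and 13), and apiServerURL is a hypothetical placeholder.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/testapi"
    "k8s.io/kubernetes/pkg/client/restclient"
    client "k8s.io/kubernetes/pkg/client/unversioned"
)

func main() {
    apiServerURL := "http://127.0.0.1:8080" // hypothetical API server address

    // NewOrDie panics instead of returning an error, so no err check is needed here.
    c := client.NewOrDie(&restclient.Config{
        Host:          apiServerURL,
        ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
    })

    // The returned client exposes typed accessors (Pods, Services, Nodes, ...);
    // this List call mirrors Example 9. testapi is a test helper, used here only
    // to stay consistent with the examples below.
    svcs, err := c.Services(api.NamespaceDefault).List(api.ListOptions{})
    if err != nil {
        fmt.Printf("listing services failed: %v\n", err)
        return
    }
    fmt.Printf("found %d services\n", len(svcs.Items))
}

The panic-on-error behavior keeps test setup terse; code that needs graceful failure would use the error-returning constructor (client.New in this release) instead.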
Example 1: TestPersistentVolumeDeleter
func TestPersistentVolumeDeleter(t *testing.T) {
    _, s := runAMaster(t)
    defer s.Close()

    deleteAllEtcdKeys()
    binderClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    recyclerClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    testClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})

    binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(binderClient, 1*time.Second)
    binder.Run()
    defer binder.Stop()

    recycler, _ := volumeclaimbinder.NewPersistentVolumeRecycler(recyclerClient, 1*time.Second, []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", volume.NewFakeVolumeHost("/tmp/fake", nil, nil)}})
    recycler.Run()
    defer recycler.Stop()

    // This PV will be claimed, released, and recycled.
    pv := &api.PersistentVolume{
        ObjectMeta: api.ObjectMeta{Name: "fake-pv"},
        Spec: api.PersistentVolumeSpec{
            PersistentVolumeSource:        api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp/foo"}},
            Capacity:                      api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
            AccessModes:                   []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
            PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
        },
    }

    pvc := &api.PersistentVolumeClaim{
        ObjectMeta: api.ObjectMeta{Name: "fake-pvc"},
        Spec: api.PersistentVolumeClaimSpec{
            Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("5G")}},
            AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
        },
    }

    w, _ := testClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), "0")
    defer w.Stop()

    _, _ = testClient.PersistentVolumes().Create(pv)
    _, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)

    // wait until the binder pairs the volume and claim
    waitForPersistentVolumePhase(w, api.VolumeBound)

    // deleting a claim releases the volume, after which it can be recycled
    if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
        t.Errorf("error deleting claim %s", pvc.Name)
    }
    waitForPersistentVolumePhase(w, api.VolumeReleased)

    for {
        event := <-w.ResultChan()
        if event.Type == watch.Deleted {
            break
        }
    }
}
Example 2: NewKubeletProvider
func NewKubeletProvider(uri *url.URL) (MetricsSourceProvider, error) {
    // create clients
    kubeConfig, kubeletConfig, err := GetKubeConfigs(uri)
    if err != nil {
        return nil, err
    }
    kubeClient := kube_client.NewOrDie(kubeConfig)
    kubeletClient, err := NewKubeletClient(kubeletConfig)
    if err != nil {
        return nil, err
    }

    // Get nodes to test if the client is configured well. Watch gives less error information.
    if _, err := kubeClient.Nodes().List(kube_api.ListOptions{
        LabelSelector: labels.Everything(),
        FieldSelector: fields.Everything()}); err != nil {
        glog.Errorf("Failed to load nodes: %v", err)
    }

    // watch nodes
    lw := cache.NewListWatchFromClient(kubeClient, "nodes", kube_api.NamespaceAll, fields.Everything())
    nodeLister := &cache.StoreToNodeLister{Store: cache.NewStore(cache.MetaNamespaceKeyFunc)}
    reflector := cache.NewReflector(lw, &kube_api.Node{}, nodeLister.Store, time.Hour)
    reflector.Run()

    return &kubeletProvider{
        nodeLister:    nodeLister,
        reflector:     reflector,
        kubeletClient: kubeletClient,
    }, nil
}
Example 3: newTestController
func newTestController() (*DaemonSetsController, *FakePodControl) {
    client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Experimental.Version()})
    manager := NewDaemonSetsController(client)
    podControl := &FakePodControl{}
    manager.podControl = podControl
    return manager, podControl
}
Example 4: TestSyncJobExpectations
// TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestSyncJobExpectations(t *testing.T) {
    client := client.NewOrDie(&client.Config{Host: "", GroupVersion: testapi.Default.GroupVersion()})
    manager := NewJobController(client, controller.NoResyncPeriodFunc)
    fakePodControl := controller.FakePodControl{}
    manager.podControl = &fakePodControl
    manager.podStoreSynced = alwaysReady
    manager.updateHandler = func(job *extensions.Job) error { return nil }

    job := newJob(2, 2)
    manager.jobStore.Store.Add(job)
    pods := newPodList(2, api.PodPending, job)
    manager.podStore.Store.Add(&pods[0])
    manager.expectations = FakeJobExpectations{
        controller.NewControllerExpectations(), true, func() {
            // If we check active pods before checking expectations, the job
            // will create a new replica because it doesn't see this pod, but
            // has fulfilled its expectations.
            manager.podStore.Store.Add(&pods[1])
        },
    }
    manager.syncJob(getKey(job, t))
    if len(fakePodControl.Templates) != 0 {
        t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
    }
    if len(fakePodControl.DeletePodName) != 0 {
        t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
    }
}
Example 5: TestRCManagerNotReady
func TestRCManagerNotReady(t *testing.T) {
    // Setup a fake server to listen for requests, and run the rc manager in steady state
    fakeResponse := serverResponse{
        statusCode: 200,
        obj:        &api.ReplicationController{},
    }
    testServer, _ := makeTestServer(t, api.NamespaceDefault, api.TenantDefault, fakeResponse)
    defer testServer.Close()
    client := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Default.Version()})
    fakePodControl := controller.FakePodControl{}
    manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, 2)
    manager.podControl = &fakePodControl
    manager.podStoreSynced = func() bool { return false }

    // Simulates the rc reflector running before the pod reflector. We don't
    // want to end up creating replicas in this case until the pod reflector
    // has synced, so the rc manager should just requeue the rc.
    controllerSpec := newReplicationController(1)
    manager.rcStore.Store.Add(controllerSpec)
    rcKey := getKey(controllerSpec, t)
    manager.syncReplicationController(rcKey)
    validateSyncReplication(t, &fakePodControl, 0, 0)

    queueRC, _ := manager.queue.Get()
    if queueRC != rcKey {
        t.Fatalf("Expected to find key %v in queue, found %v", rcKey, queueRC)
    }

    manager.podStoreSynced = alwaysReady
    manager.syncReplicationController(rcKey)
    validateSyncReplication(t, &fakePodControl, 1, 0)
}
Example 6: Setup
func (c *Client) Setup(t *testing.T) *Client {
    c.handler = &utiltesting.FakeHandler{
        StatusCode: c.Response.StatusCode,
    }
    if responseBody := body(t, c.Response.Body, c.Response.RawBody); responseBody != nil {
        c.handler.ResponseBody = *responseBody
    }
    c.server = httptest.NewServer(c.handler)
    if c.Client == nil {
        c.Client = client.NewOrDie(&client.Config{
            Host:          c.server.URL,
            ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
        })

        // TODO: caesarxuchao: hacky way to specify version of Experimental client.
        // We will fix this by supporting multiple group versions in Config
        c.ExtensionsClient = client.NewExtensionsOrDie(&client.Config{
            Host:          c.server.URL,
            ContentConfig: client.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()},
        })

        c.Clientset = clientset.NewForConfigOrDie(&client.Config{Host: c.server.URL})
    }
    c.QueryValidator = map[string]func(string, string) bool{}
    return c
}
Example 7: TestPodReadOnlyFilesystem
func TestPodReadOnlyFilesystem(t *testing.T) {
    _, s := framework.RunAMaster(nil)
    defer s.Close()

    isReadOnly := true
    ns := framework.CreateTestingNamespace("pod-readonly-root", s, t)
    defer framework.DeleteTestingNamespace(ns, s, t)

    client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            Name: "xxx",
        },
        Spec: api.PodSpec{
            Containers: []api.Container{
                {
                    Name:  "fake-name",
                    Image: "fakeimage",
                    SecurityContext: &api.SecurityContext{
                        ReadOnlyRootFilesystem: &isReadOnly,
                    },
                },
            },
        },
    }

    if _, err := client.Pods(ns.Name).Create(pod); err != nil {
        t.Errorf("Failed to create pod: %v", err)
    }
    deletePodOrErrorf(t, client, ns.Name, pod.Name)
}
Example 8: TestUnschedulableNodes
func TestUnschedulableNodes(t *testing.T) {
    framework.DeleteAllEtcdKeys()

    var m *master.Master
    s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        m.Handler.ServeHTTP(w, req)
    }))
    defer s.Close()

    masterConfig := framework.NewIntegrationTestMasterConfig()
    m, err := master.New(masterConfig)
    if err != nil {
        t.Fatalf("Error in bringing up the master: %v", err)
    }

    restClient := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

    schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
    schedulerConfig, err := schedulerConfigFactory.Create()
    if err != nil {
        t.Fatalf("Couldn't create scheduler config: %v", err)
    }
    eventBroadcaster := record.NewBroadcaster()
    schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
    eventBroadcaster.StartRecordingToSink(restClient.Events(""))
    scheduler.New(schedulerConfig).Run()
    defer close(schedulerConfig.StopEverything)

    DoTestUnschedulableNodes(t, restClient, schedulerConfigFactory.NodeLister.Store)
}
Example 9: TestMasterService
func TestMasterService(t *testing.T) {
    _, s := framework.RunAMaster(framework.NewIntegrationTestMasterConfig())
    defer s.Close()

    client := client.NewOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

    err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
        svcList, err := client.Services(api.NamespaceDefault).List(api.ListOptions{})
        if err != nil {
            t.Errorf("unexpected error: %v", err)
            return false, nil
        }
        found := false
        for i := range svcList.Items {
            if svcList.Items[i].Name == "kubernetes" {
                found = true
            }
        }
        if found {
            ep, err := client.Endpoints(api.NamespaceDefault).Get("kubernetes")
            if err != nil {
                return false, nil
            }
            if countEndpoints(ep) == 0 {
                return false, fmt.Errorf("no endpoints for kubernetes service: %v", ep)
            }
            return true, nil
        }
        return false, nil
    })
    if err != nil {
        t.Errorf("unexpected error: %v", err)
    }
}
Example 10: TestSyncPastDeadlineJobFinished
func TestSyncPastDeadlineJobFinished(t *testing.T) {
    client := client.NewOrDie(&client.Config{Host: "", ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
    manager := NewJobController(client, controller.NoResyncPeriodFunc)
    fakePodControl := controller.FakePodControl{}
    manager.podControl = &fakePodControl
    manager.podStoreSynced = alwaysReady
    var actual *extensions.Job
    manager.updateHandler = func(job *extensions.Job) error {
        actual = job
        return nil
    }

    job := newJob(1, 1)
    activeDeadlineSeconds := int64(10)
    job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
    start := unversioned.Unix(unversioned.Now().Time.Unix()-15, 0)
    job.Status.StartTime = &start
    job.Status.Conditions = append(job.Status.Conditions, newCondition(extensions.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
    manager.jobStore.Store.Add(job)
    err := manager.syncJob(getKey(job, t))
    if err != nil {
        t.Errorf("Unexpected error when syncing jobs %v", err)
    }
    if len(fakePodControl.Templates) != 0 {
        t.Errorf("Unexpected number of creates. Expected %d, saw %d\n", 0, len(fakePodControl.Templates))
    }
    if len(fakePodControl.DeletePodName) != 0 {
        t.Errorf("Unexpected number of deletes. Expected %d, saw %d\n", 0, len(fakePodControl.DeletePodName))
    }
    if actual != nil {
        t.Error("Unexpected job modification")
    }
}
Example 11: TestSyncJobUpdateRequeue
func TestSyncJobUpdateRequeue(t *testing.T) {
    client := client.NewOrDie(&client.Config{Host: "", Version: testapi.Default.GroupAndVersion()})
    manager := NewJobController(client, controller.NoResyncPeriodFunc)
    fakePodControl := controller.FakePodControl{}
    manager.podControl = &fakePodControl
    manager.podStoreSynced = alwaysReady
    manager.updateHandler = func(job *extensions.Job) error { return fmt.Errorf("Fake error") }
    job := newJob(2, 2)
    manager.jobStore.Store.Add(job)
    err := manager.syncJob(getKey(job, t))
    if err != nil {
        t.Errorf("Unexpected error when syncing jobs, got %v", err)
    }

    ch := make(chan interface{})
    go func() {
        item, _ := manager.queue.Get()
        ch <- item
    }()
    select {
    case key := <-ch:
        expectedKey := getKey(job, t)
        if key != expectedKey {
            t.Errorf("Expected requeue of job with key %s got %s", expectedKey, key)
        }
    case <-time.After(controllerTimeout):
        manager.queue.ShutDown()
        t.Errorf("Expected to find a job in the queue, found none.")
    }
}
Example 12: TestSyncEndpointsItemsPreserveNoSelector
func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
    ns := api.NamespaceDefault
    testServer, endpointsHandler := makeTestServer(t, ns,
        serverResponse{http.StatusOK, &api.Endpoints{
            ObjectMeta: api.ObjectMeta{
                Name:            "foo",
                Namespace:       ns,
                ResourceVersion: "1",
            },
            Subsets: []api.EndpointSubset{{
                Addresses: []api.EndpointAddress{{IP: "6.7.8.9"}},
                Ports:     []api.EndpointPort{{Port: 1000}},
            }},
        }})
    // TODO: Uncomment when fix #19254
    // defer testServer.Close()
    client := client.NewOrDie(&client.Config{Host: testServer.URL, GroupVersion: testapi.Default.GroupVersion()})
    endpoints := NewEndpointController(client, controller.NoResyncPeriodFunc)
    endpoints.serviceStore.Store.Add(&api.Service{
        ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
        Spec:       api.ServiceSpec{Ports: []api.ServicePort{{Port: 80}}},
    })
    endpoints.syncService(ns + "/foo")
    endpointsHandler.ValidateRequestCount(t, 0)
}
Example 13: TestSecrets
// TestSecrets tests apiserver-side behavior of creation of secret objects and their use by pods.
func TestSecrets(t *testing.T) {
    etcdStorage, err := framework.NewEtcdStorage()
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    var m *master.Master
    s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        m.Handler.ServeHTTP(w, req)
    }))
    defer s.Close()

    m = master.New(&master.Config{
        DatabaseStorage:       etcdStorage,
        KubeletClient:         client.FakeKubeletClient{},
        EnableCoreControllers: true,
        EnableLogsSupport:     false,
        EnableUISupport:       false,
        EnableIndex:           true,
        APIPrefix:             "/api",
        Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
        AdmissionControl:      admit.NewAlwaysAdmit(),
        StorageVersions:       map[string]string{"": testapi.Default.Version()},
    })

    framework.DeleteAllEtcdKeys()
    client := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
    DoTestSecrets(t, client, testapi.Default.Version())
}
Example 14: createKubeClientOrDie
func createKubeClientOrDie(kubernetesUrl *url.URL) *kube_client.Client {
    kubeConfig, err := kube_config.GetKubeClientConfig(kubernetesUrl)
    if err != nil {
        glog.Fatalf("Failed to get client config: %v", err)
    }
    return kube_client.NewOrDie(kubeConfig)
}
Example 15: mustSetupScheduler
// mustSetupScheduler starts the following components:
// - k8s api server (a.k.a. master)
// - scheduler
// It returns scheduler config factory and destroyFunc which should be used to
// remove resources after finished.
// Notes on rate limiter:
// - The BindPodsRateLimiter is nil, meaning no rate limits.
// - client rate limit is set to 5000.
func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destroyFunc func()) {
    framework.DeleteAllEtcdKeys()

    var m *master.Master
    masterConfig := framework.NewIntegrationTestMasterConfig()
    m = master.New(masterConfig)
    s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        m.Handler.ServeHTTP(w, req)
    }))

    c := client.NewOrDie(&client.Config{
        Host:         s.URL,
        GroupVersion: testapi.Default.GroupVersion(),
        QPS:          5000.0,
        Burst:        5000,
    })

    schedulerConfigFactory = factory.NewConfigFactory(c, nil, api.DefaultSchedulerName)
    schedulerConfig, err := schedulerConfigFactory.Create()
    if err != nil {
        panic("Couldn't create scheduler config")
    }
    eventBroadcaster := record.NewBroadcaster()
    schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
    eventBroadcaster.StartRecordingToSink(c.Events(""))
    scheduler.New(schedulerConfig).Run()

    destroyFunc = func() {
        glog.Infof("destroying")
        close(schedulerConfig.StopEverything)
        s.Close()
        glog.Infof("destroyed")
    }
    return
}