This article collects typical usage examples of the Golang function NewExtensionsOrDie from the package k8s.io/kubernetes/pkg/client/unversioned. If you are wondering what NewExtensionsOrDie does and how to call it, the curated examples below should help.
Five code examples of NewExtensionsOrDie are shown, sorted by popularity by default.
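Before the full examples, a minimal sketch of the call pattern they all share may help. It is distilled from the examples below rather than taken verbatim from the repository, and it assumes the pre-client-go layout of the Kubernetes tree where these unversioned clients lived; the return type and the error-returning sibling NewExtensions follow that package's New/NewOrDie convention, and the host is a placeholder:

package main

import (
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/client/restclient"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

func newExtensionsClient(host string) *client.ExtensionsClient {
	// NewExtensionsOrDie panics if the config is invalid, so it belongs in
	// tests and bootstrap code, exactly where the examples below use it.
	return client.NewExtensionsOrDie(&restclient.Config{
		Host:          host, // e.g. the URL of an httptest server, as in the examples
		ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()},
	})
}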
Example 1: Setup
func (c *Client) Setup(t *testing.T) *Client {
	c.handler = &utiltesting.FakeHandler{
		StatusCode: c.Response.StatusCode,
	}
	if responseBody := body(t, c.Response.Body, c.Response.RawBody); responseBody != nil {
		c.handler.ResponseBody = *responseBody
	}
	c.server = httptest.NewServer(c.handler)
	if c.Client == nil {
		c.Client = client.NewOrDie(&client.Config{
			Host:          c.server.URL,
			ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
		})

		// TODO: caesarxuchao: hacky way to specify version of Experimental client.
		// We will fix this by supporting multiple group versions in Config
		c.ExtensionsClient = client.NewExtensionsOrDie(&client.Config{
			Host:          c.server.URL,
			ContentConfig: client.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()},
		})

		c.Clientset = clientset.NewForConfigOrDie(&client.Config{Host: c.server.URL})
	}
	c.QueryValidator = map[string]func(string, string) bool{}
	return c
}
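This Setup helper backs the client unit tests: it starts an httptest server whose canned response comes from c.Response, then points the clients at it. The hedged sketch below shows how a test might exercise the extensions client the same way, using only pieces visible in Example 1; the method names (Deployments, List) follow that era of the repository, and the JSON body is illustrative, not the project's actual test:

package client_test

import (
	"net/http/httptest"
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/testapi"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	utiltesting "k8s.io/kubernetes/pkg/util/testing"
)

func TestExtensionsClientAgainstFakeServer(t *testing.T) {
	// Canned empty DeploymentList that the fake server returns for every request.
	handler := &utiltesting.FakeHandler{
		StatusCode:   200,
		ResponseBody: `{"kind":"DeploymentList","apiVersion":"extensions/v1beta1","items":[]}`,
	}
	server := httptest.NewServer(handler)
	defer server.Close()

	extClient := client.NewExtensionsOrDie(&client.Config{
		Host:          server.URL,
		ContentConfig: client.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()},
	})
	if _, err := extClient.Deployments(api.NamespaceDefault).List(api.ListOptions{}); err != nil {
		t.Fatalf("unexpected error listing deployments: %v", err)
	}
}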
Example 2: Setup
func (c *Client) Setup(t *testing.T) *Client {
	c.handler = &utiltesting.FakeHandler{
		StatusCode: c.Response.StatusCode,
	}
	if responseBody := c.body(t, c.Response.Body, c.Response.RawBody); responseBody != nil {
		c.handler.ResponseBody = *responseBody
	}
	c.server = httptest.NewServer(c.handler)
	if c.Client == nil {
		c.Client = client.NewOrDie(&restclient.Config{
			Host:          c.server.URL,
			ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion},
		})

		// TODO: caesarxuchao: hacky way to specify version of Experimental client.
		// We will fix this by supporting multiple group versions in Config
		c.AutoscalingClient = client.NewAutoscalingOrDie(&restclient.Config{
			Host:          c.server.URL,
			ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Autoscaling.GroupVersion()},
		})
		c.BatchClient = client.NewBatchOrDie(&restclient.Config{
			Host:          c.server.URL,
			ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Batch.GroupVersion()},
		})
		c.ExtensionsClient = client.NewExtensionsOrDie(&restclient.Config{
			Host:          c.server.URL,
			ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()},
		})
		c.RbacClient = client.NewRbacOrDie(&restclient.Config{
			Host:          c.server.URL,
			ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Rbac.GroupVersion()},
		})
		c.StorageClient = client.NewStorageOrDie(&restclient.Config{
			Host:          c.server.URL,
			ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Storage.GroupVersion()},
		})

		c.Clientset = clientset.NewForConfigOrDie(&restclient.Config{Host: c.server.URL})
	}
	c.QueryValidator = map[string]func(string, string) bool{}
	return c
}
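Example 2 is the later evolution of the same Setup helper: the config type has moved to restclient.Config, and one client per API group (autoscaling, batch, extensions, rbac, storage) is constructed against the same fake server. Note the two ways a GroupVersion reaches ContentConfig: the core client pins itself to the registered scheme's version, while the group clients follow the version selected for the test run. A hedged sketch of that distinction follows; the import path for the registered package is an assumption for this era of the tree, and the printed values depend on the KUBE_TEST_API environment variable:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/apimachinery/registered"
)

func main() {
	// The core client uses whatever version the registered scheme
	// advertises for the legacy ("") group, e.g. "v1" ...
	coreGV := registered.GroupOrDie(api.GroupName).GroupVersion
	// ... while the per-group test clients follow the version selected
	// via KUBE_TEST_API, e.g. "extensions/v1beta1".
	extGV := testapi.Extensions.GroupVersion()
	fmt.Printf("core=%v extensions=%v\n", coreGV, extGV)
}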
Example 3: startComponents
func startComponents(firstManifestURL, secondManifestURL string) (string, string) {
	// Setup
	handler := delegateHandler{}
	apiServer := httptest.NewServer(&handler)

	cfg := etcd.Config{
		Endpoints: []string{"http://127.0.0.1:4001"},
	}
	etcdClient, err := etcd.New(cfg)
	if err != nil {
		glog.Fatalf("Error creating etcd client: %v", err)
	}
	glog.Infof("Creating etcd client pointing to %v", cfg.Endpoints)

	keysAPI := etcd.NewKeysAPI(etcdClient)
	sleep := 4 * time.Second
	ok := false
	for i := 0; i < 3; i++ {
		keys, err := keysAPI.Get(context.TODO(), "/", nil)
		if err != nil {
			glog.Warningf("Unable to list root etcd keys: %v", err)
			if i < 2 {
				time.Sleep(sleep)
				sleep = sleep * sleep
			}
			continue
		}
		for _, node := range keys.Node.Nodes {
			if _, err := keysAPI.Delete(context.TODO(), node.Key, &etcd.DeleteOptions{Recursive: true}); err != nil {
				glog.Fatalf("Unable to delete key: %v", err)
			}
		}
		ok = true
		break
	}
	if !ok {
		glog.Fatalf("Failed to connect to etcd")
	}

	cl := client.NewOrDie(&client.Config{Host: apiServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	clientset := clientset.NewForConfigOrDie(&client.Config{Host: apiServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})

	// TODO: caesarxuchao: hacky way to specify version of Experimental client.
	// We will fix this by supporting multiple group versions in Config
	cl.ExtensionsClient = client.NewExtensionsOrDie(&client.Config{Host: apiServer.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()}})

	// Master
	host, port, err := net.SplitHostPort(strings.TrimLeft(apiServer.URL, "http://"))
	if err != nil {
		glog.Fatalf("Unable to parse URL '%v': %v", apiServer.URL, err)
	}
	portNumber, err := strconv.Atoi(port)
	if err != nil {
		glog.Fatalf("Nonnumeric port? %v", err)
	}
	publicAddress := net.ParseIP(host)
	if publicAddress == nil {
		glog.Fatalf("No public address for %s", host)
	}
	// The caller of master.New should guarantee publicAddress is properly set
	hostIP, err := utilnet.ChooseBindAddress(publicAddress)
	if err != nil {
		glog.Fatalf("Unable to find a suitable network address (%v); failed to get a valid public address for the master.", err)
	}

	masterConfig := framework.NewMasterConfig()
	masterConfig.EnableCoreControllers = true
	masterConfig.EnableProfiling = true
	masterConfig.ReadWritePort = portNumber
	masterConfig.PublicAddress = hostIP
	masterConfig.CacheTimeout = 2 * time.Second

	// Create a master and install handlers into mux.
	m := master.New(masterConfig)
	handler.delegate = m.Handler

	// Scheduler
	schedulerConfigFactory := factory.NewConfigFactory(cl, api.DefaultSchedulerName)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		glog.Fatalf("Couldn't create scheduler config: %v", err)
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(cl.Events(""))
	scheduler.New(schedulerConfig).Run()

	// Ensure the service endpoints are synced several times within the window that the integration tests wait.
	go endpointcontroller.NewEndpointController(clientset, controller.NoResyncPeriodFunc).
		Run(3, util.NeverStop)

	// TODO: Write an integration test for the replication controllers watch.
	go replicationcontroller.NewReplicationManager(clientset, controller.NoResyncPeriodFunc, replicationcontroller.BurstReplicas).
		Run(3, util.NeverStop)

	nodeController := nodecontroller.NewNodeController(nil, clientset, 5*time.Minute, util.NewFakeRateLimiter(), util.NewFakeRateLimiter(),
//......... (remainder of this example omitted) .........
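Two details in this example are worth flagging. First, strings.TrimLeft(apiServer.URL, "http://") treats its second argument as a set of characters to strip, not a prefix; it works here only because httptest URLs put a digit right after the scheme. Second, sleep = sleep * sleep squares a time.Duration (an int64 nanosecond count), which overflows after a single retry; a conventional backoff would double instead. The sketch below, pure standard library, demonstrates the TrimLeft pitfall and the call the code presumably wants, strings.TrimPrefix:

package main

import (
	"fmt"
	"strings"
)

func main() {
	url := "http://there.example:8080"
	// TrimLeft strips any leading run of the characters h, t, p, :, /.
	// Because "there" starts with 't' and 'h', it eats into the host.
	fmt.Println(strings.TrimLeft(url, "http://")) // ere.example:8080
	// TrimPrefix removes exactly the given prefix, once.
	fmt.Println(strings.TrimPrefix(url, "http://")) // there.example:8080
}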
Example 4: main
func main() {
	gruntime.GOMAXPROCS(gruntime.NumCPU())
	addFlags(pflag.CommandLine)

	util.InitFlags()
	utilruntime.ReallyCrash = true
	util.InitLogs()
	defer util.FlushLogs()

	go func() {
		defer util.FlushLogs()
		time.Sleep(maxTestTimeout)
		glog.Fatalf("This test has timed out.")
	}()

	glog.Infof("Running tests for APIVersion: %s", os.Getenv("KUBE_TEST_API"))

	firstManifestURL := ServeCachedManifestFile(testPodSpecFile)
	secondManifestURL := ServeCachedManifestFile(testPodSpecFile)
	apiServerURL, _ := startComponents(firstManifestURL, secondManifestURL)

	// OK, we're good to go.
	glog.Infof("API Server started on %s", apiServerURL)
	// Wait for the synchronization threads to come up.
	time.Sleep(time.Second * 10)

	kubeClient := client.NewOrDie(&client.Config{Host: apiServerURL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	// TODO: caesarxuchao: hacky way to specify version of Experimental client.
	// We will fix this by supporting multiple group versions in Config
	kubeClient.ExtensionsClient = client.NewExtensionsOrDie(&client.Config{Host: apiServerURL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Extensions.GroupVersion()}})

	// Run tests in parallel
	testFuncs := []testFunc{
		runReplicationControllerTest,
		runAtomicPutTest,
		runPatchTest,
		runServiceTest,
		runAPIVersionsTest,
		runMasterServiceTest,
		func(c *client.Client) {
			runSelfLinkTestOnNamespace(c, api.NamespaceDefault)
			runSelfLinkTestOnNamespace(c, "other")
		},
	}

	// Only run at most maxConcurrency tests in parallel.
	if maxConcurrency <= 0 {
		maxConcurrency = len(testFuncs)
	}
	glog.Infof("Running %d tests in parallel.", maxConcurrency)
	ch := make(chan struct{}, maxConcurrency)

	var wg sync.WaitGroup
	wg.Add(len(testFuncs))
	for i := range testFuncs {
		f := testFuncs[i]
		go func() {
			ch <- struct{}{}
			f(kubeClient)
			<-ch
			wg.Done()
		}()
	}
	wg.Wait()
	close(ch)

	// Check that the kubelet tried to make the containers.
	// Using a set to list unique creation attempts. Our fake is
	// really stupid, so kubelet tries to create these multiple times.
	createdConts := sets.String{}
	for _, p := range fakeDocker1.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	for _, p := range fakeDocker2.Created {
		// The last 8 characters are random, so slice them off.
		if n := len(p); n > 8 {
			createdConts.Insert(p[:n-8])
		}
	}
	// We expect 12: 2 pod infra containers + 2 containers from the replication controller +
	//               1 pod infra container + 2 containers from the URL on the first Kubelet +
	//               1 pod infra container + 2 containers from the URL on the second Kubelet +
	//               1 pod infra container + 1 container from the service test.
	if len(createdConts) != 12 {
		glog.Fatalf("Expected 12 containers; got %v\n\nlist of created containers:\n\n%#v\n\nDocker 1 Created:\n\n%#v\n\nDocker 2 Created:\n\n%#v\n\n", len(createdConts), createdConts.List(), fakeDocker1.Created, fakeDocker2.Created)
	}
	glog.Infof("OK - found created containers: %#v", createdConts.List())

	// This test doesn't run with the others because it can't run in
	// parallel and also it schedules extra pods which would change the
	// above pod counting logic.
	runSchedulerNoPhantomPodsTest(kubeClient)

	glog.Infof("\n\nLogging high latency metrics from the 10250 kubelet")
	e2e.HighLatencyKubeletOperations(nil, 1*time.Second, "localhost:10250")
//......... (remainder of this example omitted) .........
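The concurrency cap in this example is the classic buffered-channel-as-semaphore pattern: each goroutine sends into a channel with capacity maxConcurrency before doing its work and receives afterwards, so at most maxConcurrency test funcs run at once while the WaitGroup tracks overall completion. A self-contained sketch of just that pattern, with a placeholder work function:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	const maxConcurrency = 2
	jobs := []int{1, 2, 3, 4, 5}

	sem := make(chan struct{}, maxConcurrency) // buffered channel as a counting semaphore
	var wg sync.WaitGroup
	wg.Add(len(jobs))
	for _, j := range jobs {
		j := j // capture the loop variable (required before Go 1.22)
		go func() {
			sem <- struct{}{}        // acquire a slot; blocks while maxConcurrency jobs run
			defer func() { <-sem }() // release the slot
			defer wg.Done()
			fmt.Println("running job", j)
			time.Sleep(100 * time.Millisecond) // placeholder for real work
		}()
	}
	wg.Wait()
}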
Example 5: startComponents
func startComponents(firstManifestURL, secondManifestURL string) (string, string) {
	// Setup
	servers := []string{}
	glog.Infof("Creating etcd client pointing to %v", servers)

	handler := delegateHandler{}
	apiServer := httptest.NewServer(&handler)

	etcdClient := etcd.NewClient(servers)
	sleep := 4 * time.Second
	ok := false
	for i := 0; i < 3; i++ {
		keys, err := etcdClient.Get("/", false, false)
		if err != nil {
			glog.Warningf("Unable to list root etcd keys: %v", err)
			if i < 2 {
				time.Sleep(sleep)
				sleep = sleep * sleep
			}
			continue
		}
		for _, node := range keys.Node.Nodes {
			if _, err := etcdClient.Delete(node.Key, true); err != nil {
				glog.Fatalf("Unable to delete key: %v", err)
			}
		}
		ok = true
		break
	}
	if !ok {
		glog.Fatalf("Failed to connect to etcd")
	}

	cl := client.NewOrDie(&client.Config{Host: apiServer.URL, GroupVersion: testapi.Default.GroupVersion()})
	// TODO: caesarxuchao: hacky way to specify version of Experimental client.
	// We will fix this by supporting multiple group versions in Config
	cl.ExtensionsClient = client.NewExtensionsOrDie(&client.Config{Host: apiServer.URL, GroupVersion: testapi.Extensions.GroupVersion()})

	storageVersions := make(map[string]string)
	etcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("").InterfacesFor, testapi.Default.GroupAndVersion(), etcdtest.PathPrefix())
	storageVersions[""] = testapi.Default.GroupAndVersion()
	if err != nil {
		glog.Fatalf("Unable to get etcd storage: %v", err)
	}
	expEtcdStorage, err := master.NewEtcdStorage(etcdClient, latest.GroupOrDie("extensions").InterfacesFor, testapi.Extensions.GroupAndVersion(), etcdtest.PathPrefix())
	storageVersions["extensions"] = testapi.Extensions.GroupAndVersion()
	if err != nil {
		glog.Fatalf("Unable to get etcd storage for experimental: %v", err)
	}
	storageDestinations := master.NewStorageDestinations()
	storageDestinations.AddAPIGroup("", etcdStorage)
	storageDestinations.AddAPIGroup("extensions", expEtcdStorage)

	// Master
	host, port, err := net.SplitHostPort(strings.TrimLeft(apiServer.URL, "http://"))
	if err != nil {
		glog.Fatalf("Unable to parse URL '%v': %v", apiServer.URL, err)
	}
	portNumber, err := strconv.Atoi(port)
	if err != nil {
		glog.Fatalf("Nonnumeric port? %v", err)
	}
	publicAddress := net.ParseIP(host)
	if publicAddress == nil {
		glog.Fatalf("No public address for %s", host)
	}
	// The caller of master.New should guarantee publicAddress is properly set
	hostIP, err := util.ValidPublicAddrForMaster(publicAddress)
	if err != nil {
		glog.Fatalf("Unable to find a suitable network address (%v); failed to get a valid public address for the master.", err)
	}

	// Create a master and install handlers into mux.
	m := master.New(&master.Config{
		StorageDestinations:   storageDestinations,
		KubeletClient:         fakeKubeletClient{},
		EnableCoreControllers: true,
		EnableLogsSupport:     false,
		EnableProfiling:       true,
		APIPrefix:             "/api",
		APIGroupPrefix:        "/apis",
		Authorizer:            apiserver.NewAlwaysAllowAuthorizer(),
		AdmissionControl:      admit.NewAlwaysAdmit(),
		ReadWritePort:         portNumber,
		PublicAddress:         hostIP,
		CacheTimeout:          2 * time.Second,
		StorageVersions:       storageVersions,
	})
	handler.delegate = m.Handler

	// Scheduler
	schedulerConfigFactory := factory.NewConfigFactory(cl, nil)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		glog.Fatalf("Couldn't create scheduler config: %v", err)
	}
//......... (remainder of this example omitted) .........
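A closing note on matching these examples to a codebase revision: Example 5 predates the ContentConfig split used in the other examples, so the group version sits directly on client.Config rather than nested inside a ContentConfig (and the config type itself later became restclient.Config, as in Example 2). The fragment below places the two shapes side by side purely for comparison; they come from different revisions of the tree and will not compile against the same one:

// Older shape (Example 5): the version is pinned directly on the Config.
cl := client.NewOrDie(&client.Config{
	Host:         apiServer.URL,
	GroupVersion: testapi.Default.GroupVersion(),
})

// Newer shape (Examples 1, 3, 4): the version is nested under ContentConfig.
cl := client.NewOrDie(&client.Config{
	Host:          apiServer.URL,
	ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()},
})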