This article collects typical usage examples of the Golang function NewForConfig from k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2. If you have been wondering what exactly NewForConfig does and how to call it, the curated examples below should help.
Nine code examples of NewForConfig are shown, ordered by popularity.
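Before the examples, here is a minimal self-contained sketch of the basic pattern (the host address is a placeholder and the import paths assume a Kubernetes 1.2-era source tree): build a restclient.Config, hand it to NewForConfig, and use the returned clientset.

package main

import (
    "fmt"
    "log"

    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_2"
    "k8s.io/kubernetes/pkg/client/restclient"
)

func main() {
    // Host is an assumed insecure local apiserver address, for illustration only.
    c, err := clientset.NewForConfig(&restclient.Config{Host: "http://127.0.0.1:8080"})
    if err != nil {
        log.Fatalf("cannot create clientset: %v", err)
    }
    // Any typed call works from here; ServerVersion is the cheapest probe.
    v, err := c.Discovery().ServerVersion()
    if err != nil {
        log.Fatalf("cannot reach apiserver: %v", err)
    }
    fmt.Printf("connected; server version %s\n", v.GitVersion)
}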
Example 1: NewStatusController
func NewStatusController(pods []string) (*statusController, error) {
    client, err := clientset.NewForConfig(&restclient.Config{Host: insecureAPIAddr})
    if err != nil {
        return nil, err
    }
    return &statusController{client: client, watchPods: pods}, nil
}
Example 2: createAPIServerClient
// TODO(jdef): hacked from kubelet/server/server.go
// TODO(k8s): replace this with clientcmd
func (s *SchedulerServer) createAPIServerClient() (*clientset.Clientset, error) {
    authInfo, err := clientauth.LoadFromFile(s.authPath)
    if err != nil {
        log.Warningf("Could not load kubernetes auth path: %v. Continuing with defaults.", err)
    }
    if authInfo == nil {
        // authInfo didn't load correctly - continue with defaults.
        authInfo = &clientauth.Info{}
    }
    clientConfig, err := authInfo.MergeWithConfig(client.Config{})
    if err != nil {
        return nil, err
    }
    if len(s.apiServerList) < 1 {
        return nil, fmt.Errorf("no api servers specified")
    }
    // TODO: adapt Kube client to support LB over several servers
    if len(s.apiServerList) > 1 {
        log.Infof("Multiple api servers specified. Picking first one")
    }
    clientConfig.Host = s.apiServerList[0]
    c, err := clientset.NewForConfig(&clientConfig)
    if err != nil {
        return nil, err
    }
    return c, nil
}
Example 3: apiTest
func apiTest() error {
    client, err := clientset.NewForConfig(&restclient.Config{Host: insecureAPIAddr})
    if err != nil {
        return err
    }
    _, err = client.Discovery().ServerVersion()
    return err
}
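Example 3 is a one-shot connectivity probe. A hypothetical caller (not part of the original source) could wrap it in a poll loop with k8s.io/kubernetes/pkg/util/wait until the apiserver answers; the one-second interval and one-minute timeout below are assumptions:

// Retry apiTest until it succeeds or the timeout expires.
err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
    // Treat any error as "not ready yet" and keep polling.
    return apiTest() == nil, nil
})
if err != nil {
    log.Fatalf("apiserver never became ready: %v", err)
}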
Example 4: Run
// Run runs the specified KubeletExecutorServer.
func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
    // create shared channels
    kubeletFinished := make(chan struct{})
    nodeInfos := make(chan executor.NodeInfo, 1)

    // create static pods directory
    staticPodsConfigPath := filepath.Join(s.RootDirectory, "static-pods")
    err := os.Mkdir(staticPodsConfigPath, 0750)
    if err != nil {
        return err
    }

    // create apiserver client
    var apiclient *clientset.Clientset
    clientConfig, err := kubeletapp.CreateAPIServerClientConfig(s.KubeletServer)
    if err == nil {
        apiclient, err = clientset.NewForConfig(clientConfig)
    }
    if err != nil {
        // required for k8sm since we need to send api.Binding information back to the apiserver
        return fmt.Errorf("cannot create API client: %v", err)
    }

    var (
        pw = cache.NewListWatchFromClient(apiclient.LegacyClient, "pods", api.NamespaceAll,
            fields.OneTermEqualSelector(client.PodHost, s.HostnameOverride),
        )
        reg = executor.NewRegistry(apiclient)
    )

    // start executor
    var executorDone <-chan struct{}
    executorDone, err = s.runExecutor(nodeInfos, kubeletFinished, staticPodsConfigPath, apiclient, reg)
    if err != nil {
        return err
    }

    // start kubelet, blocking
    return s.runKubelet(nodeInfos, kubeletFinished, staticPodsConfigPath, apiclient, pw, reg, executorDone)
}
Example 5: Run
// Run runs the specified APIServer. This should never exit.
func Run(s *options.APIServer) error {
    verifyClusterIPFlags(s)

    // If advertise-address is not specified, use bind-address. If bind-address
    // is not usable (unset, 0.0.0.0, or loopback), we will use the host's default
    // interface as valid public addr for master (see: util/net#ValidPublicAddrForMaster)
    if s.AdvertiseAddress == nil || s.AdvertiseAddress.IsUnspecified() {
        hostIP, err := utilnet.ChooseBindAddress(s.BindAddress)
        if err != nil {
            glog.Fatalf("Unable to find suitable network address: error='%v'. "+
                "Try to set the AdvertiseAddress directly or provide a valid BindAddress to fix this.", err)
        }
        s.AdvertiseAddress = hostIP
    }
    glog.Infof("Will report %v as public IP address.", s.AdvertiseAddress)

    if len(s.EtcdServerList) == 0 {
        glog.Fatalf("--etcd-servers must be specified")
    }
    if s.KubernetesServiceNodePort > 0 && !s.ServiceNodePortRange.Contains(s.KubernetesServiceNodePort) {
        glog.Fatalf("Kubernetes service port range %v doesn't contain %v", s.ServiceNodePortRange, s.KubernetesServiceNodePort)
    }

    capabilities.Initialize(capabilities.Capabilities{
        AllowPrivileged: s.AllowPrivileged,
        // TODO(vmarmol): Implement support for HostNetworkSources.
        PrivilegedSources: capabilities.PrivilegedSources{
            HostNetworkSources: []string{},
            HostPIDSources:     []string{},
            HostIPCSources:     []string{},
        },
        PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec,
    })

    cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
    if err != nil {
        glog.Fatalf("Cloud provider could not be initialized: %v", err)
    }

    // Setup tunneler if needed
    var tunneler master.Tunneler
    var proxyDialerFn apiserver.ProxyDialerFunc
    if len(s.SSHUser) > 0 {
        // Get ssh key distribution func, if supported
        var installSSH master.InstallSSHKey
        if cloud != nil {
            if instances, supported := cloud.Instances(); supported {
                installSSH = instances.AddSSHKeyToAllInstances
            }
        }
        if s.KubeletConfig.Port == 0 {
            glog.Fatalf("Must enable kubelet port if proxy ssh-tunneling is specified.")
        }
        // Set up the tunneler
        // TODO(cjcullen): If we want this to handle per-kubelet ports or other
        // kubelet listen-addresses, we need to plumb through options.
        healthCheckPath := &url.URL{
            Scheme: "https",
            Host:   net.JoinHostPort("127.0.0.1", strconv.FormatUint(uint64(s.KubeletConfig.Port), 10)),
            Path:   "healthz",
        }
        tunneler = master.NewSSHTunneler(s.SSHUser, s.SSHKeyfile, healthCheckPath, installSSH)

        // Use the tunneler's dialer to connect to the kubelet
        s.KubeletConfig.Dial = tunneler.Dial
        // Use the tunneler's dialer when proxying to pods, services, and nodes
        proxyDialerFn = tunneler.Dial
    }

    // Proxying to pods and services is IP-based... don't expect to be able to verify the hostname
    proxyTLSClientConfig := &tls.Config{InsecureSkipVerify: true}

    kubeletClient, err := kubeletclient.NewStaticKubeletClient(&s.KubeletConfig)
    if err != nil {
        glog.Fatalf("Failure to start kubelet client: %v", err)
    }

    apiGroupVersionOverrides, err := parseRuntimeConfig(s)
    if err != nil {
        glog.Fatalf("error in parsing runtime-config: %s", err)
    }

    clientConfig := &client.Config{
        Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)),
    }
    if len(s.DeprecatedStorageVersion) != 0 {
        gv, err := unversioned.ParseGroupVersion(s.DeprecatedStorageVersion)
        if err != nil {
            glog.Fatalf("error in parsing group version: %s", err)
        }
        clientConfig.GroupVersion = &gv
    }

    client, err := clientset.NewForConfig(clientConfig)
    if err != nil {
        glog.Errorf("Failed to create clientset: %v", err)
    }
//......... remainder of this example omitted .........
Example 6: BeforeEach
// BeforeEach gets a client and makes a namespace.
func (f *Framework) BeforeEach() {
    // The fact that we need this feels like a bug in ginkgo.
    // https://github.com/onsi/ginkgo/issues/222
    f.cleanupHandle = AddCleanupAction(f.AfterEach)
    if f.Client == nil {
        By("Creating a kubernetes client")
        config, err := LoadConfig()
        Expect(err).NotTo(HaveOccurred())
        config.QPS = f.options.ClientQPS
        config.Burst = f.options.ClientBurst
        if TestContext.KubeAPIContentType != "" {
            config.ContentType = TestContext.KubeAPIContentType
        }
        c, err := loadClientFromConfig(config)
        Expect(err).NotTo(HaveOccurred())
        f.Client = c
        f.Clientset_1_2, err = release_1_2.NewForConfig(config)
        Expect(err).NotTo(HaveOccurred())
        f.Clientset_1_3, err = release_1_3.NewForConfig(config)
        Expect(err).NotTo(HaveOccurred())
        clientRepoConfig := getClientRepoConfig(config)
        f.StagingClient, err = release_1_4.NewForConfig(clientRepoConfig)
        Expect(err).NotTo(HaveOccurred())
    }

    if f.federated {
        if f.FederationClient == nil {
            By("Creating a federated kubernetes client")
            var err error
            f.FederationClient, err = LoadFederationClient()
            Expect(err).NotTo(HaveOccurred())
        }
        if f.FederationClientset == nil {
            By("Creating an unversioned federation Clientset")
            var err error
            f.FederationClientset, err = LoadFederationClientset()
            Expect(err).NotTo(HaveOccurred())
        }
        if f.FederationClientset_1_3 == nil {
            By("Creating a release 1.3 federation Clientset")
            var err error
            f.FederationClientset_1_3, err = LoadFederationClientset_1_3()
            Expect(err).NotTo(HaveOccurred())
        }
        if f.FederationClientset_1_4 == nil {
            By("Creating a release 1.4 federation Clientset")
            var err error
            f.FederationClientset_1_4, err = LoadFederationClientset_1_4()
            Expect(err).NotTo(HaveOccurred())
        }
        By("Waiting for federation-apiserver to be ready")
        err := WaitForFederationApiserverReady(f.FederationClientset)
        Expect(err).NotTo(HaveOccurred())
        By("federation-apiserver is ready")
    }

    By("Building a namespace api object")
    namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
        "e2e-framework": f.BaseName,
    })
    Expect(err).NotTo(HaveOccurred())
    f.Namespace = namespace

    if TestContext.VerifyServiceAccount {
        By("Waiting for a default service account to be provisioned in namespace")
        err = WaitForDefaultServiceAccountInNamespace(f.Client, namespace.Name)
        Expect(err).NotTo(HaveOccurred())
    } else {
        Logf("Skipping waiting for service account")
    }

    if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
        f.gatherer, err = NewResourceUsageGatherer(f.Client, ResourceGathererOptions{
            inKubemark: ProviderIs("kubemark"),
            masterOnly: TestContext.GatherKubeSystemResourceUsageData == "master",
        })
        if err != nil {
            Logf("Error while creating NewResourceUsageGatherer: %v", err)
        } else {
            go f.gatherer.startGatheringData()
        }
    }

    if TestContext.GatherLogsSizes {
        f.logsSizeWaitGroup = sync.WaitGroup{}
        f.logsSizeWaitGroup.Add(1)
        f.logsSizeCloseChannel = make(chan bool)
        f.logsSizeVerifier = NewLogsVerifier(f.Client, f.logsSizeCloseChannel)
        go func() {
            f.logsSizeVerifier.Run()
            f.logsSizeWaitGroup.Done()
        }()
    }
}
Example 7: runKubelet
func (s *KubeletExecutorServer) runKubelet(
    nodeInfos <-chan executor.NodeInfo,
    kubeletDone chan<- struct{},
    staticPodsConfigPath string,
    apiclient *clientset.Clientset,
    podLW *cache.ListWatch,
    registry executor.Registry,
    executorDone <-chan struct{},
) (err error) {
    defer func() {
        if err != nil {
            // close the channel here. When Run returns without error, the executorKubelet is
            // responsible to do this. If it returns with an error, we are responsible here.
            close(kubeletDone)
        }
    }()

    kcfg, err := kubeletapp.UnsecuredKubeletConfig(s.KubeletServer)
    if err != nil {
        return err
    }

    // apply Mesos specific settings
    kcfg.Builder = func(kc *kubeletapp.KubeletConfig) (kubeletapp.KubeletBootstrap, *kconfig.PodConfig, error) {
        k, pc, err := kubeletapp.CreateAndInitKubelet(kc)
        if err != nil {
            return k, pc, err
        }
        // decorate kubelet such that it shuts down when the executor is done
        decorated := &executorKubelet{
            Kubelet:      k.(*kubelet.Kubelet),
            kubeletDone:  kubeletDone,
            executorDone: executorDone,
        }
        return decorated, pc, nil
    }
    kcfg.DockerDaemonContainer = "" // don't move the docker daemon into a cgroup
    kcfg.Hostname = kcfg.HostnameOverride
    kcfg.KubeClient = apiclient

    // taken from KubeletServer#Run(*KubeletConfig)
    eventClientConfig, err := kubeletapp.CreateAPIServerClientConfig(s.KubeletServer)
    if err != nil {
        return err
    }

    // make a separate client for events
    eventClientConfig.QPS = s.EventRecordQPS
    eventClientConfig.Burst = s.EventBurst
    kcfg.EventClient, err = clientset.NewForConfig(eventClientConfig)
    if err != nil {
        return err
    }

    kcfg.NodeName = kcfg.HostnameOverride
    kcfg.PodConfig = kconfig.NewPodConfig(kconfig.PodConfigNotificationIncremental, kcfg.Recorder) // override the default pod source
    kcfg.StandaloneMode = false
    kcfg.SystemContainer = "" // don't take control over other system processes.

    if kcfg.Cloud != nil {
        // fail early and hard because having the cloud provider loaded would go unnoticed,
        // but break bigger clusters because accessing the state.json from every slave kills the master.
        panic("cloud provider must not be set")
    }

    // create custom cAdvisor interface which returns the resource values that Mesos reports
    ni := <-nodeInfos
    cAdvisorInterface, err := NewMesosCadvisor(ni.Cores, ni.Mem, s.CAdvisorPort)
    if err != nil {
        return err
    }
    kcfg.CAdvisorInterface = cAdvisorInterface
    kcfg.ContainerManager, err = cm.NewContainerManager(kcfg.Mounter, cAdvisorInterface)
    if err != nil {
        return err
    }

    go func() {
        for ni := range nodeInfos {
            // TODO(sttts): implement with MachineAllocable mechanism when https://github.com/kubernetes/kubernetes/issues/13984 is finished
            log.V(3).Infof("ignoring updated node resources: %v", ni)
        }
    }()

    // create main pod source, it will stop generating events once executorDone is closed
    newSourceMesos(executorDone, kcfg.PodConfig.Channel(mesosSource), podLW, registry)

    // create static-pods directory file source
    log.V(2).Infof("initializing static pods source factory, configured at path %q", staticPodsConfigPath)
    fileSourceUpdates := kcfg.PodConfig.Channel(kubetypes.FileSource)
    kconfig.NewSourceFile(staticPodsConfigPath, kcfg.HostnameOverride, kcfg.FileCheckFrequency, fileSourceUpdates)

    // run the kubelet
    // NOTE: because kcfg != nil holds, the upstream Run function will not
    // initialize the cloud provider. We explicitly wouldn't want
    // that because then every kubelet instance would query the master
    // state.json which does not scale.
    err = kubeletapp.Run(s.KubeletServer, kcfg)
//......... remainder of this example omitted .........
Example 8: Run
// Run runs the specified KubeletServer for the given KubeletConfig. This should never exit.
// The kcfg argument may be nil - if so, it is initialized from the settings on KubeletServer.
// Otherwise, the caller is assumed to have set up the KubeletConfig object and all defaults
// will be ignored.
func Run(s *options.KubeletServer, kcfg *KubeletConfig) error {
    var err error
    if kcfg == nil {
        cfg, err := UnsecuredKubeletConfig(s)
        if err != nil {
            return err
        }
        kcfg = cfg

        clientConfig, err := CreateAPIServerClientConfig(s)
        if err == nil {
            kcfg.KubeClient, err = clientset.NewForConfig(clientConfig)

            // make a separate client for events
            eventClientConfig := *clientConfig
            eventClientConfig.QPS = s.EventRecordQPS
            eventClientConfig.Burst = s.EventBurst
            kcfg.EventClient, err = clientset.NewForConfig(&eventClientConfig)
        }
        if err != nil && len(s.APIServerList) > 0 {
            glog.Warningf("No API client: %v", err)
        }

        cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
        if err != nil {
            return err
        }
        glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
        kcfg.Cloud = cloud
    }

    if kcfg.CAdvisorInterface == nil {
        kcfg.CAdvisorInterface, err = cadvisor.New(s.CAdvisorPort)
        if err != nil {
            return err
        }
    }

    if kcfg.ContainerManager == nil {
        kcfg.ContainerManager, err = cm.NewContainerManager(kcfg.Mounter, kcfg.CAdvisorInterface)
        if err != nil {
            return err
        }
    }

    runtime.ReallyCrash = s.ReallyCrashForTesting
    rand.Seed(time.Now().UTC().UnixNano())

    credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)
    glog.V(2).Infof("Using root directory: %v", s.RootDirectory)

    // TODO(vmarmol): Do this through container config.
    oomAdjuster := kcfg.OOMAdjuster
    if err := oomAdjuster.ApplyOOMScoreAdj(0, s.OOMScoreAdj); err != nil {
        glog.Warning(err)
    }

    if err := RunKubelet(kcfg); err != nil {
        return err
    }

    if s.HealthzPort > 0 {
        healthz.DefaultHealthz()
        go util.Until(func() {
            err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(s.HealthzPort)), nil)
            if err != nil {
                glog.Errorf("Starting health server failed: %v", err)
            }
        }, 5*time.Second, util.NeverStop)
    }

    if s.RunOnce {
        return nil
    }

    // run forever
    select {}
}
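As the doc comment on this Run notes, kcfg may be nil, in which case Run builds the KubeletConfig itself from the KubeletServer options. A hedged usage sketch, assuming options.NewKubeletServer is the constructor that supplies the flag defaults:

// Run the kubelet with defaults derived from the KubeletServer options;
// passing nil for kcfg makes Run call UnsecuredKubeletConfig internally.
s := options.NewKubeletServer()
if err := Run(s, nil); err != nil {
    glog.Fatalf("kubelet failed: %v", err)
}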
Example 9: main
func main() {
    // First log our starting config, and then set up.
    log.Infof("Invoked by %v", os.Args)
    flag.Parse()

    // Perform further validation of flags.
    if *deployment == "" {
        log.Fatal("Must specify a deployment.")
    }
    if *threshold < 0 || *threshold > 100 {
        log.Fatalf("Threshold must be between 0 and 100 inclusively, was %d.", *threshold)
    }

    log.Infof("Watching namespace: %s, pod: %s, container: %s.", *podNamespace, *podName, *containerName)
    log.Infof("cpu: %s, extra_cpu: %s, memory: %s, extra_memory: %s, storage: %s, extra_storage: %s", *baseCPU, *cpuPerNode, *baseMemory, *memoryPerNode, *baseStorage, *storagePerNode)

    // Set up work objects.
    config, err := restclient.InClusterConfig()
    if err != nil {
        log.Fatal(err)
    }
    clientset, err := release_1_2.NewForConfig(config)
    if err != nil {
        log.Fatal(err)
    }
    k8s := nanny.NewKubernetesClient(*podNamespace, *deployment, *podName, *containerName, clientset)

    var resources []nanny.Resource

    // Monitor only the resources specified.
    if *baseCPU != noValue {
        resources = append(resources, nanny.Resource{
            Base:         resource.MustParse(*baseCPU),
            ExtraPerNode: resource.MustParse(*cpuPerNode),
            Name:         "cpu",
        })
    }
    if *baseMemory != noValue {
        resources = append(resources, nanny.Resource{
            Base:         resource.MustParse(*baseMemory),
            ExtraPerNode: resource.MustParse(*memoryPerNode),
            Name:         "memory",
        })
    }
    if *baseStorage != noValue {
        resources = append(resources, nanny.Resource{
            Base:         resource.MustParse(*baseStorage),
            ExtraPerNode: resource.MustParse(*storagePerNode),
            Name:         "storage",
        })
    }

    log.Infof("Resources: %v", resources)

    est := nanny.LinearEstimator{
        Resources: resources,
    }

    // Begin nannying.
    nanny.PollAPIServer(k8s, est, *containerName, pollPeriod, uint64(*threshold))
}