This article collects typical usage examples of the Go function github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider.InitCloudProvider. If you are unsure what InitCloudProvider does, how to call it, or what real-world usage looks like, the curated examples below should help.
Fifteen code examples of InitCloudProvider are shown, sorted by popularity by default.
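Before the individual examples, here is a minimal sketch of the typical call pattern. It assumes the two-value signature InitCloudProvider(name, configFilePath) (cloudprovider.Interface, error) used by most of the examples below (a few older examples use a single-value form). The "gce" provider name, the blank provider import path, and the config file path are illustrative assumptions, not values taken from the examples.

package main

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
	// A provider package registers itself with the cloudprovider registry in its
	// init() function; the exact import path here is an assumption for this tree.
	_ "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/gce"

	"github.com/golang/glog"
)

func main() {
	// Look up the named provider and initialize it from the optional config file.
	// An empty provider name is typically treated as "no cloud provider".
	cloud, err := cloudprovider.InitCloudProvider("gce", "/etc/kubernetes/cloud.conf")
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}
	if cloud == nil {
		glog.Info("No cloud provider configured; continuing without one.")
	}
	_ = cloud // pass the provider to controllers, the scheduler, or the kubelet
}

In the examples that follow, the same call appears in the start-up paths of the scheduler, controller-manager, apiserver, and kubelet; the differences lie only in how the returned provider and error are threaded into each component's configuration.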
Example 1: Run
// Run runs the specified SchedulerServer. This should never exit.
func (s *SchedulerServer) Run(_ []string) error {
if s.Kubeconfig == "" && s.Master == "" {
glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
}
// This creates a client, first loading any specified kubeconfig
// file, and then overriding the Master flag, if non-empty.
kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
if err != nil {
return err
}
kubeconfig.QPS = 20.0
kubeconfig.Burst = 30
kubeClient, err := client.New(kubeconfig)
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
go func() {
mux := http.NewServeMux()
healthz.InstallHandler(mux)
if s.EnableProfiling {
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
}
mux.Handle("/metrics", prometheus.Handler())
server := &http.Server{
Addr: net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
Handler: mux,
}
glog.Fatal(server.ListenAndServe())
}()
configFactory := factory.NewConfigFactory(kubeClient)
config, err := s.createConfig(configFactory)
if err != nil {
glog.Fatalf("Failed to create scheduler configuration: %v", err)
}
config.Cloud, err = cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
if err != nil {
glog.Fatalf("Cloud provider could not be initialized: %v", err)
}
eventBroadcaster := record.NewBroadcaster()
config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"})
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
sched := scheduler.New(config)
sched.Run()
select {}
}
Example 2: Run
// Run runs the CMServer. This should never exit.
func (s *CMServer) Run(_ []string) error {
s.verifyMinionFlags()
if len(s.ClientConfig.Host) == 0 {
glog.Fatal("usage: controller-manager --master <master>")
}
kubeClient, err := client.New(&s.ClientConfig)
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
go func() {
if s.EnableProfiling {
mux := http.NewServeMux()
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
}
http.ListenAndServe(net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)), nil)
}()
endpoints := service.NewEndpointController(kubeClient)
go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10)
controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient)
controllerManager.Run(replicationControllerPkg.DefaultSyncPeriod)
kubeletClient, err := client.NewKubeletClient(&s.KubeletConfig)
if err != nil {
glog.Fatalf("Failure to start kubelet client: %v", err)
}
cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
nodeResources := &api.NodeResources{
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(s.NodeMilliCPU, resource.DecimalSI),
api.ResourceMemory: s.NodeMemory,
},
}
nodeController := nodeControllerPkg.NewNodeController(cloud, s.MinionRegexp, s.MachineList, nodeResources,
kubeClient, kubeletClient, s.RegisterRetryCount, s.PodEvictionTimeout)
nodeController.Run(s.NodeSyncPeriod, s.SyncNodeList, s.SyncNodeStatus)
resourceQuotaManager := resourcequota.NewResourceQuotaManager(kubeClient)
resourceQuotaManager.Run(s.ResourceQuotaSyncPeriod)
select {}
return nil
}
Example 3: main
func main() {
flag.Parse()
util.InitLogs()
defer util.FlushLogs()
verflag.PrintAndExitIfRequested()
verifyMinionFlags()
if len(clientConfig.Host) == 0 {
glog.Fatal("usage: controller-manager -master <master>")
}
kubeClient, err := client.New(clientConfig)
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
if int64(int(*nodeMilliCPU)) != *nodeMilliCPU {
glog.Warningf("node_milli_cpu is too big for platform. Clamping: %d -> %d",
*nodeMilliCPU, math.MaxInt32)
*nodeMilliCPU = math.MaxInt32
}
if int64(int(*nodeMemory)) != *nodeMemory {
glog.Warningf("node_memory is too big for platform. Clamping: %d -> %d",
*nodeMemory, math.MaxInt32)
*nodeMemory = math.MaxInt32
}
go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)
endpoints := service.NewEndpointController(kubeClient)
go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10)
controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient)
controllerManager.Run(10 * time.Second)
cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)
nodeResources := &api.NodeResources{
Capacity: api.ResourceList{
resources.CPU: util.NewIntOrStringFromInt(int(*nodeMilliCPU)),
resources.Memory: util.NewIntOrStringFromInt(int(*nodeMemory)),
},
}
minionController := minionControllerPkg.NewMinionController(cloud, *minionRegexp, machineList, nodeResources, kubeClient)
minionController.Run(10 * time.Second)
select {}
}
Example 4: main
func main() {
flag.Parse()
util.InitLogs()
defer util.FlushLogs()
verflag.PrintAndExitIfRequested()
verifyMinionFlags()
if len(clientConfig.Host) == 0 {
glog.Fatal("usage: controller-manager -master <master>")
}
kubeClient, err := client.New(clientConfig)
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
go http.ListenAndServe(net.JoinHostPort(address.String(), strconv.Itoa(*port)), nil)
endpoints := service.NewEndpointController(kubeClient)
go util.Forever(func() { endpoints.SyncServiceEndpoints() }, time.Second*10)
controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient)
controllerManager.Run(10 * time.Second)
cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)
nodeResources := &api.NodeResources{
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(*nodeMilliCPU, resource.DecimalSI),
api.ResourceMemory: *nodeMemory,
},
}
nodeController := nodeControllerPkg.NewNodeController(cloud, *minionRegexp, machineList, nodeResources, kubeClient)
nodeController.Run(10 * time.Second)
select {}
}
Example 5: BuildKubernetesMasterConfig
func BuildKubernetesMasterConfig(options configapi.MasterConfig, requestContextMapper kapi.RequestContextMapper, kubeClient *kclient.Client) (*MasterConfig, error) {
if options.KubernetesMasterConfig == nil {
return nil, errors.New("insufficient information to build KubernetesMasterConfig")
}
// Connect and setup etcd interfaces
etcdClient, err := etcd.GetAndTestEtcdClient(options.EtcdClientInfo)
if err != nil {
return nil, err
}
ketcdHelper, err := master.NewEtcdHelper(etcdClient, options.EtcdStorageConfig.KubernetesStorageVersion, options.EtcdStorageConfig.KubernetesStoragePrefix)
if err != nil {
return nil, fmt.Errorf("Error setting up Kubernetes server storage: %v", err)
}
kubeletClientConfig := configapi.GetKubeletClientConfig(options)
kubeletClient, err := kclient.NewKubeletClient(kubeletClientConfig)
if err != nil {
return nil, fmt.Errorf("unable to configure Kubelet client: %v", err)
}
// in-order list of plug-ins that should intercept admission decisions
// TODO: Push node environment support to upstream in future
_, portString, err := net.SplitHostPort(options.ServingInfo.BindAddress)
if err != nil {
return nil, err
}
port, err := strconv.Atoi(portString)
if err != nil {
return nil, err
}
portRange, err := util.ParsePortRange(options.KubernetesMasterConfig.ServicesNodePortRange)
if err != nil {
return nil, err
}
podEvictionTimeout, err := time.ParseDuration(options.KubernetesMasterConfig.PodEvictionTimeout)
if err != nil {
return nil, fmt.Errorf("unable to parse PodEvictionTimeout: %v", err)
}
server := app.NewAPIServer()
server.EventTTL = 2 * time.Hour
server.PortalNet = util.IPNet(flagtypes.DefaultIPNet(options.KubernetesMasterConfig.ServicesSubnet))
server.ServiceNodePorts = *portRange
server.AdmissionControl = strings.Join([]string{
"NamespaceExists", "NamespaceLifecycle", "OriginPodNodeEnvironment", "LimitRanger", "ServiceAccount", "SecurityContextConstraint", "ResourceQuota",
}, ",")
// resolve extended arguments
// TODO: this should be done in config validation (along with the above) so we can provide
// proper errors
if err := cmdflags.Resolve(options.KubernetesMasterConfig.APIServerArguments, server.AddFlags); len(err) > 0 {
return nil, kerrors.NewAggregate(err)
}
cmserver := cmapp.NewCMServer()
cmserver.PodEvictionTimeout = podEvictionTimeout
// resolve extended arguments
// TODO: this should be done in config validation (along with the above) so we can provide
// proper errors
if err := cmdflags.Resolve(options.KubernetesMasterConfig.ControllerArguments, cmserver.AddFlags); len(err) > 0 {
return nil, kerrors.NewAggregate(err)
}
cloud, err := cloudprovider.InitCloudProvider(cmserver.CloudProvider, cmserver.CloudConfigFile)
if err != nil {
return nil, err
}
admissionController := admission.NewFromPlugins(kubeClient, strings.Split(server.AdmissionControl, ","), server.AdmissionControlConfigFile)
m := &master.Config{
PublicAddress: net.ParseIP(options.KubernetesMasterConfig.MasterIP),
ReadWritePort: port,
ReadOnlyPort: port,
EtcdHelper: ketcdHelper,
EventTTL: server.EventTTL,
//MinRequestTimeout: server.MinRequestTimeout,
PortalNet: (*net.IPNet)(&server.PortalNet),
ServiceNodePorts: server.ServiceNodePorts,
RequestContextMapper: requestContextMapper,
KubeletClient: kubeletClient,
APIPrefix: KubeAPIPrefix,
EnableCoreControllers: true,
MasterCount: options.KubernetesMasterConfig.MasterCount,
Authorizer: apiserver.NewAlwaysAllowAuthorizer(),
AdmissionControl: admissionController,
DisableV1Beta1: true,
//......... (remaining code in this example omitted) .........
Example 6: Run
func (s *CMServer) Run(_ []string) error {
if s.Kubeconfig == "" && s.Master == "" {
glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
}
// This creates a client, first loading any specified kubeconfig
// file, and then overriding the Master flag, if non-empty.
kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
if err != nil {
return err
}
kubeconfig.QPS = 20.0
kubeconfig.Burst = 30
kubeClient, err := client.New(kubeconfig)
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
go func() {
mux := http.NewServeMux()
healthz.InstallHandler(mux)
if s.EnableProfiling {
profile.InstallHandler(mux)
}
mux.Handle("/metrics", prometheus.Handler())
server := &http.Server{
Addr: net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
Handler: mux,
}
glog.Fatal(server.ListenAndServe())
}()
endpoints := s.createEndpointController(kubeClient)
go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)
controllerManager := controller.NewReplicationManager(kubeClient, controller.BurstReplicas)
go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)
//TODO(jdef) should eventually support more cloud providers here
if s.CloudProvider != mesos.ProviderName {
glog.Fatalf("Only provider %v is supported, you specified %v", mesos.ProviderName, s.CloudProvider)
}
cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
nodeController := nodecontroller.NewNodeController(cloud, kubeClient, s.RegisterRetryCount,
s.PodEvictionTimeout, nodecontroller.NewPodEvictor(util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst)),
s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
nodeController.Run(s.NodeSyncPeriod)
serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
if err := serviceController.Run(s.NodeSyncPeriod); err != nil {
glog.Errorf("Failed to start service controller: %v", err)
}
if s.AllocateNodeCIDRs {
routes, ok := cloud.Routes()
if !ok {
glog.Fatal("Cloud provider must support routes if allocate-node-cidrs is set")
}
routeController := routecontroller.New(routes, kubeClient, s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
routeController.Run(s.NodeSyncPeriod)
}
resourceQuotaManager := resourcequota.NewResourceQuotaManager(kubeClient)
resourceQuotaManager.Run(s.ResourceQuotaSyncPeriod)
namespaceManager := namespace.NewNamespaceManager(kubeClient, s.NamespaceSyncPeriod)
namespaceManager.Run()
pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
pvclaimBinder.Run()
pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, app.ProbeRecyclableVolumePlugins())
if err != nil {
glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
}
pvRecycler.Run()
if len(s.ServiceAccountKeyFile) > 0 {
privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
if err != nil {
glog.Errorf("Error reading key for service account token controller: %v", err)
} else {
serviceaccount.NewTokensController(
kubeClient,
serviceaccount.DefaultTokenControllerOptions(
serviceaccount.JWTTokenGenerator(privateKey),
),
).Run()
}
}
serviceaccount.NewServiceAccountsController(
kubeClient,
serviceaccount.DefaultServiceAccountsControllerOptions(),
).Run()
//......... (remaining code in this example omitted) .........
Example 7: main
func main() {
flag.Parse()
util.InitLogs()
defer util.FlushLogs()
verflag.PrintAndExitIfRequested()
verifyPortalFlags()
if (*etcdConfigFile != "" && len(etcdServerList) != 0) || (*etcdConfigFile == "" && len(etcdServerList) == 0) {
glog.Fatalf("specify either -etcd_servers or -etcd_config")
}
capabilities.Initialize(capabilities.Capabilities{
AllowPrivileged: *allowPrivileged,
})
cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)
kubeletClient, err := client.NewKubeletClient(&kubeletConfig)
if err != nil {
glog.Fatalf("Failure to start kubelet client: %v", err)
}
_, v1beta3 := runtimeConfig["api/v1beta3"]
// TODO: expose same flags as client.BindClientConfigFlags but for a server
clientConfig := &client.Config{
Host: net.JoinHostPort(address.String(), strconv.Itoa(int(*port))),
Version: *storageVersion,
}
client, err := client.New(clientConfig)
if err != nil {
glog.Fatalf("Invalid server address: %v", err)
}
helper, err := newEtcd(*etcdConfigFile, etcdServerList)
if err != nil {
glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
}
n := net.IPNet(portalNet)
authenticator, err := apiserver.NewAuthenticatorFromTokenFile(*tokenAuthFile)
if err != nil {
glog.Fatalf("Invalid Authentication Config: %v", err)
}
authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(*authorizationMode, *authorizationPolicyFile)
if err != nil {
glog.Fatalf("Invalid Authorization Config: %v", err)
}
admissionControlPluginNames := strings.Split(*admissionControl, ",")
admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, *admissionControlConfigFile)
config := &master.Config{
Client: client,
Cloud: cloud,
EtcdHelper: helper,
HealthCheckMinions: *healthCheckMinions,
EventTTL: *eventTTL,
KubeletClient: kubeletClient,
PortalNet: &n,
EnableLogsSupport: *enableLogsSupport,
EnableUISupport: true,
EnableSwaggerSupport: true,
APIPrefix: *apiPrefix,
CorsAllowedOriginList: corsAllowedOriginList,
ReadOnlyPort: *readOnlyPort,
ReadWritePort: *port,
PublicAddress: *publicAddressOverride,
Authenticator: authenticator,
Authorizer: authorizer,
AdmissionControl: admissionController,
EnableV1Beta3: v1beta3,
MasterServiceNamespace: *masterServiceNamespace,
}
m := master.New(config)
// We serve on 3 ports. See docs/reaching_the_api.md
roLocation := ""
if *readOnlyPort != 0 {
roLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(config.ReadOnlyPort))
}
secureLocation := ""
if *securePort != 0 {
secureLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(*securePort))
}
rwLocation := net.JoinHostPort(address.String(), strconv.Itoa(int(*port)))
// See the flag commentary to understand our assumptions when opening the read-only and read-write ports.
if roLocation != "" {
// Allow 1 read-only request per second, allow up to 20 in a burst before enforcing.
rl := util.NewTokenBucketRateLimiter(1.0, 20)
readOnlyServer := &http.Server{
Addr: roLocation,
Handler: apiserver.RecoverPanics(apiserver.ReadOnly(apiserver.RateLimit(rl, m.InsecureHandler))),
ReadTimeout: 5 * time.Minute,
WriteTimeout: 5 * time.Minute,
//......... (remaining code in this example omitted) .........
Example 8: Run
// Run runs the specified KubeletServer. This should never exit.
func (s *KubeletServer) Run(_ []string) error {
util.ReallyCrash = s.ReallyCrashForTesting
rand.Seed(time.Now().UTC().UnixNano())
// TODO(vmarmol): Do this through container config.
if err := util.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
glog.Warning(err)
}
var apiclient *client.Client
clientConfig, err := s.CreateAPIServerClientConfig()
if err == nil {
apiclient, err = client.New(clientConfig)
}
if err != nil && len(s.APIServerList) > 0 {
glog.Warningf("No API client: %v", err)
}
glog.V(2).Infof("Using root directory: %v", s.RootDirectory)
credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)
cadvisorInterface, err := cadvisor.New(s.CadvisorPort)
if err != nil {
return err
}
imageGCPolicy := kubelet.ImageGCPolicy{
HighThresholdPercent: s.ImageGCHighThresholdPercent,
LowThresholdPercent: s.ImageGCLowThresholdPercent,
}
diskSpacePolicy := kubelet.DiskSpacePolicy{
DockerFreeDiskMB: s.LowDiskSpaceThresholdMB,
RootFreeDiskMB: s.LowDiskSpaceThresholdMB,
}
cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
if err != nil {
return err
}
tlsOptions, err := s.InitializeTLS()
if err != nil {
return err
}
mounter := mount.New()
if s.Containerized {
glog.V(2).Info("Running kubelet in containerized mode (experimental)")
mounter = &mount.NsenterMounter{}
}
var dockerExecHandler dockertools.ExecHandler
switch s.DockerExecHandlerName {
case "native":
dockerExecHandler = &dockertools.NativeExecHandler{}
case "nsenter":
dockerExecHandler = &dockertools.NsenterExecHandler{}
default:
glog.Warningf("Unknown Docker exec handler %q; defaulting to native", s.DockerExecHandlerName)
dockerExecHandler = &dockertools.NativeExecHandler{}
}
kcfg := KubeletConfig{
Address: s.Address,
AllowPrivileged: s.AllowPrivileged,
HostNetworkSources: hostNetworkSources,
HostnameOverride: s.HostnameOverride,
RootDirectory: s.RootDirectory,
ConfigFile: s.Config,
ManifestURL: s.ManifestURL,
FileCheckFrequency: s.FileCheckFrequency,
HTTPCheckFrequency: s.HTTPCheckFrequency,
PodInfraContainerImage: s.PodInfraContainerImage,
SyncFrequency: s.SyncFrequency,
RegistryPullQPS: s.RegistryPullQPS,
RegistryBurst: s.RegistryBurst,
MinimumGCAge: s.MinimumGCAge,
MaxPerPodContainerCount: s.MaxPerPodContainerCount,
MaxContainerCount: s.MaxContainerCount,
RegisterNode: s.RegisterNode,
StandaloneMode: (len(s.APIServerList) == 0),
ClusterDomain: s.ClusterDomain,
ClusterDNS: s.ClusterDNS,
Runonce: s.RunOnce,
Port: s.Port,
ReadOnlyPort: s.ReadOnlyPort,
CadvisorInterface: cadvisorInterface,
EnableServer: s.EnableServer,
EnableDebuggingHandlers: s.EnableDebuggingHandlers,
DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
KubeClient: apiclient,
MasterServiceNamespace: s.MasterServiceNamespace,
VolumePlugins: ProbeVolumePlugins(),
NetworkPlugins: ProbeNetworkPlugins(),
NetworkPluginName: s.NetworkPluginName,
//......... (remaining code in this example omitted) .........
Example 9: Run
// Run runs the specified APIServer. This should never exit.
func (s *APIServer) Run(_ []string) error {
s.verifyClusterIPFlags()
// If advertise-address is not specified, use bind-address. If bind-address
// is also unset (or 0.0.0.0), setDefaults() in pkg/master/master.go will
// do the right thing and use the host's default interface.
if s.AdvertiseAddress == nil || net.IP(s.AdvertiseAddress).IsUnspecified() {
s.AdvertiseAddress = s.BindAddress
}
if (s.EtcdConfigFile != "" && len(s.EtcdServerList) != 0) || (s.EtcdConfigFile == "" && len(s.EtcdServerList) == 0) {
glog.Fatalf("specify either --etcd-servers or --etcd-config")
}
capabilities.Initialize(capabilities.Capabilities{
AllowPrivileged: s.AllowPrivileged,
// TODO(vmarmol): Implement support for HostNetworkSources.
HostNetworkSources: []string{},
})
cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
if err != nil {
glog.Fatalf("Cloud provider could not be initialized: %v", err)
}
kubeletClient, err := client.NewKubeletClient(&s.KubeletConfig)
if err != nil {
glog.Fatalf("Failure to start kubelet client: %v", err)
}
// "api/all=false" allows users to selectively enable specific api versions.
disableAllAPIs := false
allAPIFlagValue, ok := s.RuntimeConfig["api/all"]
if ok && allAPIFlagValue == "false" {
disableAllAPIs = true
}
// "api/legacy=false" allows users to disable legacy api versions.
disableLegacyAPIs := false
legacyAPIFlagValue, ok := s.RuntimeConfig["api/legacy"]
if ok && legacyAPIFlagValue == "false" {
disableLegacyAPIs = true
}
_ = disableLegacyAPIs // hush the compiler while we don't have legacy APIs to disable.
// v1beta3 is disabled by default. Users can enable it using "api/v1beta3=true"
enableV1beta3 := s.getRuntimeConfigValue("api/v1beta3", false)
// "api/v1={true|false} allows users to enable/disable v1 API.
// This takes preference over api/all and api/legacy, if specified.
disableV1 := disableAllAPIs
disableV1 = !s.getRuntimeConfigValue("api/v1", !disableV1)
// TODO: expose same flags as client.BindClientConfigFlags but for a server
clientConfig := &client.Config{
Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)),
Version: s.StorageVersion,
}
client, err := client.New(clientConfig)
if err != nil {
glog.Fatalf("Invalid server address: %v", err)
}
helper, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, s.StorageVersion, s.EtcdPathPrefix)
if err != nil {
glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
}
// TODO Is this the right place for migration to happen? Must *both* old and
// new etcd prefix params be supplied for this to be valid?
if s.OldEtcdPathPrefix != "" {
if err = helper.MigrateKeys(s.OldEtcdPathPrefix); err != nil {
glog.Fatalf("Migration of old etcd keys failed: %v", err)
}
}
n := net.IPNet(s.ServiceClusterIPRange)
// Default to the private server key for service account token signing
if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" {
if apiserver.IsValidServiceAccountKeyFile(s.TLSPrivateKeyFile) {
s.ServiceAccountKeyFile = s.TLSPrivateKeyFile
} else {
glog.Warning("no RSA key provided, service account token authentication disabled")
}
}
authenticator, err := apiserver.NewAuthenticator(s.BasicAuthFile, s.ClientCAFile, s.TokenAuthFile, s.ServiceAccountKeyFile, s.ServiceAccountLookup, helper)
if err != nil {
glog.Fatalf("Invalid Authentication Config: %v", err)
}
authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(s.AuthorizationMode, s.AuthorizationPolicyFile)
if err != nil {
glog.Fatalf("Invalid Authorization Config: %v", err)
}
admissionControlPluginNames := strings.Split(s.AdmissionControl, ",")
admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, s.AdmissionControlConfigFile)
//......... (remaining code in this example omitted) .........
Example 10: Run
// Run runs the specified KubeletConfig and fills out any config that is not defined. This should never exit.
func (s *KubeletServer) Run(kcfg *KubeletConfig) error {
if kcfg == nil {
cfg, err := s.KubeletConfig()
if err != nil {
return err
}
kcfg = cfg
}
if kcfg.KubeClient == nil {
client, err := s.createAPIServerClient()
if err != nil && len(s.APIServerList) > 0 {
glog.Warningf("No API client: %v", err)
}
kcfg.KubeClient = client
}
if kcfg.DockerClient == nil {
kcfg.DockerClient = dockertools.ConnectToDockerOrDie(s.DockerEndpoint)
}
if kcfg.CadvisorInterface == nil {
cadvisorInterface, err := cadvisor.New(s.CadvisorPort)
if err != nil {
return err
}
kcfg.CadvisorInterface = cadvisorInterface
}
if kcfg.Cloud == nil && s.CloudProvider != "" {
cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
if err != nil {
return err
}
glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
kcfg.Cloud = cloud
}
util.ReallyCrash = s.ReallyCrashForTesting
rand.Seed(time.Now().UTC().UnixNano())
// TODO(vmarmol): Do this through container config.
if err := util.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
glog.Warning(err)
}
glog.V(2).Infof("Using root directory: %v", s.RootDirectory)
credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)
if err := RunKubelet(kcfg, nil); err != nil {
return err
}
if s.HealthzPort > 0 {
healthz.DefaultHealthz()
go util.Forever(func() {
err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress.String(), strconv.Itoa(s.HealthzPort)), nil)
if err != nil {
glog.Errorf("Starting health server failed: %v", err)
}
}, 5*time.Second)
}
if s.RunOnce {
return nil
}
// run forever
select {}
}
Example 11: Run
// Run runs the CMServer. This should never exit.
func (s *CMServer) Run(_ []string) error {
if s.Kubeconfig == "" && s.Master == "" {
glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
}
// This creates a client, first loading any specified kubeconfig
// file, and then overriding the Master flag, if non-empty.
kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
if err != nil {
return err
}
kubeconfig.QPS = 20.0
kubeconfig.Burst = 30
kubeClient, err := client.New(kubeconfig)
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
go func() {
mux := http.NewServeMux()
healthz.InstallHandler(mux)
if s.EnableProfiling {
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
}
mux.Handle("/metrics", prometheus.Handler())
server := &http.Server{
Addr: net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
Handler: mux,
}
glog.Fatal(server.ListenAndServe())
}()
endpoints := service.NewEndpointController(kubeClient)
go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)
controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient, replicationControllerPkg.BurstReplicas)
go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)
cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
if err != nil {
glog.Fatalf("Cloud provider could not be initialized: %v", err)
}
nodeController := nodecontroller.NewNodeController(cloud, kubeClient, s.RegisterRetryCount,
s.PodEvictionTimeout, nodecontroller.NewPodEvictor(util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst)),
s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
nodeController.Run(s.NodeSyncPeriod)
serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
glog.Errorf("Failed to start service controller: %v", err)
}
if s.AllocateNodeCIDRs {
if cloud == nil {
glog.Warning("allocate-node-cidrs is set, but no cloud provider specified. Will not manage routes.")
} else if routes, ok := cloud.Routes(); !ok {
glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
} else {
routeController := routecontroller.New(routes, kubeClient, s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
routeController.Run(s.NodeSyncPeriod)
}
}
resourceQuotaManager := resourcequota.NewResourceQuotaManager(kubeClient)
resourceQuotaManager.Run(s.ResourceQuotaSyncPeriod)
namespaceManager := namespace.NewNamespaceManager(kubeClient, s.NamespaceSyncPeriod)
namespaceManager.Run()
pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
pvclaimBinder.Run()
pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins())
if err != nil {
glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
}
pvRecycler.Run()
var rootCA []byte
if s.RootCAFile != "" {
rootCA, err = ioutil.ReadFile(s.RootCAFile)
if err != nil {
return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
}
if _, err := util.CertsFromPEM(rootCA); err != nil {
return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
}
} else {
rootCA = kubeconfig.CAData
}
//......... (remaining code in this example omitted) .........
Example 12: Run
// Run runs the specified APIServer. This should never exit.
func (s *APIServer) Run(_ []string) error {
s.verifyPortalFlags()
if (s.EtcdConfigFile != "" && len(s.EtcdServerList) != 0) || (s.EtcdConfigFile == "" && len(s.EtcdServerList) == 0) {
glog.Fatalf("specify either --etcd-servers or --etcd-config")
}
capabilities.Initialize(capabilities.Capabilities{
AllowPrivileged: s.AllowPrivileged,
// TODO(vmarmol): Implement support for HostNetworkSources.
HostNetworkSources: []string{},
})
cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
if err != nil {
glog.Fatalf("Cloud provider could not be initialized: %v", err)
}
kubeletClient, err := client.NewKubeletClient(&s.KubeletConfig)
if err != nil {
glog.Fatalf("Failure to start kubelet client: %v", err)
}
// "api/all=false" allows users to selectively enable specific api versions.
disableAllAPIs := false
allAPIFlagValue, ok := s.RuntimeConfig["api/all"]
if ok && allAPIFlagValue == "false" {
disableAllAPIs = true
}
// "api/legacy=false" allows users to disable legacy api versions.
// Right now, v1beta1 and v1beta2 are considered legacy.
disableLegacyAPIs := false
legacyAPIFlagValue, ok := s.RuntimeConfig["api/legacy"]
if ok && legacyAPIFlagValue == "false" {
disableLegacyAPIs = true
}
// "api/v1beta1={true|false} allows users to enable/disable v1beta1 API.
// This takes preference over api/all and api/legacy, if specified.
disableV1beta1 := disableAllAPIs || disableLegacyAPIs
disableV1beta1 = !s.getRuntimeConfigValue("api/v1beta1", !disableV1beta1)
// "api/v1beta2={true|false} allows users to enable/disable v1beta2 API.
// This takes preference over api/all and api/legacy, if specified.
disableV1beta2 := disableAllAPIs || disableLegacyAPIs
disableV1beta2 = !s.getRuntimeConfigValue("api/v1beta2", !disableV1beta2)
// "api/v1beta3={true|false} allows users to enable/disable v1beta3 API.
// This takes preference over api/all and api/legacy, if specified.
disableV1beta3 := disableAllAPIs
disableV1beta3 = !s.getRuntimeConfigValue("api/v1beta3", !disableV1beta3)
// V1 is disabled by default. Users can enable it using "api/v1={true}".
_, enableV1 := s.RuntimeConfig["api/v1"]
// TODO: expose same flags as client.BindClientConfigFlags but for a server
clientConfig := &client.Config{
Host: net.JoinHostPort(s.InsecureBindAddress.String(), strconv.Itoa(s.InsecurePort)),
Version: s.StorageVersion,
}
client, err := client.New(clientConfig)
if err != nil {
glog.Fatalf("Invalid server address: %v", err)
}
helper, err := newEtcd(s.EtcdConfigFile, s.EtcdServerList, s.StorageVersion, s.EtcdPathPrefix)
if err != nil {
glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
}
// TODO Is this the right place for migration to happen? Must *both* old and
// new etcd prefix params be supplied for this to be valid?
if s.OldEtcdPathPrefix != "" {
if err = helper.MigrateKeys(s.OldEtcdPathPrefix); err != nil {
glog.Fatalf("Migration of old etcd keys failed: %v", err)
}
}
n := net.IPNet(s.PortalNet)
// Default to the private server key for service account token signing
if s.ServiceAccountKeyFile == "" && s.TLSPrivateKeyFile != "" {
s.ServiceAccountKeyFile = s.TLSPrivateKeyFile
}
authenticator, err := apiserver.NewAuthenticator(s.BasicAuthFile, s.ClientCAFile, s.TokenAuthFile, s.ServiceAccountKeyFile, s.ServiceAccountLookup, helper)
if err != nil {
glog.Fatalf("Invalid Authentication Config: %v", err)
}
authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(s.AuthorizationMode, s.AuthorizationPolicyFile)
if err != nil {
glog.Fatalf("Invalid Authorization Config: %v", err)
}
admissionControlPluginNames := strings.Split(s.AdmissionControl, ",")
admissionController := admission.NewFromPlugins(client, admissionControlPluginNames, s.AdmissionControlConfigFile)
if len(s.ExternalHost) == 0 {
//......... (remaining code in this example omitted) .........
Example 13: Run
// Run runs the specified KubeletServer. This should never exit.
func (s *KubeletServer) Run(_ []string) error {
util.ReallyCrash = s.ReallyCrashForTesting
rand.Seed(time.Now().UTC().UnixNano())
// TODO(vmarmol): Do this through container config.
if err := util.ApplyOomScoreAdj(0, s.OOMScoreAdj); err != nil {
glog.Info(err)
}
client, err := s.createAPIServerClient()
if err != nil && len(s.APIServerList) > 0 {
glog.Warningf("No API client: %v", err)
}
glog.Infof("Using root directory: %v", s.RootDirectory)
credentialprovider.SetPreferredDockercfgPath(s.RootDirectory)
cadvisorInterface, err := cadvisor.New(s.CadvisorPort)
if err != nil {
return err
}
imageGCPolicy := kubelet.ImageGCPolicy{
HighThresholdPercent: s.ImageGCHighThresholdPercent,
LowThresholdPercent: s.ImageGCLowThresholdPercent,
}
cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
glog.Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
hostNetworkSources, err := kubelet.GetValidatedSources(strings.Split(s.HostNetworkSources, ","))
if err != nil {
return err
}
if s.TLSCertFile == "" && s.TLSPrivateKeyFile == "" {
s.TLSCertFile = path.Join(s.CertDirectory, "kubelet.crt")
s.TLSPrivateKeyFile = path.Join(s.CertDirectory, "kubelet.key")
if err := util.GenerateSelfSignedCert(util.GetHostname(s.HostnameOverride), s.TLSCertFile, s.TLSPrivateKeyFile); err != nil {
glog.Fatalf("Unable to generate self signed cert: %v", err)
}
glog.Infof("Using self-signed cert (%s, %s)", s.TLSCertFile, s.TLSPrivateKeyFile)
}
tlsOptions := &kubelet.TLSOptions{
Config: &tls.Config{
// Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability).
MinVersion: tls.VersionTLS10,
// Populate PeerCertificates in requests, but don't yet reject connections without certificates.
ClientAuth: tls.RequestClientCert,
},
CertFile: s.TLSCertFile,
KeyFile: s.TLSPrivateKeyFile,
}
kcfg := KubeletConfig{
Address: s.Address,
AllowPrivileged: s.AllowPrivileged,
HostNetworkSources: hostNetworkSources,
HostnameOverride: s.HostnameOverride,
RootDirectory: s.RootDirectory,
ConfigFile: s.Config,
ManifestURL: s.ManifestURL,
FileCheckFrequency: s.FileCheckFrequency,
HTTPCheckFrequency: s.HTTPCheckFrequency,
PodInfraContainerImage: s.PodInfraContainerImage,
SyncFrequency: s.SyncFrequency,
RegistryPullQPS: s.RegistryPullQPS,
RegistryBurst: s.RegistryBurst,
MinimumGCAge: s.MinimumGCAge,
MaxPerPodContainerCount: s.MaxPerPodContainerCount,
MaxContainerCount: s.MaxContainerCount,
ClusterDomain: s.ClusterDomain,
ClusterDNS: s.ClusterDNS,
Runonce: s.RunOnce,
Port: s.Port,
ReadOnlyPort: s.ReadOnlyPort,
CadvisorInterface: cadvisorInterface,
EnableServer: s.EnableServer,
EnableDebuggingHandlers: s.EnableDebuggingHandlers,
DockerClient: dockertools.ConnectToDockerOrDie(s.DockerEndpoint),
KubeClient: client,
MasterServiceNamespace: s.MasterServiceNamespace,
VolumePlugins: ProbeVolumePlugins(),
NetworkPlugins: ProbeNetworkPlugins(),
NetworkPluginName: s.NetworkPluginName,
StreamingConnectionIdleTimeout: s.StreamingConnectionIdleTimeout,
TLSOptions: tlsOptions,
ImageGCPolicy: imageGCPolicy,
Cloud: cloud,
NodeStatusUpdateFrequency: s.NodeStatusUpdateFrequency,
ResourceContainer: s.ResourceContainer,
}
RunKubelet(&kcfg, nil)
if s.HealthzPort > 0 {
healthz.DefaultHealthz()
go util.Forever(func() {
//......... (remaining code in this example omitted) .........
Example 14: Run
// Run runs the CMServer. This should never exit.
func (s *CMServer) Run(_ []string) error {
s.verifyMinionFlags()
if s.Kubeconfig == "" && s.Master == "" {
glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
}
// This creates a client, first loading any specified kubeconfig
// file, and then overriding the Master flag, if non-empty.
kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
if err != nil {
return err
}
kubeconfig.QPS = 20.0
kubeconfig.Burst = 30
kubeClient, err := client.New(kubeconfig)
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
go func() {
mux := http.NewServeMux()
healthz.InstallHandler(mux)
if s.EnableProfiling {
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
}
mux.Handle("/metrics", prometheus.Handler())
server := &http.Server{
Addr: net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
Handler: mux,
}
glog.Fatal(server.ListenAndServe())
}()
endpoints := service.NewEndpointController(kubeClient)
go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)
controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient)
controllerManager.Run(replicationControllerPkg.DefaultSyncPeriod)
cloud := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
nodeResources := &api.NodeResources{
Capacity: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(s.NodeMilliCPU, resource.DecimalSI),
api.ResourceMemory: s.NodeMemory,
},
}
if s.SyncNodeStatus {
glog.Warning("DEPRECATION NOTICE: sync_node_status flag is being deprecated. It has no effect now and it will be removed in a future version.")
}
nodeController := nodecontroller.NewNodeController(cloud, s.MinionRegexp, s.MachineList, nodeResources,
kubeClient, s.RegisterRetryCount, s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, s.ClusterName)
nodeController.Run(s.NodeSyncPeriod, s.SyncNodeList)
serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
if err := serviceController.Run(); err != nil {
glog.Errorf("Failed to start service controller: %v", err)
}
resourceQuotaManager := resourcequota.NewResourceQuotaManager(kubeClient)
resourceQuotaManager.Run(s.ResourceQuotaSyncPeriod)
namespaceManager := namespace.NewNamespaceManager(kubeClient, s.NamespaceSyncPeriod)
namespaceManager.Run()
select {}
return nil
}
Example 15: main
func main() {
flag.Parse()
util.InitLogs()
defer util.FlushLogs()
verflag.PrintAndExitIfRequested()
verifyPortalFlags()
if (*etcdConfigFile != "" && len(etcdServerList) != 0) || (*etcdConfigFile == "" && len(etcdServerList) == 0) {
glog.Fatalf("specify either -etcd_servers or -etcd_config")
}
capabilities.Initialize(capabilities.Capabilities{
AllowPrivileged: *allowPrivileged,
})
cloud := cloudprovider.InitCloudProvider(*cloudProvider, *cloudConfigFile)
kubeletClient, err := client.NewKubeletClient(&kubeletConfig)
if err != nil {
glog.Fatalf("Failure to start kubelet client: %v", err)
}
// TODO: expose same flags as client.BindClientConfigFlags but for a server
clientConfig := &client.Config{
Host: net.JoinHostPort(address.String(), strconv.Itoa(int(*port))),
Version: *storageVersion,
}
client, err := client.New(clientConfig)
if err != nil {
glog.Fatalf("Invalid server address: %v", err)
}
helper, err := newEtcd(*etcdConfigFile, etcdServerList)
if err != nil {
glog.Fatalf("Invalid storage version or misconfigured etcd: %v", err)
}
n := net.IPNet(portalNet)
authenticator, err := apiserver.NewAuthenticatorFromTokenFile(*tokenAuthFile)
if err != nil {
glog.Fatalf("Invalid Authentication Config: %v", err)
}
authorizer, err := apiserver.NewAuthorizerFromAuthorizationConfig(*authorizationMode, *authorizationPolicyFile)
if err != nil {
glog.Fatalf("Invalid Authorization Config: %v", err)
}
config := &master.Config{
Client: client,
Cloud: cloud,
EtcdHelper: helper,
HealthCheckMinions: *healthCheckMinions,
EventTTL: *eventTTL,
KubeletClient: kubeletClient,
PortalNet: &n,
EnableLogsSupport: *enableLogsSupport,
EnableUISupport: true,
APIPrefix: *apiPrefix,
CorsAllowedOriginList: corsAllowedOriginList,
ReadOnlyPort: *readOnlyPort,
ReadWritePort: *port,
PublicAddress: *publicAddressOverride,
Authenticator: authenticator,
Authorizer: authorizer,
}
m := master.New(config)
// We serve on 3 ports. See docs/reaching_the_api.md
roLocation := ""
if *readOnlyPort != 0 {
roLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(config.ReadOnlyPort))
}
secureLocation := ""
if *securePort != 0 {
secureLocation = net.JoinHostPort(config.PublicAddress, strconv.Itoa(*securePort))
}
rwLocation := net.JoinHostPort(address.String(), strconv.Itoa(int(*port)))
// See the flag commentary to understand our assumptions when opening the read-only and read-write ports.
if roLocation != "" {
// Allow 1 read-only request per second, allow up to 20 in a burst before enforcing.
rl := util.NewTokenBucketRateLimiter(1.0, 20)
readOnlyServer := &http.Server{
Addr: roLocation,
Handler: apiserver.RecoverPanics(apiserver.ReadOnly(apiserver.RateLimit(rl, m.InsecureHandler))),
ReadTimeout: 5 * time.Minute,
WriteTimeout: 5 * time.Minute,
MaxHeaderBytes: 1 << 20,
}
glog.Infof("Serving read-only insecurely on %s", roLocation)
go func() {
defer util.HandleCrash()
for {
if err := readOnlyServer.ListenAndServe(); err != nil {
glog.Errorf("Unable to listen for read only traffic (%v); will try again.", err)
}
//......... (remaining code in this example omitted) .........