This page collects typical usage examples of the Golang function DefaultServiceAccountsControllerOptions from the package k8s.io/kubernetes/pkg/controller/serviceaccount. If you are unsure what DefaultServiceAccountsControllerOptions does or how to call it, the curated examples below should help.
Fifteen code examples of DefaultServiceAccountsControllerOptions are shown, sorted by popularity by default.
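All fifteen examples share the same pattern: take the default options (which ensure a "default" ServiceAccount exists in every namespace), optionally adjust the set of managed ServiceAccounts, and hand the options to NewServiceAccountsController before starting it. The minimal sketch below, written against the older client-based constructor used in Examples 4-14, is an illustration rather than a definitive recipe: the apiserver address is a placeholder, and the import paths and constructor signature changed across Kubernetes releases (Examples 1 and 15 show the newer form, which takes shared informers, a worker count, and a stop channel; customizing the managed accounts is done via options.Names in Example 2 or options.ServiceAccounts in Example 3).

package main

import (
	client "k8s.io/kubernetes/pkg/client/unversioned"
	serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
)

func main() {
	// Client for a local, unsecured apiserver (the address is a placeholder).
	kubeClient := client.NewOrDie(&client.Config{Host: "http://127.0.0.1:8080"})

	// DefaultServiceAccountsControllerOptions pre-populates the list of managed
	// ServiceAccounts; adjust options.Names or options.ServiceAccounts here if
	// additional accounts should be maintained (see Examples 2 and 3).
	options := serviceaccountcontroller.DefaultServiceAccountsControllerOptions()

	// In these older releases Run starts the controller's watch loops and returns,
	// so the caller blocks to keep the process alive.
	serviceaccountcontroller.NewServiceAccountsController(kubeClient, options).Run()
	select {}
}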
Example 1: startServiceAccountController
func startServiceAccountController(ctx ControllerContext) (bool, error) {
	go serviceaccountcontroller.NewServiceAccountsController(
		ctx.InformerFactory.ServiceAccounts(),
		ctx.InformerFactory.Namespaces(),
		ctx.ClientBuilder.ClientOrDie("service-account-controller"),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run(1, ctx.Stop)
	return true, nil
}
Example 2: RunServiceAccountsController
// RunServiceAccountsController starts the service account controller
func (c *MasterConfig) RunServiceAccountsController() {
	if len(c.Options.ServiceAccountConfig.ManagedNames) == 0 {
		glog.Infof("Skipped starting Service Account Manager, no managed names specified")
		return
	}
	options := serviceaccount.DefaultServiceAccountsControllerOptions()
	options.Names = util.NewStringSet(c.Options.ServiceAccountConfig.ManagedNames...)
	serviceaccount.NewServiceAccountsController(c.KubeClient(), options).Run()
	glog.Infof("Started Service Account Manager")
}
Example 3: RunServiceAccountsController
// RunServiceAccountsController starts the service account controller
func (c *MasterConfig) RunServiceAccountsController() {
	if len(c.Options.ServiceAccountConfig.ManagedNames) == 0 {
		glog.Infof("Skipped starting Service Account Manager, no managed names specified")
		return
	}
	options := sacontroller.DefaultServiceAccountsControllerOptions()
	options.ServiceAccounts = []kapi.ServiceAccount{}
	for _, saName := range c.Options.ServiceAccountConfig.ManagedNames {
		sa := kapi.ServiceAccount{}
		sa.Name = saName
		options.ServiceAccounts = append(options.ServiceAccounts, sa)
	}
	sacontroller.NewServiceAccountsController(clientadapter.FromUnversionedClient(c.KubeClient()), options).Run()
}
Example 4: startServiceAccountTestServer
// startServiceAccountTestServer returns a started server
// It is the responsibility of the caller to ensure the returned stopFunc is called
func startServiceAccountTestServer(t *testing.T) (*clientset.Clientset, restclient.Config, func()) {
	deleteAllEtcdKeys()
	// Listener
	var m *master.Master
	apiServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	// Anonymous client config
	clientConfig := restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}}
	// Root client
	// TODO: remove rootClient after we refactor pkg/admission to use the clientset.
	rootClientset := clientset.NewForConfigOrDie(&restclient.Config{Host: apiServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, BearerToken: rootToken})
	// Set up two authenticators:
	// 1. A token authenticator that maps the rootToken to the "root" user
	// 2. A ServiceAccountToken authenticator that validates ServiceAccount tokens
	rootTokenAuth := authenticator.TokenFunc(func(token string) (user.Info, bool, error) {
		if token == rootToken {
			return &user.DefaultInfo{rootUserName, "", []string{}}, true, nil
		}
		return nil, false, nil
	})
	serviceAccountKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	serviceAccountTokenGetter := serviceaccountcontroller.NewGetterFromClient(rootClientset)
	serviceAccountTokenAuth := serviceaccount.JWTTokenAuthenticator([]*rsa.PublicKey{&serviceAccountKey.PublicKey}, true, serviceAccountTokenGetter)
	authenticator := union.New(
		bearertoken.New(rootTokenAuth),
		bearertoken.New(serviceAccountTokenAuth),
	)
	// Set up a stub authorizer:
	// 1. The "root" user is allowed to do anything
	// 2. ServiceAccounts named "ro" are allowed read-only operations in their namespace
	// 3. ServiceAccounts named "rw" are allowed any operation in their namespace
	authorizer := authorizer.AuthorizerFunc(func(attrs authorizer.Attributes) error {
		username := attrs.GetUserName()
		ns := attrs.GetNamespace()
		// If the user is "root"...
		if username == rootUserName {
			// allow them to do anything
			return nil
		}
		// If the user is a service account...
		if serviceAccountNamespace, serviceAccountName, err := serviceaccount.SplitUsername(username); err == nil {
			// Limit them to their own namespace
			if serviceAccountNamespace == ns {
				switch serviceAccountName {
				case readOnlyServiceAccountName:
					if attrs.IsReadOnly() {
						return nil
					}
				case readWriteServiceAccountName:
					return nil
				}
			}
		}
		return fmt.Errorf("User %s is denied (ns=%s, readonly=%v, resource=%s)", username, ns, attrs.IsReadOnly(), attrs.GetResource())
	})
	// Set up admission plugin to auto-assign serviceaccounts to pods
	serviceAccountAdmission := serviceaccountadmission.NewServiceAccount(rootClientset)
	masterConfig := framework.NewMasterConfig()
	masterConfig.EnableIndex = true
	masterConfig.Authenticator = authenticator
	masterConfig.Authorizer = authorizer
	masterConfig.AdmissionControl = serviceAccountAdmission
	// Create a master and install handlers into mux.
	m, err := master.New(masterConfig)
	if err != nil {
		t.Fatalf("Error in bringing up the master: %v", err)
	}
	// Start the service account and service account token controllers
	tokenController := serviceaccountcontroller.NewTokensController(rootClientset, serviceaccountcontroller.TokensControllerOptions{TokenGenerator: serviceaccount.JWTTokenGenerator(serviceAccountKey)})
	tokenController.Run()
	serviceAccountController := serviceaccountcontroller.NewServiceAccountsController(rootClientset, serviceaccountcontroller.DefaultServiceAccountsControllerOptions())
	serviceAccountController.Run()
	// Start the admission plugin reflectors
	serviceAccountAdmission.Run()
	stop := func() {
		tokenController.Stop()
		serviceAccountController.Stop()
		serviceAccountAdmission.Stop()
		// TODO: Uncomment when fix #19254
		// apiServer.Close()
	}
	return rootClientset, clientConfig, stop
}
Example 5: Run
//......... (part of the code omitted here) .........
	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO(k8s): this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			hpaClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "horizontal-pod-autoscaler"))
			metricsClient := metrics.NewHeapsterMetricsClient(
				hpaClient,
				metrics.DefaultHeapsterNamespace,
				metrics.DefaultHeapsterScheme,
				metrics.DefaultHeapsterService,
				metrics.DefaultHeapsterPort,
			)
			go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient, s.HorizontalPodAutoscalerSyncPeriod.Duration).
				Run(wait.NeverStop)
		}
		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), s.resyncPeriod, int(s.LookupCacheSizeForDaemonSet)).
				Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop)
		}
		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "job-controller")), s.resyncPeriod).
				Run(int(s.ConcurrentJobSyncs), wait.NeverStop)
		}
		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "deployment-controller")), s.resyncPeriod).
				Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop)
		}
		if containsResource(resources, "replicasets") {
			glog.Infof("Starting ReplicaSet controller")
			go replicaset.NewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), s.resyncPeriod, replicaset.BurstReplicas, int(s.LookupCacheSizeForRS)).
				Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
		}
	}
	alphaProvisioner, err := kubecontrollermanager.NewAlphaVolumeProvisioner(cloud, s.VolumeConfiguration)
	if err != nil {
		glog.Fatalf("An backward-compatible provisioner could not be created: %v, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.", err)
	}
	volumeController := persistentvolumecontroller.NewPersistentVolumeController(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
		s.PVClaimBinderSyncPeriod.Duration,
		alphaProvisioner,
		kubecontrollermanager.ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration),
		cloud,
		s.ClusterName,
		nil, // volumeSource
		nil, // claimSource
		nil, // classSource
		nil, // eventRecorder
		s.VolumeConfiguration.EnableDynamicProvisioning,
	)
	volumeController.Run(wait.NeverStop)
	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := crypto.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			go serviceaccountcontroller.NewTokensController(
				clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "tokens-controller")),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA: rootCA,
				},
			).Run(int(s.ConcurrentSATokenSyncs), wait.NeverStop)
		}
	}
	serviceaccountcontroller.NewServiceAccountsController(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "service-account-controller")),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run()
	select {}
}
Example 6: Run
//......... (part of the code omitted here) .........
	endpoints := endpointcontroller.NewEndpointController(kubeClient)
	go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)
	controllerManager := replicationControllerPkg.NewReplicationManager(kubeClient, replicationControllerPkg.BurstReplicas)
	go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)
	go daemon.NewDaemonSetsController(kubeClient).
		Run(s.ConcurrentDSCSyncs, util.NeverStop)
	go job.NewJobManager(kubeClient).
		Run(s.ConcurrentJobSyncs, util.NeverStop)
	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}
	nodeController := nodecontroller.NewNodeController(cloud, kubeClient,
		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, &s.ClusterCIDR, s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod)
	serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
	if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}
	if s.AllocateNodeCIDRs {
		if cloud == nil {
			glog.Warning("allocate-node-cidrs is set, but no cloud provider specified. Will not manage routes.")
		} else if routes, ok := cloud.Routes(); !ok {
			glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
		} else {
			routeController := routecontroller.New(routes, kubeClient, s.ClusterName, &s.ClusterCIDR)
			routeController.Run(s.NodeSyncPeriod)
		}
	}
	resourceQuotaController := resourcequotacontroller.NewResourceQuotaController(kubeClient)
	resourceQuotaController.Run(s.ResourceQuotaSyncPeriod)
	// An OR of all flags to enable/disable experimental features
	experimentalMode := s.EnableHorizontalPodAutoscaler || s.EnableDeploymentController
	namespaceController := namespacecontroller.NewNamespaceController(kubeClient, experimentalMode, s.NamespaceSyncPeriod)
	namespaceController.Run()
	if s.EnableHorizontalPodAutoscaler {
		horizontalPodAutoscalerController := podautoscaler.NewHorizontalController(kubeClient, metrics.NewHeapsterMetricsClient(kubeClient))
		horizontalPodAutoscalerController.Run(s.HorizontalPodAutoscalerSyncPeriod)
	}
	if s.EnableDeploymentController {
		deploymentController := deployment.New(kubeClient)
		deploymentController.Run(s.DeploymentControllerSyncPeriod)
	}
	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()
	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()
	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccount.NewTokensController(
				kubeClient,
				serviceaccount.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA: rootCA,
				},
			).Run()
		}
	}
	serviceaccount.NewServiceAccountsController(
		kubeClient,
		serviceaccount.DefaultServiceAccountsControllerOptions(),
	).Run()
	select {}
}
Example 7: StartControllers
//......... (part of the code omitted here) .........
glog.Infof("Starting scheduledjob controller")
// // TODO: this is a temp fix for allowing kubeClient list v2alpha1 sj, should switch to using clientset
kubeconfig.ContentConfig.GroupVersion = &unversioned.GroupVersion{Group: batch.GroupName, Version: "v2alpha1"}
go scheduledjob.NewScheduledJobController(client("scheduledjob-controller")).
Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
} else {
glog.Infof("Not starting %s apis", groupVersion)
}
alphaProvisioner, err := NewAlphaVolumeProvisioner(cloud, s.VolumeConfiguration)
if err != nil {
glog.Fatalf("An backward-compatible provisioner could not be created: %v, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.", err)
}
params := persistentvolumecontroller.ControllerParameters{
KubeClient: client("persistent-volume-binder"),
SyncPeriod: s.PVClaimBinderSyncPeriod.Duration,
AlphaProvisioner: alphaProvisioner,
VolumePlugins: ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration),
Cloud: cloud,
ClusterName: s.ClusterName,
EnableDynamicProvisioning: s.VolumeConfiguration.EnableDynamicProvisioning,
}
volumeController := persistentvolumecontroller.NewController(params)
volumeController.Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
attachDetachController, attachDetachControllerErr :=
attachdetach.NewAttachDetachController(
client("attachdetach-controller"),
sharedInformers.Pods().Informer(),
sharedInformers.Nodes().Informer(),
sharedInformers.PersistentVolumeClaims().Informer(),
sharedInformers.PersistentVolumes().Informer(),
cloud,
ProbeAttachableVolumePlugins(s.VolumeConfiguration),
recorder)
if attachDetachControllerErr != nil {
glog.Fatalf("Failed to start attach/detach controller: %v", attachDetachControllerErr)
}
go attachDetachController.Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
groupVersion = "certificates.k8s.io/v1alpha1"
resources, found = resourceMap[groupVersion]
glog.Infof("Attempting to start certificates, full resource map %+v", resourceMap)
if containsVersion(versions, groupVersion) && found {
glog.Infof("Starting %s apis", groupVersion)
if containsResource(resources, "certificatesigningrequests") {
glog.Infof("Starting certificate request controller")
resyncPeriod := ResyncPeriod(s)()
certController, err := certcontroller.NewCertificateController(
client("certificate-controller"),
resyncPeriod,
s.ClusterSigningCertFile,
s.ClusterSigningKeyFile,
s.ApproveAllKubeletCSRsForGroup,
)
if err != nil {
glog.Errorf("Failed to start certificate controller: %v", err)
} else {
go certController.Run(1, wait.NeverStop)
}
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
}
serviceaccountcontroller.NewServiceAccountsController(
client("service-account-controller"),
serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
).Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
if s.EnableGarbageCollector {
gcClientset := client("generic-garbage-collector")
groupVersionResources, err := gcClientset.Discovery().ServerPreferredResources()
if err != nil {
glog.Fatalf("Failed to get supported resources from server: %v", err)
}
config := restclient.AddUserAgent(kubeconfig, "generic-garbage-collector")
config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
metaOnlyClientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
config.ContentConfig.NegotiatedSerializer = nil
clientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
garbageCollector, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, restMapper, groupVersionResources)
if err != nil {
glog.Errorf("Failed to start the generic garbage collector: %v", err)
} else {
workers := int(s.ConcurrentGCSyncs)
go garbageCollector.Run(workers, wait.NeverStop)
}
}
sharedInformers.Start(stop)
select {}
}
Example 8: Run
//......... (part of the code omitted here) .........
	resourcequotacontroller.NewResourceQuotaController(kubeClient).Run(s.ResourceQuotaSyncPeriod)
	// If apiserver is not running we should wait for some time and fail only then. This is particularly
	// important when we start apiserver and controller manager at the same time.
	var versionStrings []string
	err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		if versionStrings, err = client.ServerAPIVersions(kubeconfig); err == nil {
			return true, nil
		}
		glog.Errorf("Failed to get api versions from server: %v", err)
		return false, nil
	})
	if err != nil {
		glog.Fatalf("Failed to get api versions from server: %v", err)
	}
	versions := &unversioned.APIVersions{Versions: versionStrings}
	resourceMap, err := kubeClient.Discovery().ServerResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}
	namespacecontroller.NewNamespaceController(kubeClient, versions, s.NamespaceSyncPeriod).Run()
	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO: this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			metricsClient := metrics.NewHeapsterMetricsClient(kubeClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
			podautoscaler.NewHorizontalController(kubeClient, metricsClient).
				Run(s.HorizontalPodAutoscalerSyncPeriod)
		}
		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(kubeClient, s.ResyncPeriod).
				Run(s.ConcurrentDSCSyncs, util.NeverStop)
		}
		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(kubeClient, s.ResyncPeriod).
				Run(s.ConcurrentJobSyncs, util.NeverStop)
		}
		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			deployment.New(kubeClient).
				Run(s.DeploymentControllerSyncPeriod)
		}
	}
	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()
	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()
	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccount.NewTokensController(
				kubeClient,
				serviceaccount.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA: rootCA,
				},
			).Run()
		}
	}
	serviceaccount.NewServiceAccountsController(
		kubeClient,
		serviceaccount.DefaultServiceAccountsControllerOptions(),
	).Run()
	select {}
}
Example 9: startServiceAccountTestServer
//......... (part of the code omitted here) .........
	storageDestinations.AddAPIGroup("extensions", expEtcdStorage)
	storageVersions := make(map[string]string)
	storageVersions[""] = testapi.Default.Version()
	storageVersions["extensions"] = testapi.Extensions.GroupAndVersion()
	// Listener
	var m *master.Master
	apiServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		m.Handler.ServeHTTP(w, req)
	}))
	// Anonymous client config
	clientConfig := client.Config{Host: apiServer.URL, Version: testapi.Default.Version()}
	// Root client
	rootClient := client.NewOrDie(&client.Config{Host: apiServer.URL, Version: testapi.Default.Version(), BearerToken: rootToken})
	// Set up two authenticators:
	// 1. A token authenticator that maps the rootToken to the "root" user
	// 2. A ServiceAccountToken authenticator that validates ServiceAccount tokens
	rootTokenAuth := authenticator.TokenFunc(func(token string) (user.Info, bool, error) {
		if token == rootToken {
			return &user.DefaultInfo{rootUserName, "", "", "", []string{}, ""}, true, nil
		}
		return nil, false, nil
	})
	serviceAccountKey, err := rsa.GenerateKey(rand.Reader, 2048)
	serviceAccountTokenGetter := serviceaccount.NewGetterFromClient(rootClient)
	serviceAccountTokenAuth := serviceaccount.JWTTokenAuthenticator([]*rsa.PublicKey{&serviceAccountKey.PublicKey}, true, serviceAccountTokenGetter)
	authenticator := union.New(
		bearertoken.New(rootTokenAuth),
		bearertoken.New(serviceAccountTokenAuth),
	)
	// Set up a stub authorizer:
	// 1. The "root" user is allowed to do anything
	// 2. ServiceAccounts named "ro" are allowed read-only operations in their namespace
	// 3. ServiceAccounts named "rw" are allowed any operation in their namespace
	authorizer := authorizer.AuthorizerFunc(func(attrs authorizer.Attributes) (string, error) {
		username := attrs.GetUserName()
		ns := attrs.GetNamespace()
		// If the user is "root"...
		if username == rootUserName {
			// allow them to do anything
			return "", nil
		}
		// If the user is a service account...
		if serviceAccountNamespace, serviceAccountName, err := serviceaccount.SplitUsername(username); err == nil {
			// Limit them to their own namespace
			if serviceAccountNamespace == ns {
				switch serviceAccountName {
				case readOnlyServiceAccountName:
					if attrs.IsReadOnly() {
						return "", nil
					}
				case readWriteServiceAccountName:
					return "", nil
				}
			}
		}
		return "", fmt.Errorf("User %s is denied (ns=%s, readonly=%v, resource=%s)", username, ns, attrs.IsReadOnly(), attrs.GetResource())
	})
	// Set up admission plugin to auto-assign serviceaccounts to pods
	serviceAccountAdmission := serviceaccountadmission.NewServiceAccount(rootClient)
	// Create a master and install handlers into mux.
	m = master.New(&master.Config{
		StorageDestinations: storageDestinations,
		KubeletClient: client.FakeKubeletClient{},
		EnableLogsSupport: false,
		EnableUISupport: false,
		EnableIndex: true,
		APIPrefix: "/api",
		Authenticator: authenticator,
		Authorizer: authorizer,
		AdmissionControl: serviceAccountAdmission,
		StorageVersions: storageVersions,
	})
	// Start the service account and service account token controllers
	tokenController := serviceaccount.NewTokensController(rootClient, serviceaccount.TokensControllerOptions{TokenGenerator: serviceaccount.JWTTokenGenerator(serviceAccountKey)})
	tokenController.Run()
	serviceAccountController := serviceaccount.NewServiceAccountsController(rootClient, serviceaccount.DefaultServiceAccountsControllerOptions())
	serviceAccountController.Run()
	// Start the admission plugin reflectors
	serviceAccountAdmission.Run()
	stop := func() {
		tokenController.Stop()
		serviceAccountController.Stop()
		serviceAccountAdmission.Stop()
		apiServer.Close()
	}
	return rootClient, clientConfig, stop
}
Example 10: Run
//......... (part of the code omitted here) .........
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			profile.InstallHandler(mux)
		}
		mux.Handle("/metrics", prometheus.Handler())
		server := &http.Server{
			Addr: net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()
	endpoints := s.createEndpointController(kubeClient)
	go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)
	controllerManager := replicationcontroller.NewReplicationManager(kubeClient, replicationcontroller.BurstReplicas)
	go controllerManager.Run(s.ConcurrentRCSyncs, util.NeverStop)
	go daemon.NewDaemonSetsController(kubeClient).
		Run(s.ConcurrentDSCSyncs, util.NeverStop)
	//TODO(jdef) should eventually support more cloud providers here
	if s.CloudProvider != mesos.ProviderName {
		glog.Fatalf("Only provider %v is supported, you specified %v", mesos.ProviderName, s.CloudProvider)
	}
	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}
	nodeController := nodecontroller.NewNodeController(cloud, kubeClient,
		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod)
	serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
	if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}
	if s.AllocateNodeCIDRs {
		routes, ok := cloud.Routes()
		if !ok {
			glog.Fatal("Cloud provider must support routes if allocate-node-cidrs is set")
		}
		routeController := routecontroller.New(routes, kubeClient, s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
		routeController.Run(s.NodeSyncPeriod)
	}
	resourceQuotaController := resourcequotacontroller.NewResourceQuotaController(kubeClient)
	resourceQuotaController.Run(s.ResourceQuotaSyncPeriod)
	namespaceController := namespacecontroller.NewNamespaceController(kubeClient, false, s.NamespaceSyncPeriod)
	namespaceController.Run()
	pvclaimBinder := volumeclaimbinder.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()
	pvRecycler, err := volumeclaimbinder.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, app.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()
	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccount.NewTokensController(
				kubeClient,
				serviceaccount.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA: rootCA,
				},
			).Run()
		}
	}
	serviceaccount.NewServiceAccountsController(
		kubeClient,
		serviceaccount.DefaultServiceAccountsControllerOptions(),
	).Run()
	select {}
}
Example 11: StartControllers
//......... (part of the code omitted here) .........
				kubeClient,
				resyncPeriod,
			).Run(1, wait.NeverStop)
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}
	provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration)
	if err != nil {
		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
	}
	volumeController := persistentvolumecontroller.NewPersistentVolumeController(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
		s.PVClaimBinderSyncPeriod.Duration,
		provisioner,
		ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
		cloud,
		s.ClusterName,
		nil, nil, nil,
	)
	volumeController.Run()
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	attachDetachController, attachDetachControllerErr :=
		volume.NewAttachDetachController(
			clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "attachdetach-controller")),
			podInformer,
			nodeInformer,
			pvcInformer,
			pvInformer,
			cloud,
			ProbeAttachableVolumePlugins(s.VolumeConfiguration))
	if attachDetachControllerErr != nil {
		glog.Fatalf("Failed to start attach/detach controller: %v", attachDetachControllerErr)
	} else {
		go attachDetachController.Run(wait.NeverStop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}
	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := crypto.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccountcontroller.NewTokensController(
				clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "tokens-controller")),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA: rootCA,
				},
			).Run()
			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
		}
	}
	serviceaccountcontroller.NewServiceAccountsController(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "service-account-controller")),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run()
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	if s.EnableGarbageCollector {
		gcClientset := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "generic-garbage-collector"))
		groupVersionResources, err := gcClientset.Discovery().ServerPreferredResources()
		if err != nil {
			glog.Fatalf("Failed to get supported resources from server: %v", err)
		}
		clientPool := dynamic.NewClientPool(restclient.AddUserAgent(kubeconfig, "generic-garbage-collector"), dynamic.LegacyAPIPathResolverFunc)
		garbageCollector, err := garbagecollector.NewGarbageCollector(clientPool, groupVersionResources)
		if err != nil {
			glog.Errorf("Failed to start the generic garbage collector")
		} else {
			// TODO: make this a flag of kube-controller-manager
			workers := 5
			go garbageCollector.Run(workers, wait.NeverStop)
		}
	}
	// run the shared informers
	for _, informer := range informers {
		go informer.Run(wait.NeverStop)
	}
	select {}
}
Example 12: StartControllers
//......... (part of the code omitted here) .........
				hpaClient,
				metrics.DefaultHeapsterNamespace,
				metrics.DefaultHeapsterScheme,
				metrics.DefaultHeapsterService,
				metrics.DefaultHeapsterPort,
			)
			podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient).
				Run(s.HorizontalPodAutoscalerSyncPeriod.Duration)
		}
		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s)).
				Run(s.ConcurrentDaemonSetSyncs, wait.NeverStop)
		}
		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "job-controller")), ResyncPeriod(s)).
				Run(s.ConcurrentJobSyncs, wait.NeverStop)
		}
		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "deployment-controller")), ResyncPeriod(s)).
				Run(s.ConcurrentDeploymentSyncs, wait.NeverStop)
		}
		if containsResource(resources, "replicasets") {
			glog.Infof("Starting ReplicaSet controller")
			go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, s.LookupCacheSizeForRS).
				Run(s.ConcurrentRSSyncs, wait.NeverStop)
		}
	}
	volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfiguration)
	provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration)
	if err != nil {
		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
	}
	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration)
	pvclaimBinder.Run()
	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
		clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
		s.PVClaimBinderSyncPeriod.Duration,
		s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry,
		ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
		cloud,
	)
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()
	if provisioner != nil {
		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PVClaimBinderSyncPeriod.Duration, s.ClusterName, volumePlugins, provisioner, cloud)
		if err != nil {
			glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
		}
		pvController.Run()
	}
	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccountcontroller.NewTokensController(
				clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "tokens-controller")),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA: rootCA,
				},
			).Run()
		}
	}
	serviceaccountcontroller.NewServiceAccountsController(
		clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "service-account-controller")),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run()
	select {}
}
Example 13: Run
//......... (part of the code omitted here) .........
	namespaceController := namespacecontroller.NewNamespaceController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "namespace-controller")), &unversioned.APIVersions{}, s.NamespaceSyncPeriod)
	namespaceController.Run()
	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO(k8s): this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			hpaClient := clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "horizontal-pod-autoscaler"))
			metricsClient := metrics.NewHeapsterMetricsClient(
				hpaClient,
				metrics.DefaultHeapsterNamespace,
				metrics.DefaultHeapsterScheme,
				metrics.DefaultHeapsterService,
				metrics.DefaultHeapsterPort,
			)
			podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient).
				Run(s.HorizontalPodAutoscalerSyncPeriod)
		}
		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "daemon-set-controller")), s.resyncPeriod).
				Run(s.ConcurrentDSCSyncs, util.NeverStop)
		}
		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "job-controller")), s.resyncPeriod).
				Run(s.ConcurrentJobSyncs, util.NeverStop)
		}
		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "deployment-controller")), s.resyncPeriod).
				Run(s.ConcurrentDeploymentSyncs, util.NeverStop)
		}
	}
	volumePlugins := kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags)
	provisioner, err := kubecontrollermanager.NewVolumeProvisioner(cloud, s.VolumeConfigFlags)
	if err != nil {
		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
	}
	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()
	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-recycler")), s.PVClaimBinderSyncPeriod, kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags), cloud)
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()
	if provisioner != nil {
		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-controller"))), s.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
		if err != nil {
			glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
		}
		pvController.Run()
	}
	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccountcontroller.NewTokensController(
				clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "tokens-controller")),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA: rootCA,
				},
			).Run()
		}
	}
	serviceaccountcontroller.NewServiceAccountsController(
		clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "service-account-controller")),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run()
	select {}
}
Example 14: Run
//......... (part of the code omitted here) .........
	namespacecontroller.NewNamespaceController(clientForUserAgentOrDie(*kubeconfig, "namespace-controller"), versions, s.NamespaceSyncPeriod).Run()
	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO: this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			hpaClient := clientForUserAgentOrDie(*kubeconfig, "horizontal-pod-autoscaler")
			metricsClient := metrics.NewHeapsterMetricsClient(
				hpaClient,
				metrics.DefaultHeapsterNamespace,
				metrics.DefaultHeapsterScheme,
				metrics.DefaultHeapsterService,
				metrics.DefaultHeapsterPort,
			)
			podautoscaler.NewHorizontalController(hpaClient, hpaClient, hpaClient, metricsClient).
				Run(s.HorizontalPodAutoscalerSyncPeriod)
		}
		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(clientForUserAgentOrDie(*kubeconfig, "daemon-set-controller"), ResyncPeriod(s)).
				Run(s.ConcurrentDSCSyncs, util.NeverStop)
		}
		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(clientForUserAgentOrDie(*kubeconfig, "job-controller"), ResyncPeriod(s)).
				Run(s.ConcurrentJobSyncs, util.NeverStop)
		}
		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			go deployment.NewDeploymentController(clientForUserAgentOrDie(*kubeconfig, "deployment-controller"), ResyncPeriod(s)).
				Run(s.ConcurrentDeploymentSyncs, util.NeverStop)
		}
	}
	volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfigFlags)
	provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfigFlags)
	if err != nil {
		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
	}
	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-binder"), s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()
	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-recycler"), s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags), cloud)
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()
	if provisioner != nil {
		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-provisioner")), s.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
		if err != nil {
			glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
		}
		pvController.Run()
	}
	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccountcontroller.NewTokensController(
				clientForUserAgentOrDie(*kubeconfig, "tokens-controller"),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA: rootCA,
				},
			).Run()
		}
	}
	serviceaccountcontroller.NewServiceAccountsController(
		clientForUserAgentOrDie(*kubeconfig, "service-account-controller"),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run()
	select {}
}
Example 15: StartControllers
//......... (part of the code omitted here) .........
	if availableResources[schema.GroupVersionResource{Group: "batch", Version: "v2alpha1", Resource: "cronjobs"}] {
		glog.Infof("Starting cronjob controller")
		// TODO: this is a temp fix for allowing kubeClient list v2alpha1 sj, should switch to using clientset
		cronjobConfig := rootClientBuilder.ConfigOrDie("cronjob-controller")
		cronjobConfig.ContentConfig.GroupVersion = &schema.GroupVersion{Group: batch.GroupName, Version: "v2alpha1"}
		go cronjob.NewCronJobController(clientset.NewForConfigOrDie(cronjobConfig)).Run(stop)
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}
	alphaProvisioner, err := NewAlphaVolumeProvisioner(cloud, s.VolumeConfiguration)
	if err != nil {
		return fmt.Errorf("an backward-compatible provisioner could not be created: %v, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.", err)
	}
	params := persistentvolumecontroller.ControllerParameters{
		KubeClient: clientBuilder.ClientOrDie("persistent-volume-binder"),
		SyncPeriod: s.PVClaimBinderSyncPeriod.Duration,
		AlphaProvisioner: alphaProvisioner,
		VolumePlugins: ProbeControllerVolumePlugins(cloud, s.VolumeConfiguration),
		Cloud: cloud,
		ClusterName: s.ClusterName,
		EnableDynamicProvisioning: s.VolumeConfiguration.EnableDynamicProvisioning,
	}
	volumeController := persistentvolumecontroller.NewController(params)
	volumeController.Run(stop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	attachDetachController, attachDetachControllerErr :=
		attachdetach.NewAttachDetachController(
			clientBuilder.ClientOrDie("attachdetach-controller"),
			sharedInformers.Pods().Informer(),
			sharedInformers.Nodes().Informer(),
			sharedInformers.PersistentVolumeClaims().Informer(),
			sharedInformers.PersistentVolumes().Informer(),
			cloud,
			ProbeAttachableVolumePlugins(s.VolumeConfiguration))
	if attachDetachControllerErr != nil {
		return fmt.Errorf("failed to start attach/detach controller: %v", attachDetachControllerErr)
	}
	go attachDetachController.Run(stop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	if availableResources[schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1alpha1", Resource: "certificatesigningrequests"}] {
		glog.Infof("Starting certificate request controller")
		resyncPeriod := ResyncPeriod(s)()
		c := clientBuilder.ClientOrDie("certificate-controller")
		certController, err := certcontroller.NewCertificateController(
			c,
			resyncPeriod,
			s.ClusterSigningCertFile,
			s.ClusterSigningKeyFile,
			certcontroller.NewGroupApprover(c.Certificates().CertificateSigningRequests(), s.ApproveAllKubeletCSRsForGroup),
		)
		if err != nil {
			glog.Errorf("Failed to start certificate controller: %v", err)
		} else {
			go certController.Run(1, stop)
		}
		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	}
	go serviceaccountcontroller.NewServiceAccountsController(
		sharedInformers.ServiceAccounts(), sharedInformers.Namespaces(),
		clientBuilder.ClientOrDie("service-account-controller"),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run(1, stop)
	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
	if s.EnableGarbageCollector {
		// TODO: should use a dynamic RESTMapper built from the discovery results.
		restMapper := registered.RESTMapper()
		gcClientset := clientBuilder.ClientOrDie("generic-garbage-collector")
		preferredResources, err := gcClientset.Discovery().ServerPreferredResources()
		if err != nil {
			return fmt.Errorf("failed to get supported resources from server: %v", err)
		}
		deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete"}}, preferredResources)
		deletableGroupVersionResources, err := discovery.GroupVersionResources(deletableResources)
		if err != nil {
			glog.Errorf("Failed to parse resources from server: %v", err)
		}
		config := rootClientBuilder.ConfigOrDie("generic-garbage-collector")
		config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
		metaOnlyClientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
		config.ContentConfig = dynamic.ContentConfig()
		clientPool := dynamic.NewClientPool(config, restMapper, dynamic.LegacyAPIPathResolverFunc)
		garbageCollector, err := garbagecollector.NewGarbageCollector(metaOnlyClientPool, clientPool, restMapper, deletableGroupVersionResources)
		if err != nil {
			glog.Errorf("Failed to start the generic garbage collector: %v", err)
		} else {
			workers := int(s.ConcurrentGCSyncs)
			go garbageCollector.Run(workers, stop)
		}
	}
	sharedInformers.Start(stop)
	select {}
}