This article collects typical usage examples of the Go function k8s.io/kubernetes/pkg/controller/persistentvolume.NewPersistentVolumeClaimBinder. If you are wondering what exactly NewPersistentVolumeClaimBinder does, how to call it, or what real-world uses of it look like, the curated examples below should help.
The following shows 12 code examples of NewPersistentVolumeClaimBinder, sorted roughly by popularity.
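Every example follows the same basic pattern: construct the binder with a Kubernetes client and a resync period, call Run() to start it, and (in the tests) call Stop() to shut it down. A minimal sketch of that pattern; the client configuration and the 10-second period here are illustrative, not taken from any single example below:

	// Build a client against a running apiserver (illustrative host).
	kubeClient := client.NewOrDie(&client.Config{Host: "http://127.0.0.1:8080"})
	// Create the claim binder with the client and a resync period.
	binder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(kubeClient, 10*time.Second)
	binder.Run()        // start the binder's watch/sync loops
	defer binder.Stop() // stop them when the caller is done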
Example 1: RunPersistentVolumeClaimBinder
// RunPersistentVolumeClaimBinder starts the Kubernetes Persistent Volume Claim Binder
func (c *MasterConfig) RunPersistentVolumeClaimBinder() {
	binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(c.KubeClient, c.ControllerManager.PVClaimBinderSyncPeriod)
	binder.Run()
}
Example 2: Run
//......... portions of code omitted .........
	resourcequotacontroller.NewResourceQuotaController(kubeClient).Run(s.ResourceQuotaSyncPeriod)
	// If apiserver is not running we should wait for some time and fail only then. This is particularly
	// important when we start apiserver and controller manager at the same time.
	var versionStrings []string
	err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
		if versionStrings, err = client.ServerAPIVersions(kubeconfig); err == nil {
			return true, nil
		}
		glog.Errorf("Failed to get api versions from server: %v", err)
		return false, nil
	})
	if err != nil {
		glog.Fatalf("Failed to get api versions from server: %v", err)
	}
	versions := &unversioned.APIVersions{Versions: versionStrings}
	resourceMap, err := kubeClient.Discovery().ServerResources()
	if err != nil {
		glog.Fatalf("Failed to get supported resources from server: %v", err)
	}
	namespacecontroller.NewNamespaceController(kubeClient, versions, s.NamespaceSyncPeriod).Run()
	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO: this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			metricsClient := metrics.NewHeapsterMetricsClient(kubeClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
			podautoscaler.NewHorizontalController(kubeClient, metricsClient).
				Run(s.HorizontalPodAutoscalerSyncPeriod)
		}
		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(kubeClient, s.ResyncPeriod).
				Run(s.ConcurrentDSCSyncs, util.NeverStop)
		}
		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(kubeClient, s.ResyncPeriod).
				Run(s.ConcurrentJobSyncs, util.NeverStop)
		}
		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			deployment.New(kubeClient).
				Run(s.DeploymentControllerSyncPeriod)
		}
	}
	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()
	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()
	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccount.NewTokensController(
				kubeClient,
				serviceaccount.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run()
		}
	}
	serviceaccount.NewServiceAccountsController(
		kubeClient,
		serviceaccount.DefaultServiceAccountsControllerOptions(),
	).Run()
	select {}
}
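Examples 2, 4, 5, 8, and 10 gate controller startup on the helpers containsVersion and containsResource, whose bodies this page omits. Judging from their call sites, minimal sketches could look like the following (the unversioned discovery types are assumed from the snippets above):

	// containsVersion reports whether the discovered API versions include
	// the given groupVersion (sketch inferred from call sites).
	func containsVersion(versions *unversioned.APIVersions, version string) bool {
		for _, v := range versions.Versions {
			if v == version {
				return true
			}
		}
		return false
	}

	// containsResource reports whether an API resource list contains the
	// named resource (sketch inferred from call sites).
	func containsResource(resources *unversioned.APIResourceList, name string) bool {
		if resources == nil {
			return false
		}
		for _, r := range resources.APIResources {
			if r.Name == name {
				return true
			}
		}
		return false
	}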
Example 3: TestPersistentVolumeRecycler
func TestPersistentVolumeRecycler(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()
	deleteAllEtcdKeys()
	binderClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	recyclerClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	testClient := client.NewOrDie(&client.Config{Host: s.URL, ContentConfig: client.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}})
	host := volume.NewFakeVolumeHost("/tmp/fake", nil, nil)
	plugins := []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", host, volume.VolumeConfig{}, volume.VolumeOptions{}}}
	cloud := &fake_cloud.FakeCloud{}
	binder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(binderClient, 10*time.Second)
	binder.Run()
	defer binder.Stop()
	recycler, _ := persistentvolumecontroller.NewPersistentVolumeRecycler(recyclerClient, 30*time.Second, plugins, cloud)
	recycler.Run()
	defer recycler.Stop()
	// This PV will be claimed, released, and recycled.
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{Name: "fake-pv"},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource:        api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp/foo"}},
			Capacity:                      api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
			AccessModes:                   []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
		},
	}
	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{Name: "fake-pvc"},
		Spec: api.PersistentVolumeClaimSpec{
			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("5G")}},
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
		},
	}
	w, _ := testClient.PersistentVolumes().Watch(api.ListOptions{})
	defer w.Stop()
	_, _ = testClient.PersistentVolumes().Create(pv)
	_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)
	// wait until the binder pairs the volume and claim
	waitForPersistentVolumePhase(w, api.VolumeBound)
	// deleting a claim releases the volume, after which it can be recycled
	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
		t.Errorf("error deleting claim %s", pvc.Name)
	}
	waitForPersistentVolumePhase(w, api.VolumeReleased)
	waitForPersistentVolumePhase(w, api.VolumeAvailable)
	// end of Recycler test.
	// Deleter test begins now.
	// tests are serial because running masters concurrently that delete keys may cause similar tests to time out
	deleteAllEtcdKeys()
	// change the reclamation policy of the PV for the next test
	pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimDelete
	w, _ = testClient.PersistentVolumes().Watch(api.ListOptions{})
	defer w.Stop()
	_, _ = testClient.PersistentVolumes().Create(pv)
	_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)
	waitForPersistentVolumePhase(w, api.VolumeBound)
	// deleting a claim releases the volume, after which it can be recycled
	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
		t.Errorf("error deleting claim %s", pvc.Name)
	}
	waitForPersistentVolumePhase(w, api.VolumeReleased)
	for {
		event := <-w.ResultChan()
		if event.Type == watch.Deleted {
			break
		}
	}
	// test the race between claims and volumes; ensure a volume binds to only a single claim
	deleteAllEtcdKeys()
	counter := 0
	maxClaims := 100
	claims := []*api.PersistentVolumeClaim{}
	for counter <= maxClaims {
		counter++
		clone, _ := conversion.NewCloner().DeepCopy(pvc)
		newPvc, _ := clone.(*api.PersistentVolumeClaim)
		newPvc.ObjectMeta = api.ObjectMeta{Name: fmt.Sprintf("fake-pvc-%d", counter)}
		claim, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(newPvc)
		if err != nil {
//......... portions of code omitted .........
Example 4: StartControllers
//......... portions of code omitted .........
				hpaClient,
				metrics.DefaultHeapsterNamespace,
				metrics.DefaultHeapsterScheme,
				metrics.DefaultHeapsterService,
				metrics.DefaultHeapsterPort,
			)
			podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient).
				Run(s.HorizontalPodAutoscalerSyncPeriod.Duration)
		}
		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s)).
				Run(s.ConcurrentDaemonSetSyncs, wait.NeverStop)
		}
		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "job-controller")), ResyncPeriod(s)).
				Run(s.ConcurrentJobSyncs, wait.NeverStop)
		}
		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "deployment-controller")), ResyncPeriod(s)).
				Run(s.ConcurrentDeploymentSyncs, wait.NeverStop)
		}
		if containsResource(resources, "replicasets") {
			glog.Infof("Starting ReplicaSet controller")
			go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, s.LookupCacheSizeForRS).
				Run(s.ConcurrentRSSyncs, wait.NeverStop)
		}
	}
	volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfiguration)
	provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration)
	if err != nil {
		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
	}
	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration)
	pvclaimBinder.Run()
	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
		clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
		s.PVClaimBinderSyncPeriod.Duration,
		s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry,
		ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
		cloud,
	)
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()
	if provisioner != nil {
		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PVClaimBinderSyncPeriod.Duration, s.ClusterName, volumePlugins, provisioner, cloud)
		if err != nil {
			glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
		}
		pvController.Run()
	}
	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccountcontroller.NewTokensController(
				clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "tokens-controller")),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run()
		}
	}
	serviceaccountcontroller.NewServiceAccountsController(
		clientset.NewForConfigOrDie(client.AddUserAgent(kubeconfig, "service-account-controller")),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run()
	select {}
}
Example 5: Run
//......... portions of code omitted .........
				metrics.DefaultHeapsterService,
				metrics.DefaultHeapsterPort,
			)
			go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient, s.HorizontalPodAutoscalerSyncPeriod.Duration).
				Run(wait.NeverStop)
		}
		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), s.resyncPeriod, s.LookupCacheSizeForDaemonSet).
				Run(s.ConcurrentDaemonSetSyncs, wait.NeverStop)
		}
		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "job-controller")), s.resyncPeriod).
				Run(s.ConcurrentJobSyncs, wait.NeverStop)
		}
		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			go deployment.NewDeploymentController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "deployment-controller")), s.resyncPeriod).
				Run(s.ConcurrentDeploymentSyncs, wait.NeverStop)
		}
		if containsResource(resources, "replicasets") {
			glog.Infof("Starting ReplicaSet controller")
			go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), s.resyncPeriod, replicaset.BurstReplicas, s.LookupCacheSizeForRS).
				Run(s.ConcurrentRSSyncs, wait.NeverStop)
		}
	}
	volumePlugins := kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration)
	provisioner, err := kubecontrollermanager.NewVolumeProvisioner(cloud, s.VolumeConfiguration)
	if err != nil {
		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
	}
	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
		s.PVClaimBinderSyncPeriod.Duration,
	)
	pvclaimBinder.Run()
	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
		s.PVClaimBinderSyncPeriod.Duration,
		s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry,
		kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
		cloud,
	)
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()
	if provisioner != nil {
		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-controller"))), s.PVClaimBinderSyncPeriod.Duration, s.ClusterName, volumePlugins, provisioner, cloud)
		if err != nil {
			glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
		}
		pvController.Run()
	}
	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := crypto.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccountcontroller.NewTokensController(
				clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "tokens-controller")),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run()
		}
	}
	serviceaccountcontroller.NewServiceAccountsController(
		clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "service-account-controller")),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run()
	select {}
}
Example 6: Run
func (s *CMServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}
	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	kubeconfig.QPS = 20.0
	kubeconfig.Burst = 30
	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			profile.InstallHandler(mux)
		}
		mux.Handle("/metrics", prometheus.Handler())
		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()
	endpoints := s.createEndpointController(kubeClient)
	go endpoints.Run(s.ConcurrentEndpointSyncs, util.NeverStop)
	go replicationcontroller.NewReplicationManager(kubeClient, s.resyncPeriod, replicationcontroller.BurstReplicas).
		Run(s.ConcurrentRCSyncs, util.NeverStop)
	go daemon.NewDaemonSetsController(kubeClient, s.resyncPeriod).
		Run(s.ConcurrentDSCSyncs, util.NeverStop)
	// TODO(jdef) should eventually support more cloud providers here
	if s.CloudProvider != mesos.ProviderName {
		glog.Fatalf("Only provider %v is supported, you specified %v", mesos.ProviderName, s.CloudProvider)
	}
	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}
	nodeController := nodecontroller.NewNodeController(cloud, kubeClient,
		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod)
	nodeStatusUpdaterController := node.NewStatusUpdater(kubeClient, s.NodeMonitorPeriod, time.Now)
	if err := nodeStatusUpdaterController.Run(util.NeverStop); err != nil {
		glog.Fatalf("Failed to start node status update controller: %v", err)
	}
	serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
	if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}
	if s.AllocateNodeCIDRs {
		routes, ok := cloud.Routes()
		if !ok {
			glog.Fatal("Cloud provider must support routes if allocate-node-cidrs is set")
		}
		routeController := routecontroller.New(routes, kubeClient, s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
		routeController.Run(s.NodeSyncPeriod)
	}
	resourceQuotaController := resourcequotacontroller.NewResourceQuotaController(kubeClient)
	resourceQuotaController.Run(s.ResourceQuotaSyncPeriod)
	namespaceController := namespacecontroller.NewNamespaceController(kubeClient, &unversioned.APIVersions{}, s.NamespaceSyncPeriod)
	namespaceController.Run()
	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()
	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(kubeClient, s.PVClaimBinderSyncPeriod, app.ProbeRecyclableVolumePlugins(s.VolumeConfigFlags))
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()
	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
//......... portions of code omitted .........
Example 7: TestPersistentVolumeRecycler
func TestPersistentVolumeRecycler(t *testing.T) {
	_, s := framework.RunAMaster(t)
	defer s.Close()
	deleteAllEtcdKeys()
	binderClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
	recyclerClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
	testClient := client.NewOrDie(&client.Config{Host: s.URL, Version: testapi.Default.Version()})
	binder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(binderClient, 1*time.Second)
	binder.Run()
	defer binder.Stop()
	recycler, _ := persistentvolumecontroller.NewPersistentVolumeRecycler(recyclerClient, 1*time.Second, []volume.VolumePlugin{&volume.FakeVolumePlugin{"plugin-name", volume.NewFakeVolumeHost("/tmp/fake", nil, nil)}})
	recycler.Run()
	defer recycler.Stop()
	// This PV will be claimed, released, and recycled.
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{Name: "fake-pv"},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeSource:        api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp/foo"}},
			Capacity:                      api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
			AccessModes:                   []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
		},
	}
	pvc := &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{Name: "fake-pvc"},
		Spec: api.PersistentVolumeClaimSpec{
			Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("5G")}},
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
		},
	}
	w, _ := testClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), api.ListOptions{})
	defer w.Stop()
	_, _ = testClient.PersistentVolumes().Create(pv)
	_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)
	// wait until the binder pairs the volume and claim
	waitForPersistentVolumePhase(w, api.VolumeBound)
	// deleting a claim releases the volume, after which it can be recycled
	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
		t.Errorf("error deleting claim %s", pvc.Name)
	}
	waitForPersistentVolumePhase(w, api.VolumeReleased)
	waitForPersistentVolumePhase(w, api.VolumeAvailable)
	// end of Recycler test.
	// Deleter test begins now.
	// tests are serial because running masters concurrently that delete keys may cause similar tests to time out
	deleteAllEtcdKeys()
	// change the reclamation policy of the PV for the next test
	pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimDelete
	w, _ = testClient.PersistentVolumes().Watch(labels.Everything(), fields.Everything(), api.ListOptions{})
	defer w.Stop()
	_, _ = testClient.PersistentVolumes().Create(pv)
	_, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)
	waitForPersistentVolumePhase(w, api.VolumeBound)
	// deleting a claim releases the volume, after which it can be recycled
	if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name); err != nil {
		t.Errorf("error deleting claim %s", pvc.Name)
	}
	waitForPersistentVolumePhase(w, api.VolumeReleased)
	for {
		event := <-w.ResultChan()
		if event.Type == watch.Deleted {
			break
		}
	}
}
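Both recycler tests (Examples 3 and 7) block on waitForPersistentVolumePhase, another helper the page does not show. From its call sites, which pass a watch.Interface and an api.PersistentVolumePhase, a minimal sketch might be:

	// waitForPersistentVolumePhase consumes watch events until a
	// PersistentVolume reports the expected phase (sketch; the real
	// test helper may differ, e.g. by enforcing a timeout).
	func waitForPersistentVolumePhase(w watch.Interface, phase api.PersistentVolumePhase) {
		for event := range w.ResultChan() {
			pv, ok := event.Object.(*api.PersistentVolume)
			if !ok {
				continue
			}
			if pv.Status.Phase == phase {
				return
			}
		}
	}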
Example 8: Run
//......... portions of code omitted .........
	namespacecontroller.NewNamespaceController(clientForUserAgentOrDie(*kubeconfig, "namespace-controller"), versions, s.NamespaceSyncPeriod).Run()
	groupVersion := "extensions/v1beta1"
	resources, found := resourceMap[groupVersion]
	// TODO: this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
	if containsVersion(versions, groupVersion) && found {
		glog.Infof("Starting %s apis", groupVersion)
		if containsResource(resources, "horizontalpodautoscalers") {
			glog.Infof("Starting horizontal pod controller.")
			hpaClient := clientForUserAgentOrDie(*kubeconfig, "horizontal-pod-autoscaler")
			metricsClient := metrics.NewHeapsterMetricsClient(
				hpaClient,
				metrics.DefaultHeapsterNamespace,
				metrics.DefaultHeapsterScheme,
				metrics.DefaultHeapsterService,
				metrics.DefaultHeapsterPort,
			)
			podautoscaler.NewHorizontalController(hpaClient, hpaClient, hpaClient, metricsClient).
				Run(s.HorizontalPodAutoscalerSyncPeriod)
		}
		if containsResource(resources, "daemonsets") {
			glog.Infof("Starting daemon set controller")
			go daemon.NewDaemonSetsController(clientForUserAgentOrDie(*kubeconfig, "daemon-set-controller"), ResyncPeriod(s)).
				Run(s.ConcurrentDSCSyncs, util.NeverStop)
		}
		if containsResource(resources, "jobs") {
			glog.Infof("Starting job controller")
			go job.NewJobController(clientForUserAgentOrDie(*kubeconfig, "job-controller"), ResyncPeriod(s)).
				Run(s.ConcurrentJobSyncs, util.NeverStop)
		}
		if containsResource(resources, "deployments") {
			glog.Infof("Starting deployment controller")
			go deployment.NewDeploymentController(clientForUserAgentOrDie(*kubeconfig, "deployment-controller"), ResyncPeriod(s)).
				Run(s.ConcurrentDeploymentSyncs, util.NeverStop)
		}
	}
	volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfigFlags)
	provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfigFlags)
	if err != nil {
		glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
	}
	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-binder"), s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()
	pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-recycler"), s.PVClaimBinderSyncPeriod, ProbeRecyclableVolumePlugins(s.VolumeConfigFlags), cloud)
	if err != nil {
		glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
	}
	pvRecycler.Run()
	if provisioner != nil {
		pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientForUserAgentOrDie(*kubeconfig, "persistent-volume-provisioner")), s.PVClaimBinderSyncPeriod, volumePlugins, provisioner, cloud)
		if err != nil {
			glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
		}
		pvController.Run()
	}
	var rootCA []byte
	if s.RootCAFile != "" {
		rootCA, err = ioutil.ReadFile(s.RootCAFile)
		if err != nil {
			return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
		}
		if _, err := util.CertsFromPEM(rootCA); err != nil {
			return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
		}
	} else {
		rootCA = kubeconfig.CAData
	}
	if len(s.ServiceAccountKeyFile) > 0 {
		privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
		if err != nil {
			glog.Errorf("Error reading key for service account token controller: %v", err)
		} else {
			serviceaccountcontroller.NewTokensController(
				clientForUserAgentOrDie(*kubeconfig, "tokens-controller"),
				serviceaccountcontroller.TokensControllerOptions{
					TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
					RootCA:         rootCA,
				},
			).Run()
		}
	}
	serviceaccountcontroller.NewServiceAccountsController(
		clientForUserAgentOrDie(*kubeconfig, "service-account-controller"),
		serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
	).Run()
	select {}
}
Example 9: RunPersistentVolumeClaimBinder
// RunPersistentVolumeClaimBinder starts the Kubernetes Persistent Volume Claim Binder
func (c *MasterConfig) RunPersistentVolumeClaimBinder(client *client.Client) {
	binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(clientadapter.FromUnversionedClient(client), c.ControllerManager.PVClaimBinderSyncPeriod.Duration)
	binder.Run()
}
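Unlike Example 1, this variant receives an unversioned *client.Client and wraps it with clientadapter.FromUnversionedClient before handing it to the binder, presumably because this version of the constructor expects the clientset interface. The sync period is also a typed duration here, hence the .Duration field access.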
Example 10: StartControllers
//......... portions of code omitted .........
if containsResource(resources, "replicasets") {
glog.Infof("Starting ReplicaSet controller")
go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, int(s.LookupCacheSizeForRS)).
Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
}
groupVersion = "apps/v1alpha1"
resources, found = resourceMap[groupVersion]
glog.Infof("Attempting to start petset, full resource map %+v", resourceMap)
if containsVersion(versions, groupVersion) && found {
glog.Infof("Starting %s apis", groupVersion)
if containsResource(resources, "petsets") {
glog.Infof("Starting PetSet controller")
resyncPeriod := ResyncPeriod(s)()
go petset.NewPetSetController(
podInformer,
// TODO: Switch to using clientset
kubeClient,
resyncPeriod,
).Run(1, wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
}
volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfiguration)
provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration)
if err != nil {
glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
}
pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration)
pvclaimBinder.Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
s.PVClaimBinderSyncPeriod.Duration,
int(s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry),
ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
cloud,
)
if err != nil {
glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
}
pvRecycler.Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
if provisioner != nil {
pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PVClaimBinderSyncPeriod.Duration, s.ClusterName, volumePlugins, provisioner, cloud)
if err != nil {
glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
}
pvController.Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
go volume.NewAttachDetachController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "attachdetach-controller")), podInformer, nodeInformer, ResyncPeriod(s)()).
Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
var rootCA []byte
if s.RootCAFile != "" {
Example 11: Run
// Run runs the CMServer. This should never exit.
func (s *CMServer) Run(_ []string) error {
	if s.Kubeconfig == "" && s.Master == "" {
		glog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
	}
	// This creates a client, first loading any specified kubeconfig
	// file, and then overriding the Master flag, if non-empty.
	kubeconfig, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.Kubeconfig},
		&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.Master}}).ClientConfig()
	if err != nil {
		return err
	}
	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = s.KubeApiQps
	kubeconfig.Burst = s.KubeApiBurst
	kubeClient, err := client.New(kubeconfig)
	if err != nil {
		glog.Fatalf("Invalid API configuration: %v", err)
	}
	go func() {
		mux := http.NewServeMux()
		healthz.InstallHandler(mux)
		if s.EnableProfiling {
			mux.HandleFunc("/debug/pprof/", pprof.Index)
			mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
			mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		}
		mux.Handle("/metrics", prometheus.Handler())
		server := &http.Server{
			Addr:    net.JoinHostPort(s.Address.String(), strconv.Itoa(s.Port)),
			Handler: mux,
		}
		glog.Fatal(server.ListenAndServe())
	}()
	go endpointcontroller.NewEndpointController(kubeClient, s.ResyncPeriod).
		Run(s.ConcurrentEndpointSyncs, util.NeverStop)
	go replicationcontroller.NewReplicationManager(kubeClient, s.ResyncPeriod, replicationcontroller.BurstReplicas).
		Run(s.ConcurrentRCSyncs, util.NeverStop)
	if s.TerminatedPodGCThreshold > 0 {
		go gc.New(kubeClient, s.ResyncPeriod, s.TerminatedPodGCThreshold).
			Run(util.NeverStop)
	}
	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
	if err != nil {
		glog.Fatalf("Cloud provider could not be initialized: %v", err)
	}
	nodeController := nodecontroller.NewNodeController(cloud, kubeClient,
		s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, &s.ClusterCIDR, s.AllocateNodeCIDRs)
	nodeController.Run(s.NodeSyncPeriod)
	serviceController := servicecontroller.New(cloud, kubeClient, s.ClusterName)
	if err := serviceController.Run(s.ServiceSyncPeriod, s.NodeSyncPeriod); err != nil {
		glog.Errorf("Failed to start service controller: %v", err)
	}
	if s.AllocateNodeCIDRs {
		if cloud == nil {
			glog.Warning("allocate-node-cidrs is set, but no cloud provider specified. Will not manage routes.")
		} else if routes, ok := cloud.Routes(); !ok {
			glog.Warning("allocate-node-cidrs is set, but cloud provider does not support routes. Will not manage routes.")
		} else {
			routeController := routecontroller.New(routes, kubeClient, s.ClusterName, &s.ClusterCIDR)
			routeController.Run(s.NodeSyncPeriod)
		}
	}
	resourcequotacontroller.NewResourceQuotaController(kubeClient).Run(s.ResourceQuotaSyncPeriod)
	namespacecontroller.NewNamespaceController(kubeClient, s.EnableExperimental, s.NamespaceSyncPeriod).Run()
	if s.EnableExperimental {
		go daemon.NewDaemonSetsController(kubeClient, s.ResyncPeriod).
			Run(s.ConcurrentDSCSyncs, util.NeverStop)
		go job.NewJobController(kubeClient, s.ResyncPeriod).
			Run(s.ConcurrentJobSyncs, util.NeverStop)
		podautoscaler.NewHorizontalController(kubeClient, metrics.NewHeapsterMetricsClient(kubeClient)).
			Run(s.HorizontalPodAutoscalerSyncPeriod)
		deployment.New(kubeClient).
			Run(s.DeploymentControllerSyncPeriod)
	}
	pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(kubeClient, s.PVClaimBinderSyncPeriod)
	pvclaimBinder.Run()
//......... portions of code omitted .........
Example 12: RunPersistentVolumeClaimBinder
// RunPersistentVolumeClaimBinder starts the Kubernetes Persistent Volume Claim Binder
func (c *MasterConfig) RunPersistentVolumeClaimBinder() {
	binder := volumeclaimbinder.NewPersistentVolumeClaimBinder(c.KubeClient, c.ControllerManager.PVClaimBinderSyncPeriod)
	binder.Run()
	glog.Infof("Started Kubernetes Persistent Volume Claim Binder")
}