This article collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/controller/persistentvolume.NewPersistentVolumeController. If you are wondering what NewPersistentVolumeController does and how to use it, the curated examples below may help.
Six code examples of NewPersistentVolumeController are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Golang code examples.
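All six examples call the same constructor, though with different argument counts: Examples 2 and 5 pass a trailing EnableDynamicProvisioning bool that the other examples omit, reflecting a signature change between Kubernetes releases of this era. As a reading aid, the ten-argument form looks roughly like the sketch below; the parameter names and exact types are inferred from the call sites, not quoted from the source:

// Sketch of the constructor signature, inferred from the call sites below.
func NewPersistentVolumeController(
    kubeClient clientset.Interface,
    syncPeriod time.Duration,
    provisioner volume.ProvisionableVolumePlugin, // nil in the test examples
    recyclers []volume.VolumePlugin,
    cloud cloudprovider.Interface,
    clusterName string,
    volumeSource, claimSource cache.ListerWatcher, // nil => default list/watch against the API server
    eventRecorder record.EventRecorder,            // nil => controller creates its own
    enableDynamicProvisioning bool,                // absent in earlier releases
) *PersistentVolumeController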
Example 1: createClients
func createClients(t *testing.T, s *httptest.Server) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController, watch.Interface, watch.Interface) {
    // Use higher QPS and Burst: one test exercises a race condition that
    // creates many objects, and the default values were too low.
    binderClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000000, Burst: 1000000})
    testClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000000, Burst: 1000000})
    host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
    plugins := []volume.VolumePlugin{&volumetest.FakeVolumePlugin{
        PluginName:             "plugin-name",
        Host:                   host,
        Config:                 volume.VolumeConfig{},
        LastProvisionerOptions: volume.VolumeOptions{},
        NewAttacherCallCount:   0,
        NewDetacherCallCount:   0,
        Mounters:               nil,
        Unmounters:             nil,
        Attachers:              nil,
        Detachers:              nil,
    }}
    cloud := &fake_cloud.FakeCloud{}
    syncPeriod := getSyncPeriod()
    ctrl := persistentvolumecontroller.NewPersistentVolumeController(binderClient, syncPeriod, nil, plugins, cloud, "", nil, nil, nil)

    watchPV, err := testClient.PersistentVolumes().Watch(api.ListOptions{})
    if err != nil {
        t.Fatalf("Failed to watch PersistentVolumes: %v", err)
    }
    watchPVC, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Watch(api.ListOptions{})
    if err != nil {
        t.Fatalf("Failed to watch PersistentVolumeClaims: %v", err)
    }
    return testClient, ctrl, watchPV, watchPVC
}
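Example 1 references a getSyncPeriod helper that is not shown. A minimal sketch of such a helper, assuming the os and time packages are imported; the environment variable name and the 10-second default are illustrative assumptions, not taken from the source:

func getSyncPeriod() time.Duration {
    period := 10 * time.Second // assumed default
    // Allow an override, e.g. for slow CI machines (hypothetical variable name).
    if v := os.Getenv("KUBE_INTEGRATION_PV_SYNC_PERIOD"); v != "" {
        if d, err := time.ParseDuration(v); err == nil {
            period = d
        }
    }
    return period
}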
Example 2: RunPersistentVolumeController
func (c *MasterConfig) RunPersistentVolumeController(client *client.Client, namespace, recyclerImageName, recyclerServiceAccountName string) {
    s := c.ControllerManager
    provisioner, err := kctrlmgr.NewVolumeProvisioner(c.CloudProvider, s.VolumeConfiguration)
    if err != nil {
        glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
    }

    volumeController := persistentvolumecontroller.NewPersistentVolumeController(
        clientadapter.FromUnversionedClient(client),
        s.PVClaimBinderSyncPeriod.Duration,
        provisioner,
        probeRecyclableVolumePlugins(s.VolumeConfiguration, namespace, recyclerImageName, recyclerServiceAccountName),
        c.CloudProvider,
        s.ClusterName,
        nil, nil, nil,
        s.VolumeConfiguration.EnableDynamicProvisioning,
    )
    volumeController.Run()

    attachDetachController, err :=
        volumecontroller.NewAttachDetachController(
            clientadapter.FromUnversionedClient(client),
            c.Informers.Pods().Informer(),
            c.Informers.Nodes().Informer(),
            c.Informers.PersistentVolumeClaims().Informer(),
            c.Informers.PersistentVolumes().Informer(),
            c.CloudProvider,
            kctrlmgr.ProbeAttachableVolumePlugins(s.VolumeConfiguration))
    if err != nil {
        glog.Fatalf("Failed to start attach/detach controller: %v", err)
    } else {
        go attachDetachController.Run(utilwait.NeverStop)
    }
}
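Example 2 relies on a local probeRecyclableVolumePlugins helper that is not shown. A rough sketch of what such a helper might do in this API era: configure the host-path plugin's recycler settings and return the probed plugin list. The timeout values and the recycler pod template wiring below are assumptions, not the Origin implementation:

// Hypothetical sketch only, not the Origin implementation.
func probeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration, namespace, recyclerImageName, recyclerServiceAccountName string) []volume.VolumePlugin {
    allPlugins := []volume.VolumePlugin{}
    hostPathConfig := volume.VolumeConfig{
        RecyclerMinimumTimeout:   30, // illustrative values
        RecyclerTimeoutIncrement: 30,
        // A real implementation would also set RecyclerPodTemplate to a pod
        // that runs recyclerImageName under recyclerServiceAccountName in the
        // given namespace.
    }
    allPlugins = append(allPlugins, host_path.ProbeVolumePlugins(hostPathConfig)...)
    return allPlugins
}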
Example 3: createClients
func createClients(s *httptest.Server) (*clientset.Clientset, *persistentvolumecontroller.PersistentVolumeController) {
    // Use higher QPS and Burst: the race-condition test below creates many
    // claims, and the default values were too low.
    testClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000, Burst: 100000})
    host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
    plugins := []volume.VolumePlugin{&volumetest.FakeVolumePlugin{PluginName: "plugin-name", Host: host, Config: volume.VolumeConfig{}, LastProvisionerOptions: volume.VolumeOptions{}, NewAttacherCallCount: 0, NewDetacherCallCount: 0, Mounters: nil, Unmounters: nil, Attachers: nil, Detachers: nil}}
    cloud := &fake_cloud.FakeCloud{}
    ctrl := persistentvolumecontroller.NewPersistentVolumeController(testClient, 10*time.Second, nil, plugins, cloud, "", nil, nil, nil)
    return testClient, ctrl
}
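A helper like this is typically called at the top of an integration test, with the controller started and stopped around the test body. A minimal usage sketch following the pattern of Example 6 below; the test name is hypothetical, while framework.RunAMaster and the ctrl.Run/ctrl.Stop pairing are taken from that example:

func TestPersistentVolumeExample(t *testing.T) { // hypothetical test name
    _, s := framework.RunAMaster(t)
    defer s.Close()

    testClient, ctrl := createClients(s)
    ctrl.Run()
    defer ctrl.Stop()

    // Create PersistentVolumes and claims with testClient here and assert on
    // the binder's behavior, as Example 6 does.
    _ = testClient
}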
Example 4: StartControllers
//......... portions of this function omitted here .........
        }
        if containsResource(resources, "replicasets") {
            glog.Infof("Starting ReplicaSet controller")
            go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, int(s.LookupCacheSizeForRS)).
                Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
            time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
        }
    }

    groupVersion = "apps/v1alpha1"
    resources, found = resourceMap[groupVersion]
    glog.Infof("Attempting to start petset, full resource map %+v", resourceMap)
    if containsVersion(versions, groupVersion) && found {
        glog.Infof("Starting %s apis", groupVersion)
        if containsResource(resources, "petsets") {
            glog.Infof("Starting PetSet controller")
            resyncPeriod := ResyncPeriod(s)()
            go petset.NewPetSetController(
                podInformer,
                // TODO: Switch to using clientset
                kubeClient,
                resyncPeriod,
            ).Run(1, wait.NeverStop)
            time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
        }
    }

    provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration)
    if err != nil {
        glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
    }
    volumeController := persistentvolumecontroller.NewPersistentVolumeController(
        clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
        s.PVClaimBinderSyncPeriod.Duration,
        provisioner,
        ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
        cloud,
        s.ClusterName,
        nil, nil, nil,
    )
    volumeController.Run()
    time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

    attachDetachController, attachDetachControllerErr :=
        volume.NewAttachDetachController(
            clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "attachdetach-controller")),
            podInformer,
            nodeInformer,
            pvcInformer,
            pvInformer,
            cloud,
            ProbeAttachableVolumePlugins(s.VolumeConfiguration))
    if attachDetachControllerErr != nil {
        glog.Fatalf("Failed to start attach/detach controller: %v", attachDetachControllerErr)
    } else {
        go attachDetachController.Run(wait.NeverStop)
        time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
    }

    var rootCA []byte
    if s.RootCAFile != "" {
        rootCA, err = ioutil.ReadFile(s.RootCAFile)
        if err != nil {
Example 5: Run
//......... portions of this method omitted here .........
    groupVersion := "extensions/v1beta1"
    resources, found := resourceMap[groupVersion]
    // TODO(k8s): this needs to be dynamic so users don't have to restart their controller manager if they change the apiserver
    if containsVersion(versions, groupVersion) && found {
        glog.Infof("Starting %s apis", groupVersion)
        if containsResource(resources, "horizontalpodautoscalers") {
            glog.Infof("Starting horizontal pod controller.")
            hpaClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "horizontal-pod-autoscaler"))
            metricsClient := metrics.NewHeapsterMetricsClient(
                hpaClient,
                metrics.DefaultHeapsterNamespace,
                metrics.DefaultHeapsterScheme,
                metrics.DefaultHeapsterService,
                metrics.DefaultHeapsterPort,
            )
            go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient, s.HorizontalPodAutoscalerSyncPeriod.Duration).
                Run(wait.NeverStop)
        }
        if containsResource(resources, "daemonsets") {
            glog.Infof("Starting daemon set controller")
            go daemon.NewDaemonSetsControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), s.resyncPeriod, int(s.LookupCacheSizeForDaemonSet)).
                Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop)
        }
        if containsResource(resources, "jobs") {
            glog.Infof("Starting job controller")
            go job.NewJobControllerFromClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "job-controller")), s.resyncPeriod).
                Run(int(s.ConcurrentJobSyncs), wait.NeverStop)
        }
        if containsResource(resources, "deployments") {
            glog.Infof("Starting deployment controller")
            go deployment.NewDeploymentController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "deployment-controller")), s.resyncPeriod).
                Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop)
        }
        if containsResource(resources, "replicasets") {
            glog.Infof("Starting ReplicaSet controller")
            go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), s.resyncPeriod, replicaset.BurstReplicas, int(s.LookupCacheSizeForRS)).
                Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
        }
    }

    provisioner, err := kubecontrollermanager.NewVolumeProvisioner(cloud, s.VolumeConfiguration)
    if err != nil {
        glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
    }
    volumeController := persistentvolumecontroller.NewPersistentVolumeController(
        clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
        s.PVClaimBinderSyncPeriod.Duration,
        provisioner,
        kubecontrollermanager.ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
        cloud,
        s.ClusterName,
        nil,
        nil,
        nil,
        s.VolumeConfiguration.EnableDynamicProvisioning,
    )
    volumeController.Run()

    var rootCA []byte
    if s.RootCAFile != "" {
        rootCA, err = ioutil.ReadFile(s.RootCAFile)
        if err != nil {
            return fmt.Errorf("error reading root-ca-file at %s: %v", s.RootCAFile, err)
        }
        if _, err := crypto.CertsFromPEM(rootCA); err != nil {
            return fmt.Errorf("error parsing root-ca-file at %s: %v", s.RootCAFile, err)
        }
    } else {
        rootCA = kubeconfig.CAData
    }

    if len(s.ServiceAccountKeyFile) > 0 {
        privateKey, err := serviceaccount.ReadPrivateKey(s.ServiceAccountKeyFile)
        if err != nil {
            glog.Errorf("Error reading key for service account token controller: %v", err)
        } else {
            serviceaccountcontroller.NewTokensController(
                clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "tokens-controller")),
                serviceaccountcontroller.TokensControllerOptions{
                    TokenGenerator: serviceaccount.JWTTokenGenerator(privateKey),
                    RootCA:         rootCA,
                },
            ).Run()
        }
    }

    serviceaccountcontroller.NewServiceAccountsController(
        clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "service-account-controller")),
        serviceaccountcontroller.DefaultServiceAccountsControllerOptions(),
    ).Run()

    select {}
}
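Example 5 gates controller startup on containsVersion and containsResource, which are not shown. Plausible implementations consistent with their call sites, assuming the discovery types of this release (unversioned.APIVersions and unversioned.APIResourceList) are the parameter types:

func containsVersion(versions *unversioned.APIVersions, version string) bool {
    for _, v := range versions.Versions {
        if v == version {
            return true
        }
    }
    return false
}

func containsResource(resources *unversioned.APIResourceList, resourceName string) bool {
    for _, r := range resources.APIResources {
        if r.Name == resourceName {
            return true
        }
    }
    return false
}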
Example 6: TestPersistentVolumeRecycler
func TestPersistentVolumeRecycler(t *testing.T) {
    _, s := framework.RunAMaster(t)
    defer s.Close()

    deleteAllEtcdKeys()
    // Use higher QPS and Burst: the race-condition test below creates many
    // claims, and the default values were too low.
    testClient := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Default.GroupVersion()}, QPS: 1000, Burst: 100000})
    host := volumetest.NewFakeVolumeHost("/tmp/fake", nil, nil)
    plugins := []volume.VolumePlugin{&volumetest.FakeVolumePlugin{PluginName: "plugin-name", Host: host, Config: volume.VolumeConfig{}, LastProvisionerOptions: volume.VolumeOptions{}, NewAttacherCallCount: 0, NewDetacherCallCount: 0, Mounters: nil, Unmounters: nil, Attachers: nil, Detachers: nil}}
    cloud := &fake_cloud.FakeCloud{}

    ctrl := persistentvolumecontroller.NewPersistentVolumeController(testClient, 10*time.Second, nil, plugins, cloud, "", nil, nil, nil)
    ctrl.Run()
    defer ctrl.Stop()

    // This PV will be claimed, released, and recycled.
    pv := &api.PersistentVolume{
        ObjectMeta: api.ObjectMeta{Name: "fake-pv"},
        Spec: api.PersistentVolumeSpec{
            PersistentVolumeSource:        api.PersistentVolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/tmp/foo"}},
            Capacity:                      api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("10G")},
            AccessModes:                   []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
            PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
        },
    }
    pvc := &api.PersistentVolumeClaim{
        ObjectMeta: api.ObjectMeta{Name: "fake-pvc"},
        Spec: api.PersistentVolumeClaimSpec{
            Resources:   api.ResourceRequirements{Requests: api.ResourceList{api.ResourceName(api.ResourceStorage): resource.MustParse("5G")}},
            AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
        },
    }

    w, _ := testClient.PersistentVolumes().Watch(api.ListOptions{})
    defer w.Stop()
    _, _ = testClient.PersistentVolumes().Create(pv)
    _, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)

    // Wait until the binder pairs the volume and the claim.
    waitForPersistentVolumePhase(w, api.VolumeBound)
    // Deleting a claim releases the volume, after which it can be recycled.
    if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name, nil); err != nil {
        t.Errorf("error deleting claim %s", pvc.Name)
    }
    waitForPersistentVolumePhase(w, api.VolumeReleased)
    waitForPersistentVolumePhase(w, api.VolumeAvailable)
    // End of the Recycler test; the Deleter test begins now.

    // Tests are serial because running masters concurrently that delete keys
    // may cause similar tests to time out.
    deleteAllEtcdKeys()
    // Change the reclamation policy of the PV for the next test.
    pv.Spec.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimDelete
    w, _ = testClient.PersistentVolumes().Watch(api.ListOptions{})
    defer w.Stop()
    _, _ = testClient.PersistentVolumes().Create(pv)
    _, _ = testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(pvc)
    waitForPersistentVolumePhase(w, api.VolumeBound)
    // Deleting the claim releases the volume; the Delete policy then removes it.
    if err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Delete(pvc.Name, nil); err != nil {
        t.Errorf("error deleting claim %s", pvc.Name)
    }
    waitForPersistentVolumePhase(w, api.VolumeReleased)
    for {
        event := <-w.ResultChan()
        if event.Type == watch.Deleted {
            break
        }
    }

    // Test the race between claims and volumes: ensure that a volume binds to
    // only a single claim.
    deleteAllEtcdKeys()
    counter := 0
    maxClaims := 100
    claims := []*api.PersistentVolumeClaim{}
    for counter <= maxClaims {
        counter += 1
        clone, _ := conversion.NewCloner().DeepCopy(pvc)
        newPvc, _ := clone.(*api.PersistentVolumeClaim)
        newPvc.ObjectMeta = api.ObjectMeta{Name: fmt.Sprintf("fake-pvc-%d", counter)}
        claim, err := testClient.PersistentVolumeClaims(api.NamespaceDefault).Create(newPvc)
        if err != nil {
            t.Fatalf("Error creating newPvc: %v", err)
        }
        claims = append(claims, claim)
    }
//......... portions of this test omitted here .........
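Example 6 calls waitForPersistentVolumePhase throughout but does not show it. A minimal sketch consistent with its call sites: block on the watch channel until a PersistentVolume event reports the requested phase. A real helper might also check the current phase via a List first and should add a timeout; this sketch omits both:

func waitForPersistentVolumePhase(w watch.Interface, phase api.PersistentVolumePhase) {
    for {
        event := <-w.ResultChan()
        volume, ok := event.Object.(*api.PersistentVolume)
        if !ok {
            // Skip events for other object types.
            continue
        }
        if volume.Status.Phase == phase {
            return
        }
    }
}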