This article collects typical usage examples of the Golang DefaultWatchReactor function from k8s.io/kubernetes/pkg/client/testing/core. If you are wondering what DefaultWatchReactor does, how to call it, or what real-world usage looks like, the curated examples below should help.
Fifteen code examples of DefaultWatchReactor are shown, ordered by popularity by default.
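Before the examples, here is a minimal, self-contained sketch of the pattern they all share: wire a watch.FakeWatcher into a fake clientset through DefaultWatchReactor, then inject events via the watcher. This is a sketch, assuming the pre-client-go vendored package layout these examples are written against; exact import paths vary by Kubernetes release, and the function name demoDefaultWatchReactor is illustrative.

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
	"k8s.io/kubernetes/pkg/client/testing/core"
	"k8s.io/kubernetes/pkg/watch"
)

func demoDefaultWatchReactor() {
	client := fake.NewSimpleClientset()
	fakeWatch := watch.NewFake()
	// Every pod Watch() served by the fake client now returns fakeWatch;
	// the nil second argument means the reactor itself never fails.
	client.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))

	w, err := client.Core().Pods("default").Watch(api.ListOptions{})
	if err != nil {
		panic(err)
	}
	// FakeWatcher channels are unbuffered, so inject from another goroutine.
	go fakeWatch.Add(&api.Pod{ObjectMeta: api.ObjectMeta{Name: "mypod"}})

	evt := <-w.ResultChan()
	fmt.Println(evt.Type, evt.Object.(*api.Pod).Name) // ADDED mypod
}

Passing a non-nil error as the second argument instead makes every matching Watch call fail with that error, which is useful for negative tests.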
Example 1: controllerSetup
func controllerSetup(startingObjects []runtime.Object, stopChannel chan struct{}, t *testing.T) ( /*caName*/ string, *fake.Clientset, *watch.FakeWatcher, *ServiceServingCertController) {
certDir, err := ioutil.TempDir("", "serving-cert-unit-")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
caInfo := admin.DefaultServiceSignerCAInfo(certDir)
caOptions := admin.CreateSignerCertOptions{
CertFile: caInfo.CertFile,
KeyFile: caInfo.KeyFile,
Name: admin.DefaultServiceServingCertSignerName(),
Output: ioutil.Discard,
}
ca, err := caOptions.CreateSignerCert()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
kubeclient := fake.NewSimpleClientset(startingObjects...)
fakeWatch := watch.NewFake()
kubeclient.PrependReactor("create", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(core.CreateAction).GetObject(), nil
})
kubeclient.PrependReactor("update", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(core.UpdateAction).GetObject(), nil
})
kubeclient.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
controller := NewServiceServingCertController(kubeclient.Core(), kubeclient.Core(), ca, "cluster.local", 10*time.Minute)
return caOptions.Name, kubeclient, fakeWatch, controller
}
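The helper hands the FakeWatcher back to the caller, so the test body can drive the controller by injecting service events with fakeWatch.Add, fakeWatch.Modify, and so on. The prepended create and update reactors simply echo the submitted object back, which lets the controller's own writes succeed without a real API server.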
Example 2: CreateTestClient
func CreateTestClient() *fake.Clientset {
fakeClient := &fake.Clientset{}
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &v1.PodList{}
podNamePrefix := "mypod"
namespace := "mynamespace"
for i := 0; i < 5; i++ {
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := v1.Pod{
Status: v1.PodStatus{
Phase: v1.PodRunning,
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: namespace,
Labels: map[string]string{
"name": podName,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "containerName",
Image: "containerImage",
VolumeMounts: []v1.VolumeMount{
{
Name: "volumeMountName",
ReadOnly: false,
MountPath: "/mnt",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "volumeName",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "pdName",
FSType: "ext4",
ReadOnly: false,
},
},
},
},
},
}
obj.Items = append(obj.Items, pod)
}
return true, obj, nil
})
fakeWatch := watch.NewFake()
fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
return fakeClient
}
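A hedged sketch of how a test might consume CreateTestClient: the List call is served by the "list" reactor above, which fabricates five running pods backed by a GCE persistent disk. The v1.ListOptions signature assumes the release-1.5-era generated clientset these examples target, and the test name is illustrative.

func TestCreateTestClientLists(t *testing.T) {
	client := CreateTestClient()
	// The list reactor fabricates five running pods in "mynamespace".
	pods, err := client.Core().Pods("mynamespace").List(v1.ListOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(pods.Items) != 5 {
		t.Errorf("expected 5 pods, got %d", len(pods.Items))
	}
}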
Example 3: TestHookExecutor_executeExecNewPodFailed
func TestHookExecutor_executeExecNewPodFailed(t *testing.T) {
hook := &deployapi.LifecycleHook{
FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
ExecNewPod: &deployapi.ExecNewPodHook{
ContainerName: "container1",
},
}
config := deploytest.OkDeploymentConfig(1)
deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
client := newTestClient(config)
podCreated := make(chan struct{})
var createdPod *kapi.Pod
client.AddReactor("create", "pods", func(a core.Action) (handled bool, ret runtime.Object, err error) {
defer close(podCreated)
action := a.(core.CreateAction)
object := action.GetObject()
createdPod = object.(*kapi.Pod)
return true, createdPod, nil
})
podsWatch := watch.NewFake()
client.AddWatchReactor("pods", core.DefaultWatchReactor(podsWatch, nil))
go func() {
<-podCreated
podsWatch.Add(createdPod)
podCopy, _ := kapi.Scheme.Copy(createdPod)
updatedPod := podCopy.(*kapi.Pod)
updatedPod.Status.Phase = kapi.PodFailed
podsWatch.Modify(updatedPod)
}()
executor := &HookExecutor{
pods: client.Core(),
out: ioutil.Discard,
decoder: kapi.Codecs.UniversalDecoder(),
getPodLogs: func(*kapi.Pod) (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("test")), nil
},
}
err := executor.executeExecNewPod(hook, deployment, "hook", "test")
if err == nil {
t.Fatalf("expected an error, got none")
}
t.Logf("got expected error: %T", err)
}
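Note the synchronization here: the create reactor captures the hook pod and closes podCreated; only then does the goroutine replay the pod through podsWatch, first as an add and then as a modify with Status.Phase set to PodFailed, so the executor's watch loop observes the failure and surfaces it as the expected error.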
Example 4: TestWatchPods
func TestWatchPods(t *testing.T) {
testJob := newJob(2, 2)
clientset := fake.NewSimpleClientset(testJob)
fakeWatch := watch.NewFake()
clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
// Put one job and one pod into the store
manager.jobStore.Store.Add(testJob)
received := make(chan struct{})
// The pod update sent through the fakeWatcher should figure out the managing job and
// send it into the syncHandler.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.jobStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find job under key %v", key)
close(received)
return nil
}
job, ok := obj.(*batch.Job)
if !ok {
t.Errorf("unexpected type: %v %#v", reflect.TypeOf(obj), obj)
close(received)
return nil
}
if !api.Semantic.DeepDerivative(job, testJob) {
t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job)
close(received)
return nil
}
close(received)
return nil
}
// Start only the pod watcher and the workqueue, send a watch event,
// and make sure it hits the sync method for the right job.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.internalPodInformer.Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(1, api.PodRunning, testJob)
testPod := pods[0]
testPod.Status.Phase = api.PodFailed
fakeWatch.Add(&testPod)
t.Log("Waiting for pod to reach syncHandler")
<-received
}
Example 5: NewSimpleClientset
// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
o := core.NewObjectTracker(api.Scheme, api.Codecs.UniversalDecoder())
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
fakePtr := core.Fake{}
fakePtr.AddReactor("*", "*", core.ObjectReaction(o, api.Registry.RESTMapper()))
fakePtr.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil))
return &Clientset{fakePtr}
}
Example 6: NewSimpleFake
// NewSimpleFake returns a client that will respond with the provided objects
func NewSimpleFake(objects ...runtime.Object) *Fake {
o := core.NewObjectTracker(kapi.Scheme, kapi.Codecs.UniversalDecoder())
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
fakeClient := &Fake{}
fakeClient.AddReactor("*", "*", core.ObjectReaction(o, registered.RESTMapper()))
fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(watch.NewFake(), nil))
return fakeClient
}
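In both NewSimpleClientset and NewSimpleFake, the default watch reactor wraps a fresh watch.NewFake() that the caller never gets a handle on, so no events can be injected through it. That is exactly why the test examples in this article prepend their own reactor around a FakeWatcher they keep.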
Example 7: TestWatchPods
func TestWatchPods(t *testing.T) {
testJob := newJob(2, 2)
clientset := fake.NewSimpleClientset(testJob)
fakeWatch := watch.NewFake()
clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
// Put one job and one pod into the store
sharedInformerFactory.Jobs().Informer().GetIndexer().Add(testJob)
received := make(chan struct{})
// The pod update sent through the fakeWatcher should figure out the managing job and
// send it into the syncHandler.
manager.syncHandler = func(key string) error {
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
t.Errorf("Error getting namespace/name from key %v: %v", key, err)
}
job, err := manager.jobLister.Jobs(ns).Get(name)
if err != nil {
t.Errorf("Expected to find job under key %v: %v", key, err)
}
if !api.Semantic.DeepDerivative(job, testJob) {
t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job)
close(received)
return nil
}
close(received)
return nil
}
// Start only the pod watcher and the workqueue, send a watch event,
// and make sure it hits the sync method for the right job.
stopCh := make(chan struct{})
defer close(stopCh)
go sharedInformerFactory.Pods().Informer().Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(1, api.PodRunning, testJob)
testPod := pods[0]
testPod.Status.Phase = api.PodFailed
fakeWatch.Add(&testPod)
t.Log("Waiting for pod to reach syncHandler")
<-received
}
Example 8: TestWatchPods
func TestWatchPods(t *testing.T) {
fakeWatch := watch.NewFake()
client := &fake.Clientset{}
client.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady
// Put one ReplicaSet and one pod into the controller's stores
labelMap := map[string]string{"foo": "bar"}
testRSSpec := newReplicaSet(1, labelMap)
manager.rsStore.Store.Add(testRSSpec)
received := make(chan string)
// The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and
// send it into the syncHandler.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.rsStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find replica set under key %v", key)
}
rsSpec := obj.(*extensions.ReplicaSet)
if !api.Semantic.DeepDerivative(rsSpec, testRSSpec) {
t.Errorf("\nExpected %#v,\nbut got %#v", testRSSpec, rsSpec)
}
close(received)
return nil
}
// Start only the pod watcher and the workqueue, send a watch event,
// and make sure it hits the sync method for the right ReplicaSet.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.podController.Run(stopCh)
go manager.internalPodInformer.Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(nil, 1, api.PodRunning, labelMap, testRSSpec, "pod")
testPod := pods.Items[0]
testPod.Status.Phase = api.PodFailed
fakeWatch.Add(&testPod)
select {
case <-received:
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("unexpected timeout from result channel")
}
}
Example 9: TestWatchPods
func TestWatchPods(t *testing.T) {
fakeWatch := watch.NewFake()
c := &fake.Clientset{}
c.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
manager := NewReplicationManager(c, controller.NoResyncPeriodFunc, BurstReplicas)
manager.podStoreSynced = alwaysReady
// Put one rc and one pod into the controller's stores
testControllerSpec := newReplicationController(1)
manager.rcStore.Store.Add(testControllerSpec)
received := make(chan string)
// The pod update sent through the fakeWatcher should figure out the managing rc and
// send it into the syncHandler.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.rcStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find controller under key %v", key)
}
controllerSpec := obj.(*api.ReplicationController)
if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
t.Errorf("\nExpected %#v,\nbut got %#v", testControllerSpec, controllerSpec)
}
close(received)
return nil
}
// Start only the pod watcher and the workqueue, send a watch event,
// and make sure it hits the sync method for the right rc.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.podController.Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(nil, 1, api.PodRunning, testControllerSpec)
testPod := pods.Items[0]
testPod.Status.Phase = api.PodFailed
fakeWatch.Add(&testPod)
select {
case <-received:
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("Expected 1 call but got 0")
}
}
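Examples 8 and 9 are the same pattern applied to ReplicaSets and ReplicationControllers respectively. Both bound the wait with a wait.ForeverTestTimeout select rather than the bare channel receive used in the job tests, so a missed event fails the test instead of hanging it.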
Example 10: TestWatchJobs
func TestWatchJobs(t *testing.T) {
clientset := fake.NewSimpleClientset()
fakeWatch := watch.NewFake()
clientset.PrependWatchReactor("jobs", core.DefaultWatchReactor(fakeWatch, nil))
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
var testJob batch.Job
received := make(chan struct{})
// The update sent through the fakeWatcher should make its way into the workqueue,
// and eventually into the syncHandler.
manager.syncHandler = func(key string) error {
defer close(received)
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
t.Errorf("Error getting namespace/name from key %v: %v", key, err)
}
job, err := manager.jobLister.Jobs(ns).Get(name)
if err != nil || job == nil {
t.Errorf("Expected to find job under key %v: %v", key, err)
return nil
}
if !api.Semantic.DeepDerivative(*job, testJob) {
t.Errorf("Expected %#v, but got %#v", testJob, *job)
}
return nil
}
// Start only the job watcher and the workqueue, send a watch event,
// and make sure it hits the sync method.
stopCh := make(chan struct{})
defer close(stopCh)
sharedInformerFactory.Start(stopCh)
go manager.Run(1, stopCh)
// We're sending a new job to see if it reaches the syncHandler.
testJob.Namespace = "bar"
testJob.Name = "foo"
fakeWatch.Add(&testJob)
t.Log("Waiting for job to reach syncHandler")
<-received
}
Example 11: TestWatchControllers
func TestWatchControllers(t *testing.T) {
fakeWatch := watch.NewFake()
client := &fake.Clientset{}
client.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
manager := NewReplicaSetControllerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady
var testRSSpec extensions.ReplicaSet
received := make(chan string)
// The update sent through the fakeWatcher should make its way into the workqueue,
// and eventually into the syncHandler. The handler validates the received controller
// and closes the received channel to indicate that the test can finish.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.rsStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find replica set under key %v", key)
}
rsSpec := *obj.(*extensions.ReplicaSet)
if !api.Semantic.DeepDerivative(rsSpec, testRSSpec) {
t.Errorf("Expected %#v, but got %#v", testRSSpec, rsSpec)
}
close(received)
return nil
}
// Start only the ReplicaSet watcher and the workqueue, send a watch event,
// and make sure it hits the sync method.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.rsController.Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
testRSSpec.Name = "foo"
fakeWatch.Add(&testRSSpec)
select {
case <-received:
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("unexpected timeout from result channel")
}
}
Example 12: controllerSetup
func controllerSetup(startingObjects []runtime.Object, t *testing.T) (*fake.Clientset, *watch.FakeWatcher, *DockerRegistryServiceController) {
kubeclient := fake.NewSimpleClientset(startingObjects...)
fakeWatch := watch.NewFake()
kubeclient.PrependReactor("create", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(core.CreateAction).GetObject(), nil
})
kubeclient.PrependReactor("update", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(core.UpdateAction).GetObject(), nil
})
kubeclient.PrependWatchReactor("services", core.DefaultWatchReactor(fakeWatch, nil))
controller := NewDockerRegistryServiceController(kubeclient, DockerRegistryServiceControllerOptions{
Resync: 10 * time.Minute,
RegistryNamespace: registryNamespace,
RegistryServiceName: registryName,
DockercfgController: &DockercfgController{},
DockerURLsIntialized: make(chan struct{}),
})
return kubeclient, fakeWatch, controller
}
Example 13: TestWatchJobs
func TestWatchJobs(t *testing.T) {
clientset := fake.NewSimpleClientset()
fakeWatch := watch.NewFake()
clientset.PrependWatchReactor("jobs", core.DefaultWatchReactor(fakeWatch, nil))
manager := NewJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
var testJob batch.Job
received := make(chan struct{})
// The update sent through the fakeWatcher should make its way into the workqueue,
// and eventually into the syncHandler.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.jobStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find job under key %v", key)
}
job, ok := obj.(*batch.Job)
if !ok {
t.Fatalf("unexpected type: %v %#v", reflect.TypeOf(obj), obj)
}
if !api.Semantic.DeepDerivative(*job, testJob) {
t.Errorf("Expected %#v, but got %#v", testJob, *job)
}
close(received)
return nil
}
// Start only the job watcher and the workqueue, send a watch event,
// and make sure it hits the sync method.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.Run(1, stopCh)
// We're sending a new job to see if it reaches the syncHandler.
testJob.Name = "foo"
fakeWatch.Add(&testJob)
t.Log("Waiting for job to reach syncHandler")
<-received
}
Example 14: controllerSetup
func controllerSetup(t *testing.T, startingObjects []runtime.Object) (*fake.Clientset, *watch.FakeWatcher, *IngressIPController) {
client := fake.NewSimpleClientset(startingObjects...)
fakeWatch := watch.NewFake()
client.PrependWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
client.PrependReactor("create", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(core.CreateAction).GetObject()
fakeWatch.Add(obj)
return true, obj, nil
})
// Ensure that updates the controller makes are passed through to the watcher.
client.PrependReactor("update", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(core.CreateAction).GetObject()
fakeWatch.Modify(obj)
return true, obj, nil
})
controller := newController(t, client)
return client, fakeWatch, controller
}
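Here the create and update reactors feed objects back into fakeWatch via Add and Modify, forming a loopback: anything the controller writes is immediately observed as a watch event. The update reactor's type assertion to core.CreateAction still works because the create and update action interfaces both expose GetObject.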
Example 15: TestReplicationControllerStop
//......... (part of this example's code is omitted here) .........
Namespace: ns,
},
Spec: api.ReplicationControllerSpec{
Replicas: 0,
Selector: map[string]string{"k1": "v1", "k2": "v2", "k3": "v3"}},
},
{
ObjectMeta: api.ObjectMeta{
Name: "zaz",
Namespace: ns,
},
Spec: api.ReplicationControllerSpec{
Replicas: 0,
Selector: map[string]string{"k1": "v1"}},
},
{
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: api.ReplicationControllerSpec{
Replicas: 0,
Selector: map[string]string{"k1": "v1", "k2": "v2"}},
},
},
},
},
StopError: fmt.Errorf("Detected overlapping controllers for rc foo: baz,zaz, please manage deletion individually with --cascade=false."),
ExpectedActions: []string{"get", "list"},
},
{
Name: "TwoExactMatchRCs",
Objs: []runtime.Object{
&api.ReplicationControllerList{ // LIST
Items: []api.ReplicationController{
{
ObjectMeta: api.ObjectMeta{
Name: "zaz",
Namespace: ns,
},
Spec: api.ReplicationControllerSpec{
Replicas: 0,
Selector: map[string]string{"k1": "v1"}},
},
{
ObjectMeta: api.ObjectMeta{
Name: name,
Namespace: ns,
},
Spec: api.ReplicationControllerSpec{
Replicas: 0,
Selector: map[string]string{"k1": "v1"}},
},
},
},
},
StopError: nil,
ExpectedActions: []string{"get", "list", "delete"},
},
}
for _, test := range tests {
copiedForWatch, err := api.Scheme.Copy(test.Objs[0])
if err != nil {
t.Fatalf("%s unexpected error: %v", test.Name, err)
}
fake := fake.NewSimpleClientset(test.Objs...)
fakeWatch := watch.NewFake()
fake.PrependWatchReactor("replicationcontrollers", testcore.DefaultWatchReactor(fakeWatch, nil))
go func() {
fakeWatch.Add(copiedForWatch)
}()
reaper := ReplicationControllerReaper{fake.Core(), time.Millisecond, time.Millisecond}
err = reaper.Stop(ns, name, 0, nil)
if !reflect.DeepEqual(err, test.StopError) {
t.Errorf("%s unexpected error: %v", test.Name, err)
continue
}
actions := fake.Actions()
if len(actions) != len(test.ExpectedActions) {
t.Errorf("%s unexpected actions: %v, expected %d actions got %d", test.Name, actions, len(test.ExpectedActions), len(actions))
continue
}
for i, verb := range test.ExpectedActions {
if actions[i].GetResource().GroupResource() != api.Resource("replicationcontrollers") {
t.Errorf("%s unexpected action: %+v, expected %s-replicationController", test.Name, actions[i], verb)
}
if actions[i].GetVerb() != verb {
t.Errorf("%s unexpected action: %+v, expected %s-replicationController", test.Name, actions[i], verb)
}
}
}
}