This page collects typical usage examples of the Golang function k8s.io/kubernetes/pkg/client/unversioned/testclient.DefaultWatchReactor. If you have been wondering what exactly DefaultWatchReactor does, how to call it, and what real code that uses it looks like, the curated examples below should help.
The page shows 15 code examples of DefaultWatchReactor, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang examples.
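Before the examples, it helps to know what DefaultWatchReactor actually does: it takes a watch.Interface (typically the *watch.FakeWatcher returned by watch.NewFake()) plus an error, and returns a watch reaction function that answers every matching Watch call with exactly that watcher and error. Registered on a fake client, it lets a test inject watch events by hand. Roughly, it is equivalent to the following sketch (not the verbatim Kubernetes source):

func defaultWatchReactor(w watch.Interface, err error) testclient.WatchReactionFunc {
	// Mark every watch action as handled and hand back the injected watcher.
	return func(action testclient.Action) (bool, watch.Interface, error) {
		return true, w, err
	}
}

Once such a reactor is registered via AddWatchReactor or PrependWatchReactor, the test drives the code under test by pushing events with fakeWatch.Add, fakeWatch.Modify, and fakeWatch.Delete, as the examples below show.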
Example 1: controllerSetup
func controllerSetup(startingObjects []runtime.Object, stopChannel chan struct{}, t *testing.T) ( /*caName*/ string, *ktestclient.Fake, *watch.FakeWatcher, *ServiceServingCertController) {
certDir, err := ioutil.TempDir("", "serving-cert-unit-")
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
caInfo := admin.DefaultServiceSignerCAInfo(certDir)
caOptions := admin.CreateSignerCertOptions{
CertFile: caInfo.CertFile,
KeyFile: caInfo.KeyFile,
Name: admin.DefaultServiceServingCertSignerName(),
Output: ioutil.Discard,
}
ca, err := caOptions.CreateSignerCert()
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
kubeclient := ktestclient.NewSimpleFake(startingObjects...)
fakeWatch := watch.NewFake()
kubeclient.PrependReactor("create", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(ktestclient.CreateAction).GetObject(), nil
})
kubeclient.PrependReactor("update", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(ktestclient.UpdateAction).GetObject(), nil
})
kubeclient.PrependWatchReactor("*", ktestclient.DefaultWatchReactor(fakeWatch, nil))
controller := NewServiceServingCertController(kubeclient, kubeclient, ca, "cluster.local", 10*time.Minute)
return caOptions.Name, kubeclient, fakeWatch, controller
}
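Two details recur throughout the examples below. First, reactors registered with the Prepend* variants are consulted before previously registered ones, so the create/update reactors above take precedence over the fake client's defaults. Second, a test built on controllerSetup can drive the controller purely through the fake watcher; the sketch below is hypothetical, and the Run signature and service fields are assumptions for illustration:

// Hypothetical usage of controllerSetup (Run signature and fields assumed):
stopChannel := make(chan struct{})
defer close(stopChannel)
_, kubeclient, fakeWatch, controller := controllerSetup(nil, stopChannel, t)
go controller.Run(1, stopChannel)
// Simulate a service appearing; assertions would then inspect
// kubeclient.Actions() for the writes the controller issued.
fakeWatch.Add(&kapi.Service{ObjectMeta: kapi.ObjectMeta{Name: "svc", Namespace: "ns"}})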
Example 2: mockREST
// mockREST mocks a DeploymentLog REST
func mockREST(version, desired int, endStatus api.DeploymentStatus) *REST {
// Fake deploymentConfig
config := deploytest.OkDeploymentConfig(version)
fakeDn := testclient.NewSimpleFake(config)
fakeDn.PrependReactor("get", "deploymentconfigs", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
return true, config, nil
})
// Fake deployments
fakeDeployments := makeDeploymentList(version)
fakeRn := ktestclient.NewSimpleFake(fakeDeployments)
fakeRn.PrependReactor("get", "replicationcontrollers", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
return true, &fakeDeployments.Items[desired-1], nil
})
// Fake watcher for deployments
fakeWatch := watch.NewFake()
fakeRn.PrependWatchReactor("replicationcontrollers", ktestclient.DefaultWatchReactor(fakeWatch, nil))
// Everything is fake
connectionInfo := &kclient.HTTPKubeletClient{Config: &kclient.KubeletConfig{EnableHttps: true, Port: 12345}, Client: &http.Client{}}
obj := &fakeDeployments.Items[desired-1]
obj.Annotations[api.DeploymentStatusAnnotation] = string(endStatus)
go fakeWatch.Add(obj)
return &REST{
ConfigGetter: fakeDn,
DeploymentGetter: fakeRn,
PodGetter: &deployerPodGetter{},
ConnectionInfo: connectionInfo,
Timeout: defaultTimeout,
}
}
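One subtlety here: watch.NewFake() builds a FakeWatcher around an unbuffered channel, so Add, Modify, and Delete block until the watch consumer reads from ResultChan(). That is why the event above is sent with go fakeWatch.Add(obj) rather than inline — the REST endpoint only starts consuming after mockREST returns. Some releases also ship a buffered variant; the name and signature below are an assumption that varies by Kubernetes version:

// Hedged alternative: a buffered fake watcher lets a test queue events
// before the consumer starts, avoiding the goroutine. Signature varies
// across releases, so treat this as a sketch.
fakeWatch := watch.NewFakeWithChanSize(1)
fakeWatch.Add(obj) // does not block while the buffer has room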
Example 3: TestUpdatePods
func TestUpdatePods(t *testing.T) {
fakeWatch := watch.NewFake()
client := &testclient.Fake{}
client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
manager := NewReplicationManager(client, BurstReplicas)
manager.podStoreSynced = alwaysReady
received := make(chan string)
manager.syncHandler = func(key string) error {
obj, exists, err := manager.rcStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find controller under key %v", key)
}
received <- obj.(*api.ReplicationController).Name
return nil
}
stopCh := make(chan struct{})
defer close(stopCh)
go util.Until(manager.worker, 10*time.Millisecond, stopCh)
// Put 2 rcs and one pod into the controller's stores
testControllerSpec1 := newReplicationController(1)
manager.rcStore.Store.Add(testControllerSpec1)
testControllerSpec2 := *testControllerSpec1
testControllerSpec2.Spec.Selector = map[string]string{"bar": "foo"}
testControllerSpec2.Name = "barfoo"
manager.rcStore.Store.Add(&testControllerSpec2)
// Put one pod in the podStore
pod1 := newPodList(manager.podStore.Store, 1, api.PodRunning, testControllerSpec1).Items[0]
pod2 := pod1
pod2.Labels = testControllerSpec2.Spec.Selector
// Send an update of the same pod with modified labels, and confirm we get a sync request for
// both controllers
manager.updatePod(&pod1, &pod2)
expected := sets.NewString(testControllerSpec1.Name, testControllerSpec2.Name)
for _, name := range expected.List() {
t.Logf("Expecting update for %+v", name)
select {
case got := <-received:
if !expected.Has(got) {
t.Errorf("Expected keys %#v got %v", expected, got)
}
case <-time.After(util.ForeverTestTimeout):
t.Errorf("Expected update notifications for controllers within 100ms each")
}
}
}
Example 4: TestHookExecutor_executeExecNewPodFailed
func TestHookExecutor_executeExecNewPodFailed(t *testing.T) {
hook := &deployapi.LifecycleHook{
FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
ExecNewPod: &deployapi.ExecNewPodHook{
ContainerName: "container1",
},
}
config := deploytest.OkDeploymentConfig(1)
deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
client := newTestClient(config)
podCreated := make(chan struct{})
var createdPod *kapi.Pod
client.AddReactor("create", "pods", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
defer close(podCreated)
action := a.(testclient.CreateAction)
object := action.GetObject()
createdPod = object.(*kapi.Pod)
return true, createdPod, nil
})
podsWatch := watch.NewFake()
client.AddWatchReactor("pods", testclient.DefaultWatchReactor(podsWatch, nil))
go func() {
<-podCreated
podsWatch.Add(createdPod)
podCopy, _ := kapi.Scheme.Copy(createdPod)
updatedPod := podCopy.(*kapi.Pod)
updatedPod.Status.Phase = kapi.PodFailed
podsWatch.Modify(updatedPod)
}()
executor := &HookExecutor{
pods: client,
out: ioutil.Discard,
decoder: kapi.Codecs.UniversalDecoder(),
getPodLogs: func(*kapi.Pod) (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("test")), nil
},
}
err := executor.executeExecNewPod(hook, deployment, "hook", "test")
if err == nil {
t.Fatalf("expected an error, got none")
}
t.Logf("got expected error: %T", err)
}
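The choreography above — a create reactor that captures the hook pod, then a goroutine that replays it through the fake watch with a terminal phase — is a reusable pattern. A generic version of it, using a buffered channel in place of the close/shared-variable pair (a sketch under the same fake-client API as the example):

created := make(chan *kapi.Pod, 1)
client.AddReactor("create", "pods", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
	// Capture whatever pod the code under test creates.
	pod := a.(testclient.CreateAction).GetObject().(*kapi.Pod)
	created <- pod
	return true, pod, nil
})
go func() {
	pod := <-created
	podsWatch.Add(pod)
	// Replay a deep copy with the terminal phase the test needs.
	copied, _ := kapi.Scheme.Copy(pod)
	updated := copied.(*kapi.Pod)
	updated.Status.Phase = kapi.PodFailed // or kapi.PodSucceeded for the happy path
	podsWatch.Modify(updated)
}()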
Example 5: TestWatchPods
func TestWatchPods(t *testing.T) {
fakeWatch := watch.NewFake()
client := &testclient.Fake{}
client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
manager := NewReplicaSetController(client, controller.NoResyncPeriodFunc, BurstReplicas)
manager.podStoreSynced = alwaysReady
// Put one ReplicaSet and one pod into the controller's stores
labelMap := map[string]string{"foo": "bar"}
testRSSpec := newReplicaSet(1, labelMap)
manager.rsStore.Store.Add(testRSSpec)
received := make(chan string)
// The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and
// send it into the syncHandler.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.rsStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find replica set under key %v", key)
}
rsSpec := obj.(*extensions.ReplicaSet)
if !api.Semantic.DeepDerivative(rsSpec, testRSSpec) {
t.Errorf("\nExpected %#v,\nbut got %#v", testRSSpec, rsSpec)
}
close(received)
return nil
}
// Start only the pod watcher and the workqueue, send a watch event,
// and make sure it hits the sync method for the right ReplicaSet.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.podController.Run(stopCh)
go util.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(nil, 1, api.PodRunning, labelMap, testRSSpec)
testPod := pods.Items[0]
testPod.Status.Phase = api.PodFailed
fakeWatch.Add(&testPod)
select {
case <-received:
case <-time.After(util.ForeverTestTimeout):
t.Errorf("Expected 1 call but got 0")
}
}
Example 6: TestWatchPods
func TestWatchPods(t *testing.T) {
fakeWatch := watch.NewFake()
client := &testclient.Fake{}
client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
manager := NewJobController(client, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
// Put one job and one pod into the store
testJob := newJob(2, 2)
manager.jobStore.Store.Add(testJob)
received := make(chan string)
// The pod update sent through the fakeWatcher should figure out the managing job and
// send it into the syncHandler.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.jobStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find job under key %v", key)
}
job := obj.(*extensions.Job)
if !api.Semantic.DeepDerivative(job, testJob) {
t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job)
}
close(received)
return nil
}
// Start only the pod watcher and the workqueue, send a watch event,
// and make sure it hits the sync method for the right job.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.podController.Run(stopCh)
go util.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(1, api.PodRunning, testJob)
testPod := pods[0]
testPod.Status.Phase = api.PodFailed
fakeWatch.Add(&testPod)
select {
case <-received:
case <-time.After(controllerTimeout):
t.Errorf("Expected 1 call but got 0")
}
}
Example 7: TestWatchControllers
func TestWatchControllers(t *testing.T) {
fakeWatch := watch.NewFake()
client := &testclient.Fake{}
client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
manager := NewReplicationManager(client, controller.NoResyncPeriodFunc, BurstReplicas)
manager.podStoreSynced = alwaysReady
var testControllerSpec api.ReplicationController
received := make(chan string)
// The update sent through the fakeWatcher should make its way into the workqueue,
// and eventually into the syncHandler. The handler validates the received controller
// and closes the received channel to indicate that the test can finish.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.rcStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find controller under key %v", key)
}
controllerSpec := *obj.(*api.ReplicationController)
if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
t.Errorf("Expected %#v, but got %#v", testControllerSpec, controllerSpec)
}
close(received)
return nil
}
// Start only the rc watcher and the workqueue, send a watch event,
// and make sure it hits the sync method.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.rcController.Run(stopCh)
go util.Until(manager.worker, 10*time.Millisecond, stopCh)
testControllerSpec.Name = "foo"
fakeWatch.Add(&testControllerSpec)
select {
case <-received:
case <-time.After(util.ForeverTestTimeout):
t.Errorf("Expected 1 call but got 0")
}
}
Example 8: controllerSetup
func controllerSetup(startingObjects []runtime.Object, t *testing.T) (*ktestclient.Fake, *watch.FakeWatcher, *DockerRegistryServiceController) {
kubeclient := ktestclient.NewSimpleFake(startingObjects...)
fakeWatch := watch.NewFake()
kubeclient.PrependReactor("create", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(ktestclient.CreateAction).GetObject(), nil
})
kubeclient.PrependReactor("update", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
return true, action.(ktestclient.UpdateAction).GetObject(), nil
})
kubeclient.PrependWatchReactor("services", ktestclient.DefaultWatchReactor(fakeWatch, nil))
controller := NewDockerRegistryServiceController(kubeclient, DockerRegistryServiceControllerOptions{
Resync: 10 * time.Minute,
RegistryNamespace: registryNamespace,
RegistryServiceName: registryName,
DockercfgController: &DockercfgController{},
DockerURLsIntialized: make(chan struct{}),
})
return kubeclient, fakeWatch, controller
}
Example 9: TestWatchJobs
func TestWatchJobs(t *testing.T) {
client := testclient.NewSimpleFake()
fakeWatch := watch.NewFake()
client.PrependWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
manager := NewJobController(client, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
var testJob extensions.Job
received := make(chan struct{})
// The update sent through the fakeWatcher should make its way into the workqueue,
// and eventually into the syncHandler.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.jobStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find job under key %v", key)
}
job := *obj.(*extensions.Job)
if !api.Semantic.DeepDerivative(job, testJob) {
t.Errorf("Expected %#v, but got %#v", testJob, job)
}
close(received)
return nil
}
// Start only the job watcher and the workqueue, send a watch event,
// and make sure it hits the sync method.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.jobController.Run(stopCh)
go util.Until(manager.worker, 10*time.Millisecond, stopCh)
// We're sending a new job to see if it reaches the syncHandler.
testJob.Name = "foo"
fakeWatch.Add(&testJob)
t.Log("Waiting for job to reach syncHandler")
<-received
}
Example 10: controllerSetup
func controllerSetup(t *testing.T, startingObjects []runtime.Object) (*ktestclient.Fake, *watch.FakeWatcher, *IngressIPController) {
client := ktestclient.NewSimpleFake(startingObjects...)
fakeWatch := watch.NewFake()
client.PrependWatchReactor("*", ktestclient.DefaultWatchReactor(fakeWatch, nil))
client.PrependReactor("create", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(ktestclient.CreateAction).GetObject()
fakeWatch.Add(obj)
return true, obj, nil
})
// Ensure that updates the controller makes are passed through to the watcher.
client.PrependReactor("update", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(ktestclient.CreateAction).GetObject()
fakeWatch.Modify(obj)
return true, obj, nil
})
controller := newController(t, client)
return client, fakeWatch, controller
}
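Note that the update reactor above type-asserts the action to ktestclient.CreateAction. This works because both the create and update action interfaces expose GetObject, but asserting to ktestclient.UpdateAction states the intent more clearly:

// Same behavior, clearer intent: use UpdateAction for the "update" verb.
client.PrependReactor("update", "*", func(action ktestclient.Action) (handled bool, ret runtime.Object, err error) {
	obj := action.(ktestclient.UpdateAction).GetObject()
	fakeWatch.Modify(obj) // echo the write back so the controller's informers see it
	return true, obj, nil
})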
Example 11: runFuzzer
func runFuzzer(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
startingNamespaces := CreateStartingNamespaces()
kubeclient := ktestclient.NewSimpleFake(startingNamespaces...)
nsWatch := watch.NewFake()
kubeclient.PrependWatchReactor("namespaces", ktestclient.DefaultWatchReactor(nsWatch, nil))
startingQuotas := CreateStartingQuotas()
originclient := testclient.NewSimpleFake(startingQuotas...)
quotaWatch := watch.NewFake()
originclient.AddWatchReactor("clusterresourcequotas", ktestclient.DefaultWatchReactor(quotaWatch, nil))
informerFactory := shared.NewInformerFactory(kubeclient, originclient, shared.DefaultListerWatcherOverrides{}, 10*time.Minute)
controller := NewClusterQuotaMappingController(informerFactory.Namespaces(), informerFactory.ClusterResourceQuotas())
go controller.Run(5, stopCh)
informerFactory.Start(stopCh)
informerFactory.StartCore(stopCh)
finalNamespaces := map[string]*kapi.Namespace{}
finalQuotas := map[string]*quotaapi.ClusterResourceQuota{}
quotaActions := map[string][]string{}
namespaceActions := map[string][]string{}
finishedNamespaces := make(chan struct{})
finishedQuotas := make(chan struct{})
for _, quota := range startingQuotas {
name := quota.(*quotaapi.ClusterResourceQuota).Name
quotaActions[name] = append(quotaActions[name], fmt.Sprintf("inserting %v to %v", name, quota.(*quotaapi.ClusterResourceQuota).Spec.Selector))
finalQuotas[name] = quota.(*quotaapi.ClusterResourceQuota)
}
for _, namespace := range startingNamespaces {
name := namespace.(*kapi.Namespace).Name
namespaceActions[name] = append(namespaceActions[name], fmt.Sprintf("inserting %v to %v", name, namespace.(*kapi.Namespace).Labels))
finalNamespaces[name] = namespace.(*kapi.Namespace)
}
go func() {
for i := 0; i < 200; i++ {
name := quotaNames[rand.Intn(len(quotaNames))]
_, exists := finalQuotas[name]
if rand.Intn(50) == 0 {
if !exists {
continue
}
// due to the compression race (see big comment for impl), clear the queue then delete
for {
if len(quotaWatch.ResultChan()) == 0 {
break
}
time.Sleep(10 * time.Millisecond)
}
quotaActions[name] = append(quotaActions[name], "deleting "+name)
quotaWatch.Delete(finalQuotas[name])
delete(finalQuotas, name)
continue
}
quota := NewQuota(name)
finalQuotas[name] = quota
copied, err := kapi.Scheme.Copy(quota)
if err != nil {
t.Fatal(err)
}
if exists {
quotaActions[name] = append(quotaActions[name], fmt.Sprintf("updating %v to %v", name, quota.Spec.Selector))
quotaWatch.Modify(copied)
} else {
quotaActions[name] = append(quotaActions[name], fmt.Sprintf("adding %v to %v", name, quota.Spec.Selector))
quotaWatch.Add(copied)
}
}
close(finishedQuotas)
}()
go func() {
for i := 0; i < 200; i++ {
name := namespaceNames[rand.Intn(len(namespaceNames))]
_, exists := finalNamespaces[name]
if rand.Intn(50) == 0 {
if !exists {
continue
}
// due to the compression race (see big comment for impl), clear the queue then delete
for {
if len(nsWatch.ResultChan()) == 0 {
break
}
time.Sleep(10 * time.Millisecond)
}
namespaceActions[name] = append(namespaceActions[name], "deleting "+name)
nsWatch.Delete(finalNamespaces[name])
delete(finalNamespaces, name)
continue
}
ns := NewNamespace(name)
//......... (rest of this function omitted) .........
Example 12: TestGetFirstPod
//......... (beginning of this function omitted) .........
ObjectMeta: api.ObjectMeta{
Name: "pod-1",
Namespace: api.NamespaceDefault,
CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
expectedNum: 2,
},
{
name: "kubectl attach - wait for ready pod",
podList: newPodList(1, 1, -1, labelSet),
watching: []watch.Event{
{
Type: watch.Modified,
Object: &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "pod-1",
Namespace: api.NamespaceDefault,
CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
},
},
sortBy: func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) },
expected: &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "pod-1",
Namespace: api.NamespaceDefault,
CreationTimestamp: unversioned.Date(2016, time.April, 1, 1, 0, 0, 0, time.UTC),
Labels: map[string]string{"test": "selector"},
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{
Status: api.ConditionTrue,
Type: api.PodReady,
},
},
},
},
expectedNum: 1,
},
}
for i := range tests {
test := tests[i]
client := &testclient.Fake{}
client.PrependReactor("list", "pods", func(action testclient.Action) (handled bool, ret runtime.Object, err error) {
return true, test.podList, nil
})
if len(test.watching) > 0 {
watcher := watch.NewFake()
for _, event := range test.watching {
switch event.Type {
case watch.Added:
go watcher.Add(event.Object)
case watch.Modified:
go watcher.Modify(event.Object)
}
}
client.PrependWatchReactor("pods", testclient.DefaultWatchReactor(watcher, nil))
}
selector := labels.Set(labelSet).AsSelector()
pod, numPods, err := GetFirstPod(client, api.NamespaceDefault, selector, 1*time.Minute, test.sortBy)
if !test.expectedErr && err != nil {
t.Errorf("%s: unexpected error: %v", test.name, err)
continue
}
if test.expectedErr && err == nil {
t.Errorf("%s: expected an error", test.name)
continue
}
if test.expectedNum != numPods {
t.Errorf("%s: expected %d pods, got %d", test.name, test.expectedNum, numPods)
continue
}
if !reflect.DeepEqual(test.expected, pod) {
t.Errorf("%s:\nexpected pod:\n%#v\ngot:\n%#v\n\n", test.name, test.expected, pod)
}
}
}
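One caveat in the watch setup above: each event is sent from its own goroutine, so if a test case ever listed more than one event, delivery order through the unbuffered FakeWatcher would not be guaranteed. Sending all events sequentially from a single goroutine preserves order (a sketch reusing the names from the example):

go func() {
	for _, event := range test.watching {
		switch event.Type {
		case watch.Added:
			watcher.Add(event.Object)
		case watch.Modified:
			watcher.Modify(event.Object)
		}
	}
}()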
Example 13: TestAdmission
// TestAdmission verifies that the namespace lifecycle admission handler
// admits or rejects operations according to the phase of the target namespace.
func TestAdmission(t *testing.T) {
namespaceObj := &api.Namespace{
ObjectMeta: api.ObjectMeta{
Name: "test",
Namespace: "",
},
Status: api.NamespaceStatus{
Phase: api.NamespaceActive,
},
}
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
store.Add(namespaceObj)
fakeWatch := watch.NewFake()
mockClient := &testclient.Fake{}
mockClient.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
mockClient.AddReactor("get", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
if getAction, ok := action.(testclient.GetAction); ok && getAction.GetName() == namespaceObj.Name {
return true, namespaceObj, nil
}
return true, nil, fmt.Errorf("No result for action %v", action)
})
mockClient.AddReactor("list", "namespaces", func(action testclient.Action) (bool, runtime.Object, error) {
return true, &api.NamespaceList{Items: []api.Namespace{*namespaceObj}}, nil
})
lfhandler := NewLifecycle(mockClient).(*lifecycle)
lfhandler.store = store
handler := admission.NewChainHandler(lfhandler)
pod := api.Pod{
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespaceObj.Name},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image"}},
},
}
badPod := api.Pod{
ObjectMeta: api.ObjectMeta{Name: "456", Namespace: "doesnotexist"},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image"}},
},
}
err := handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Create, nil))
if err != nil {
t.Errorf("Unexpected error returned from admission handler: %v", err)
}
// change namespace state to terminating
namespaceObj.Status.Phase = api.NamespaceTerminating
store.Add(namespaceObj)
// verify create operations in the namespace cause an error
err = handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Create, nil))
if err == nil {
t.Errorf("Expected error rejecting creates in a namespace when it is terminating")
}
// verify update operations in the namespace can proceed
err = handler.Admit(admission.NewAttributesRecord(&pod, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Update, nil))
if err != nil {
t.Errorf("Unexpected error returned from admission handler: %v", err)
}
// verify delete operations in the namespace can proceed
err = handler.Admit(admission.NewAttributesRecord(nil, "Pod", pod.Namespace, pod.Name, "pods", "", admission.Delete, nil))
if err != nil {
t.Errorf("Unexpected error returned from admission handler: %v", err)
}
// verify delete of namespace default can never proceed
err = handler.Admit(admission.NewAttributesRecord(nil, "Namespace", "", api.NamespaceDefault, "namespaces", "", admission.Delete, nil))
if err == nil {
t.Errorf("Expected an error that this namespace can never be deleted")
}
// verify delete of namespace other than default can proceed
err = handler.Admit(admission.NewAttributesRecord(nil, "Namespace", "", "other", "namespaces", "", admission.Delete, nil))
if err != nil {
t.Errorf("Did not expect an error %v", err)
}
// verify create/update/delete of an object in a non-existent namespace throws an error
err = handler.Admit(admission.NewAttributesRecord(&badPod, "Pod", badPod.Namespace, badPod.Name, "pods", "", admission.Create, nil))
if err == nil {
t.Errorf("Expected an error that objects cannot be created in non-existent namespaces")
}
err = handler.Admit(admission.NewAttributesRecord(&badPod, "Pod", badPod.Namespace, badPod.Name, "pods", "", admission.Update, nil))
if err == nil {
t.Errorf("Expected an error that objects cannot be updated in non-existent namespaces")
}
err = handler.Admit(admission.NewAttributesRecord(&badPod, "Pod", badPod.Namespace, badPod.Name, "pods", "", admission.Delete, nil))
if err == nil {
t.Errorf("Expected an error that objects cannot be deleted in non-existent namespaces")
}
}
Example 14: TestWatchJobs
func TestWatchJobs(t *testing.T) {
fakeWatch := watch.NewFake()
client := &testclient.Fake{}
client.AddWatchReactor("*", testclient.DefaultWatchReactor(fakeWatch, nil))
manager := NewJobController(client, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
var testJob extensions.Job
received := make(chan string)
// The update sent through the fakeWatcher should make its way into the workqueue,
// and eventually into the syncHandler.
manager.syncHandler = func(key string) error {
obj, exists, err := manager.jobStore.Store.GetByKey(key)
if !exists || err != nil {
t.Errorf("Expected to find job under key %v", key)
}
job := *obj.(*extensions.Job)
if !api.Semantic.DeepDerivative(job, testJob) {
t.Errorf("Expected %#v, but got %#v", testJob, job)
}
received <- key
return nil
}
// Start only the job watcher and the workqueue, send a watch event,
// and make sure it hits the sync method.
stopCh := make(chan struct{})
defer close(stopCh)
go manager.jobController.Run(stopCh)
go util.Until(manager.worker, 10*time.Millisecond, stopCh)
// We're sending a new job to see if it reaches the syncHandler.
testJob.Name = "foo"
fakeWatch.Add(&testJob)
select {
case <-received:
case <-time.After(controllerTimeout):
t.Errorf("Expected 1 call but got 0")
}
// We're sending a fake finished job to see if it reaches the syncHandler - it should not,
// since we're filtering out finished jobs.
testJobv2 := extensions.Job{
ObjectMeta: api.ObjectMeta{Name: "foo"},
Status: extensions.JobStatus{
Conditions: []extensions.JobCondition{{
Type: extensions.JobComplete,
Status: api.ConditionTrue,
LastProbeTime: unversioned.Now(),
LastTransitionTime: unversioned.Now(),
}},
},
}
fakeWatch.Modify(&testJobv2)
select {
case <-received:
t.Errorf("Expected 0 call but got 1")
case <-time.After(controllerTimeout):
}
}
Example 15: TestHookExecutor_executeExecNewPodSucceeded
func TestHookExecutor_executeExecNewPodSucceeded(t *testing.T) {
hook := &deployapi.LifecycleHook{
FailurePolicy: deployapi.LifecycleHookFailurePolicyAbort,
ExecNewPod: &deployapi.ExecNewPodHook{
ContainerName: "container1",
},
}
config := deploytest.OkDeploymentConfig(1)
deployment, _ := deployutil.MakeDeployment(config, kapi.Codecs.LegacyCodec(deployv1.SchemeGroupVersion))
deployment.Spec.Template.Spec.NodeSelector = map[string]string{"labelKey1": "labelValue1", "labelKey2": "labelValue2"}
client := newTestClient(config)
podCreated := make(chan struct{})
var createdPod *kapi.Pod
client.AddReactor("create", "pods", func(a testclient.Action) (handled bool, ret runtime.Object, err error) {
defer close(podCreated)
action := a.(testclient.CreateAction)
object := action.GetObject()
createdPod = object.(*kapi.Pod)
return true, createdPod, nil
})
podsWatch := watch.NewFake()
client.AddWatchReactor("pods", testclient.DefaultWatchReactor(podsWatch, nil))
podLogs := &bytes.Buffer{}
// Simulate creation of the lifecycle pod
go func() {
<-podCreated
podsWatch.Add(createdPod)
podCopy, _ := kapi.Scheme.Copy(createdPod)
updatedPod := podCopy.(*kapi.Pod)
updatedPod.Status.Phase = kapi.PodSucceeded
podsWatch.Modify(updatedPod)
}()
executor := &HookExecutor{
pods: client,
out: podLogs,
decoder: kapi.Codecs.UniversalDecoder(),
getPodLogs: func(*kapi.Pod) (io.ReadCloser, error) {
return ioutil.NopCloser(strings.NewReader("test")), nil
},
}
err := executor.executeExecNewPod(hook, deployment, "hook", "test")
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
if e, a := "--> test: Running hook pod ...\ntest--> test: Success\n", podLogs.String(); e != a {
t.Fatalf("expected pod logs to be %q, got %q", e, a)
}
if e, a := deployment.Spec.Template.Spec.NodeSelector, createdPod.Spec.NodeSelector; !reflect.DeepEqual(e, a) {
t.Fatalf("expected pod NodeSelector %v, got %v", e, a)
}
if createdPod.Spec.ActiveDeadlineSeconds == nil {
t.Fatalf("expected ActiveDeadlineSeconds to be set on the deployment hook executor pod")
}
if *createdPod.Spec.ActiveDeadlineSeconds >= deployapi.MaxDeploymentDurationSeconds {
t.Fatalf("expected ActiveDeadlineSeconds %+v to be lower than %+v", *createdPod.Spec.ActiveDeadlineSeconds, deployapi.MaxDeploymentDurationSeconds)
}
}