This article collects typical usage examples of the EventuallyTrue function from the Golang package k8s.io/kubernetes/contrib/mesos/pkg/assert. If you have been asking yourself: what exactly does Golang's EventuallyTrue do, how do I use it, and where can I find examples of it in use? — then the hand-picked code samples below may help.
Below are 7 code examples of the EventuallyTrue function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code samples.
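Every example below calls assertext.EventuallyTrue with a testing.T, a timeout, a condition closure, and an optional printf-style failure message. As a rough mental model, here is a minimal sketch of what such a helper plausibly looks like (inferred from the call sites below as an assumption, not the verbatim upstream source):

package assertext

import (
	"testing"
	"time"
)

// EventuallyTrue polls fn until it returns true or timeout elapses, failing
// the test with the optional formatted message in the latter case.
func EventuallyTrue(t *testing.T, timeout time.Duration, fn func() bool, msgAndArgs ...interface{}) bool {
	deadline := time.Now().Add(timeout)
	for {
		if fn() {
			return true
		}
		if time.Now().After(deadline) {
			if len(msgAndArgs) > 0 {
				t.Errorf(msgAndArgs[0].(string), msgAndArgs[1:]...)
			} else {
				t.Errorf("condition did not become true within %v", timeout)
			}
			return false
		}
		time.Sleep(10 * time.Millisecond) // the real polling interval is a guess
	}
}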
Example 1: TestExecutorFrameworkMessage
// TestExecutorFrameworkMessage ensures that the executor is able to
// handle messages from the framework, specifically about lost tasks
// and Kamikaze. When a task is lost, the executor needs to clean up
// its state. When a Kamikaze message is received, the executor should
// attempt suicide.
func TestExecutorFrameworkMessage(t *testing.T) {
	// create fake apiserver
	podListWatch := NewMockPodsListWatch(api.PodList{})
	testApiServer := NewTestServer(t, api.NamespaceDefault, &podListWatch.list)
	defer testApiServer.server.Close()

	// create and start executor
	mockDriver := &MockExecutorDriver{}
	kubeletFinished := make(chan struct{})
	config := Config{
		Docker:  dockertools.ConnectToDockerOrDie("fake://"),
		Updates: make(chan interface{}, 1024),
		APIClient: client.NewOrDie(&client.Config{
			Host:    testApiServer.server.URL,
			Version: testapi.Default.Version(),
		}),
		Kubelet: &fakeKubelet{
			Kubelet: &kubelet.Kubelet{},
			hostIP:  net.IPv4(127, 0, 0, 1),
		},
		PodStatusFunc: func(kl KubeletInterface, pod *api.Pod) (*api.PodStatus, error) {
			return &api.PodStatus{
				ContainerStatuses: []api.ContainerStatus{
					{
						Name: "foo",
						State: api.ContainerState{
							Running: &api.ContainerStateRunning{},
						},
					},
				},
				Phase: api.PodRunning,
			}, nil
		},
		ShutdownAlert: func() {
			close(kubeletFinished)
		},
		KubeletFinished: kubeletFinished,
	}
	executor := New(config)
	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)
	executor.FrameworkMessage(mockDriver, "test framework message")

	// set up a pod to then lose
	pod := NewTestPod(1)
	podTask, _ := podtask.New(api.NewDefaultContext(), "foo",
		*pod, &mesosproto.ExecutorInfo{})
	taskInfo := podTask.BuildTaskInfo()
	data, _ := testapi.Default.Codec().Encode(pod)
	taskInfo.Data = data

	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_STARTING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Once()

	called := make(chan struct{})
	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_RUNNING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(func(_ mock.Arguments) { close(called) }).Once()

	executor.LaunchTask(mockDriver, taskInfo)

	// waiting until the pod is really running b/c otherwise a TASK_FAILED could be
	// triggered by the asynchronously running _launchTask, __launchTask methods
	// when removing the task from k.tasks through the "task-lost:foo" message below.
	select {
	case <-called:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for SendStatusUpdate for the running task")
	}

	// send task-lost message for it
	called = make(chan struct{})
	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_LOST,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(func(_ mock.Arguments) { close(called) }).Once()

	executor.FrameworkMessage(mockDriver, "task-lost:foo")
	assertext.EventuallyTrue(t, 5*time.Second, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return len(executor.tasks) == 0 && len(executor.pods) == 0
	}, "executor must be able to kill a created task and pod")

	select {
	case <-called:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for SendStatusUpdate")
	}
//... (remainder of this example omitted) ...
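Example 1 blocks on the called channel until the mocked SendStatusUpdate fires. That idiom comes from testify-style mocks; the following self-contained sketch reproduces it with a hypothetical FakeDriver type (assuming github.com/stretchr/testify/mock, not the MockExecutorDriver above):

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

type FakeDriver struct {
	mock.Mock
}

func (d *FakeDriver) SendStatusUpdate(state string) (string, error) {
	args := d.Called(state)
	return args.String(0), args.Error(1)
}

func main() {
	d := &FakeDriver{}
	called := make(chan struct{})

	// Expect exactly one call; the Run callback closes the channel so a test
	// can block until the mocked call has actually happened.
	d.On("SendStatusUpdate", "TASK_RUNNING").
		Return("DRIVER_RUNNING", nil).
		Run(func(_ mock.Arguments) { close(called) }).
		Once()

	d.SendStatusUpdate("TASK_RUNNING")
	<-called
	fmt.Println("status update observed")
}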
Example 2: TestPlugin_LifeCycle
//... (earlier part of this example omitted) ...
		return startPodWithOffers(pod, offers)
	}

	// start another pod
	pod, launchedTask, _ := startTestPod()

	// mock driver.KillTask, should be invoked when a pod is deleted
	mockDriver.On("KillTask", mAny("*mesosproto.TaskID")).Return(mesos.Status_DRIVER_RUNNING, nil).Run(func(args mock.Arguments) {
		killedTaskId := *(args.Get(0).(*mesos.TaskID))
		assert.Equal(*launchedTask.taskInfo.TaskId, killedTaskId, "expected same TaskID as during launch")
	})
	killTaskCalled := mockDriver.Upon()

	// stop it again via the apiserver mock
	podListWatch.Delete(pod, true) // notify watchers

	// and wait for the driver killTask call with the correct TaskId
	select {
	case <-killTaskCalled:
		// report back that the task is finished
		testScheduler.StatusUpdate(mockDriver, newTaskStatusForTask(launchedTask.taskInfo, mesos.TaskState_TASK_FINISHED))
	case <-time.After(5 * time.Second):
		t.Fatal("timed out waiting for KillTask")
	}

	// start a pod with a given NodeName and check that it is scheduled to the right host
	pod, i = NewTestPod()
	pod.Spec.NodeName = "hostname1"
	offers = []*mesos.Offer{}
	for j := 0; j < 3; j++ {
		offer := NewTestOffer(fmt.Sprintf("offer%d_%d", i, j))
		hostname := fmt.Sprintf("hostname%d", j)
		offer.Hostname = &hostname
		offers = append(offers, offer)
	}
	_, _, usedOffer := startPodWithOffers(pod, offers)
	assert.Equal(offers[1].Id.GetValue(), usedOffer.Id.GetValue())
	assert.Equal(pod.Spec.NodeName, *usedOffer.Hostname)

	testScheduler.OfferRescinded(mockDriver, offers[0].Id)
	testScheduler.OfferRescinded(mockDriver, offers[2].Id)

	// start pods:
	// - which are failing while binding,
	// - leading to reconciliation
	// - with different states on the apiserver
	failPodFromExecutor := func(task *mesos.TaskInfo) {
		beforePodLookups := testApiServer.Stats(pod.Name)
		status := newTaskStatusForTask(task, mesos.TaskState_TASK_FAILED)
		message := messages.CreateBindingFailure
		status.Message = &message
		testScheduler.StatusUpdate(mockDriver, status)

		// wait until pod is looked up at the apiserver
		assertext.EventuallyTrue(t, time.Second, func() bool {
			return testApiServer.Stats(pod.Name) == beforePodLookups+1
		}, "expect that reconcileTask will access apiserver for pod %v", pod.Name)
	}

	launchTestPod := func() (*api.Pod, *LaunchedTask, *mesos.Offer) {
		pod, i := NewTestPod()
		offers := []*mesos.Offer{NewTestOffer(fmt.Sprintf("offer%d", i))}
		return launchPodWithOffers(pod, offers)
	}

	// 1. with pod deleted from the apiserver
	// expected: pod is removed from internal task registry
	pod, launchedTask, _ = launchTestPod()
	podListWatch.Delete(pod, false) // not notifying the watchers
	failPodFromExecutor(launchedTask.taskInfo)

	podKey, _ := podtask.MakePodKey(api.NewDefaultContext(), pod.Name)
	assertext.EventuallyTrue(t, time.Second, func() bool {
		t, _ := p.api.tasks().ForPod(podKey)
		return t == nil
	})

	// 2. with pod still on the apiserver, not bound
	// expected: pod is rescheduled
	pod, launchedTask, _ = launchTestPod()
	failPodFromExecutor(launchedTask.taskInfo)

	retryOffers := []*mesos.Offer{NewTestOffer("retry-offer")}
	schedulePodWithOffers(pod, retryOffers)

	// 3. with pod still on the apiserver, bound, notified via ListWatch
	// expected: nothing, pod updates not supported, compare ReconcileTask function
	pod, launchedTask, usedOffer = startTestPod()
	pod.Annotations = map[string]string{
		meta.BindingHostKey: *usedOffer.Hostname,
	}
	pod.Spec.NodeName = *usedOffer.Hostname
	podListWatch.Modify(pod, true) // notifying the watchers
	time.Sleep(time.Second / 2)
	failPodFromExecutor(launchedTask.taskInfo)
}
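The killTaskCalled := mockDriver.Upon() line above turns the mocked KillTask expectation into a channel the test can select on. The upstream helper is not shown on this page; a plausible sketch (an assumption, presuming MockExecutorDriver embeds testify's mock.Mock and that "sync" and the mock package are imported) would wrap the most recently registered expectation:

// Upon returns a channel that is closed the first time the most recently
// registered expectation is invoked, preserving any callback set via .Run(...).
func (m *MockExecutorDriver) Upon() <-chan struct{} {
	ch := make(chan struct{})
	call := m.ExpectedCalls[len(m.ExpectedCalls)-1] // the last .On(...) registration
	prev := call.RunFn
	var once sync.Once
	call.RunFn = func(args mock.Arguments) {
		if prev != nil {
			prev(args)
		}
		once.Do(func() { close(ch) }) // signal at most once, even if the call repeats
	}
	return ch
}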
Example 3: TestExecutorLaunchAndKillTask
// TestExecutorLaunchAndKillTask ensures that the executor is able to launch
// and kill tasks while properly keeping track of its tasks.
func TestExecutorLaunchAndKillTask(t *testing.T) {
	// create a fake pod watch. We use that below to submit new pods to the scheduler
	podListWatch := NewMockPodsListWatch(api.PodList{})

	// create fake apiserver
	testApiServer := NewTestServer(t, api.NamespaceDefault, &podListWatch.list)
	defer testApiServer.server.Close()

	mockDriver := &MockExecutorDriver{}
	updates := make(chan interface{}, 1024)
	config := Config{
		Docker:  dockertools.ConnectToDockerOrDie("fake://"),
		Updates: updates,
		APIClient: client.NewOrDie(&client.Config{
			Host:    testApiServer.server.URL,
			Version: testapi.Default.Version(),
		}),
		Kubelet: &fakeKubelet{
			Kubelet: &kubelet.Kubelet{},
			hostIP:  net.IPv4(127, 0, 0, 1),
		},
		PodStatusFunc: func(kl KubeletInterface, pod *api.Pod) (*api.PodStatus, error) {
			return &api.PodStatus{
				ContainerStatuses: []api.ContainerStatus{
					{
						Name: "foo",
						State: api.ContainerState{
							Running: &api.ContainerStateRunning{},
						},
					},
				},
				Phase: api.PodRunning,
			}, nil
		},
	}
	executor := New(config)
	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)

	select {
	case <-updates:
	case <-time.After(time.Second):
		t.Fatalf("Executor should send an initial update on Registration")
	}

	pod := NewTestPod(1)
	podTask, err := podtask.New(api.NewDefaultContext(), "",
		*pod, &mesosproto.ExecutorInfo{})
	assert.Equal(t, nil, err, "must be able to create a task from a pod")

	taskInfo := podTask.BuildTaskInfo()
	data, err := testapi.Default.Codec().Encode(pod)
	assert.Equal(t, nil, err, "must be able to encode a pod's spec data")
	taskInfo.Data = data

	var statusUpdateCalls sync.WaitGroup
	statusUpdateDone := func(_ mock.Arguments) { statusUpdateCalls.Done() }

	statusUpdateCalls.Add(1)
	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_STARTING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	statusUpdateCalls.Add(1)
	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_RUNNING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	executor.LaunchTask(mockDriver, taskInfo)

	assertext.EventuallyTrue(t, 5*time.Second, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return len(executor.tasks) == 1 && len(executor.pods) == 1
	}, "executor must be able to create a task and a pod")

	gotPodUpdate := false
	select {
	case m := <-updates:
		update, ok := m.(kubelet.PodUpdate)
		if ok && len(update.Pods) == 1 {
			gotPodUpdate = true
		}
	case <-time.After(time.Second):
	}
	assert.Equal(t, true, gotPodUpdate,
		"the executor should send an update about a new pod to "+
			"the updates chan when creating a new one.")

	// Allow some time for asynchronous requests to the driver.
	finished := kmruntime.After(statusUpdateCalls.Wait)
	select {
	case <-finished:
	case <-time.After(5 * time.Second):
		t.Fatalf("timed out waiting for status update calls to finish")
	}
//... (remainder of this example omitted) ...
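The kmruntime.After(statusUpdateCalls.Wait) call above converts the blocking WaitGroup.Wait into a channel so it can be raced against a timeout in a select. A helper of that shape is only a few lines; this sketch is an assumption about its implementation, not the contrib/mesos source:

// After runs f in a goroutine and returns a channel that is closed once f
// returns, letting callers select on completion versus a timeout.
func After(f func()) <-chan struct{} {
	done := make(chan struct{})
	go func() {
		defer close(done)
		f() // typically a blocking call such as sync.WaitGroup.Wait
	}()
	return done
}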
Example 4: WaitForNode
func (srv *TestServer) WaitForNode(name string) {
	assertext.EventuallyTrue(srv.t, util.ForeverTestTimeout, func() bool {
		return srv.LookupNode(name) != nil
	})
}
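A hypothetical usage, in the spirit of the scheduler tests above that pin pods to "hostname1": call WaitForNode before scheduling against the node, so the test blocks until the mock apiserver knows about it (or EventuallyTrue times out and fails srv.t):

srv.WaitForNode("hostname1") // returns once LookupNode("hostname1") is non-nil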
Example 5: TestScheduler_LifeCycle
//... (earlier part of this example omitted) ...
	// mock driver.KillTask, should be invoked when a pod is deleted
	lt.driver.On("KillTask",
		mock.AnythingOfType("*mesosproto.TaskID"),
	).Return(mesos.Status_DRIVER_RUNNING, nil).Run(func(args mock.Arguments) {
		killedTaskId := *(args.Get(0).(*mesos.TaskID))
		assert.Equal(*launchedTask.taskInfo.TaskId, killedTaskId, "expected same TaskID as during launch")
	})
	killTaskCalled := lt.driver.Upon()

	// stop it again via the apiserver mock
	lt.podsListWatch.Delete(pod, true) // notify watchers

	// and wait for the driver killTask call with the correct TaskId
	select {
	case <-killTaskCalled:
		// report back that the task is finished
		lt.framework.StatusUpdate(
			lt.driver,
			newTaskStatusForTask(launchedTask.taskInfo, mesos.TaskState_TASK_FINISHED),
		)
	case <-time.After(util.ForeverTestTimeout):
		t.Fatal("timed out waiting for KillTask")
	}

	// start a pod with a given NodeName and check that it is scheduled to the right host
	pod, i = NewTestPod()
	pod.Spec.NodeName = "hostname1"
	offers = []*mesos.Offer{}
	for j := 0; j < 3; j++ {
		offer := NewTestOffer(fmt.Sprintf("offer%d_%d", i, j))
		hostname := fmt.Sprintf("hostname%d", j)
		offer.Hostname = &hostname
		offers = append(offers, offer)
	}
	_, _, usedOffer := startPodWithOffers(pod, offers)
	assert.Equal(offers[1].Id.GetValue(), usedOffer.Id.GetValue())
	assert.Equal(pod.Spec.NodeName, *usedOffer.Hostname)

	lt.framework.OfferRescinded(lt.driver, offers[0].Id)
	lt.framework.OfferRescinded(lt.driver, offers[2].Id)

	// start pods:
	// - which are failing while binding,
	// - leading to reconciliation
	// - with different states on the apiserver
	failPodFromExecutor := func(task *mesos.TaskInfo) {
		beforePodLookups := lt.apiServer.Stats(pod.Name)
		status := newTaskStatusForTask(task, mesos.TaskState_TASK_FAILED)
		message := messages.CreateBindingFailure
		status.Message = &message
		lt.framework.StatusUpdate(lt.driver, status)

		// wait until pod is looked up at the apiserver
		assertext.EventuallyTrue(t, util.ForeverTestTimeout, func() bool {
			return lt.apiServer.Stats(pod.Name) == beforePodLookups+1
		}, "expect that reconcileTask will access apiserver for pod %v", pod.Name)
	}

	launchTestPod := func() (*api.Pod, *LaunchedTask, *mesos.Offer) {
		pod, i := NewTestPod()
		offers := []*mesos.Offer{NewTestOffer(fmt.Sprintf("offer%d", i))}
		return launchPodWithOffers(pod, offers)
	}

	// 1. with pod deleted from the apiserver
	// expected: pod is removed from internal task registry
	pod, launchedTask, _ = launchTestPod()
	lt.podsListWatch.Delete(pod, false) // not notifying the watchers
	failPodFromExecutor(launchedTask.taskInfo)

	podKey, _ := podtask.MakePodKey(api.NewDefaultContext(), pod.Name)
	assertext.EventuallyTrue(t, util.ForeverTestTimeout, func() bool {
		t, _ := lt.sched.Tasks().ForPod(podKey)
		return t == nil
	})

	// 2. with pod still on the apiserver, not bound
	// expected: pod is rescheduled
	pod, launchedTask, _ = launchTestPod()
	failPodFromExecutor(launchedTask.taskInfo)

	retryOffers := []*mesos.Offer{NewTestOffer("retry-offer")}
	schedulePodWithOffers(pod, retryOffers)

	// 3. with pod still on the apiserver, bound, notified via ListWatch
	// expected: nothing, pod updates not supported, compare ReconcileTask function
	pod, launchedTask, usedOffer = startTestPod()
	pod.Annotations = map[string]string{
		meta.BindingHostKey: *usedOffer.Hostname,
	}
	pod.Spec.NodeName = *usedOffer.Hostname
	lt.podsListWatch.Modify(pod, true) // notifying the watchers
	time.Sleep(time.Second / 2)
	failPodFromExecutor(launchedTask.taskInfo)
}
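Note the fixed time.Sleep(time.Second / 2) before the final failPodFromExecutor call in scenario 3: the expected outcome there is that nothing changes, and the absence of a reaction is not a condition EventuallyTrue can poll for, so the test instead grants the ListWatch Modify notification a fixed grace period to propagate before injecting the failure status.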
Example 6: TestExecutorFrameworkMessage
// TestExecutorFrameworkMessage ensures that the executor is able to
// handle messages from the framework, specifically about lost tasks
// and Kamikaze. When a task is lost, the executor needs to clean up
// its state. When a Kamikaze message is received, the executor should
// attempt suicide.
func TestExecutorFrameworkMessage(t *testing.T) {
	// TODO(jdef): Fix the unexpected call in the mocking system.
	t.Skip("This test started failing when panic catching was disabled.")

	var (
		mockDriver      = &MockExecutorDriver{}
		kubeletFinished = make(chan struct{})
		registry        = newFakeRegistry()
		executor        = New(Config{
			Docker:    dockertools.ConnectToDockerOrDie("fake://", 0),
			NodeInfos: make(chan NodeInfo, 1),
			ShutdownAlert: func() {
				close(kubeletFinished)
			},
			KubeletFinished: kubeletFinished,
			Registry:        registry,
		})
		pod         = NewTestPod(1)
		mockKubeAPI = &mockKubeAPI{}
	)
	executor.kubeAPI = mockKubeAPI
	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)
	executor.FrameworkMessage(mockDriver, "test framework message")

	// set up a pod to then lose
	executorinfo := &mesosproto.ExecutorInfo{}
	podTask, _ := podtask.New(
		api.NewDefaultContext(),
		podtask.Config{
			ID:               "foo",
			Prototype:        executorinfo,
			HostPortStrategy: hostport.StrategyWildcard,
		},
		pod,
	)
	pod.Annotations = map[string]string{
		"k8s.mesosphere.io/taskId": podTask.ID,
	}
	podTask.Spec = &podtask.Spec{
		Executor: executorinfo,
	}
	taskInfo, err := podTask.BuildTaskInfo()
	assert.Equal(t, nil, err, "must be able to build task info")

	data, _ := runtime.Encode(testapi.Default.Codec(), pod)
	taskInfo.Data = data

	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_STARTING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Once()

	called := make(chan struct{})
	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_RUNNING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(func(_ mock.Arguments) { close(called) }).Once()

	executor.LaunchTask(mockDriver, taskInfo)

	// must wait for this otherwise phase changes may not apply
	assertext.EventuallyTrue(t, wait.ForeverTestTimeout, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return !registry.empty()
	}, "executor must be able to create a task and a pod")

	err = registry.phaseChange(pod, api.PodPending)
	assert.NoError(t, err)
	err = registry.phaseChange(pod, api.PodRunning)
	assert.NoError(t, err)

	// waiting until the pod is really running b/c otherwise a TASK_FAILED could be
	// triggered by the asynchronously running executor methods when removing the task
	// from k.tasks through the "task-lost:foo" message below.
	select {
	case <-called:
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timed out waiting for SendStatusUpdate for the running task")
	}

	// send task-lost message for it
	called = make(chan struct{})
	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_LOST,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(func(_ mock.Arguments) { close(called) }).Once()

	// simulate what happens when the apiserver is told to delete a pod
	mockKubeAPI.On("killPod", pod.Namespace, pod.Name).Return(nil).Run(func(_ mock.Arguments) {
		registry.Remove(podTask.ID)
	})
//... (remainder of this example omitted) ...
Example 7: TestExecutorLaunchAndKillTask
// TestExecutorLaunchAndKillTask ensures that the executor is able to launch tasks and generates
// appropriate status messages for mesos. It then kills the task and validates that appropriate
// actions are taken by the executor.
func TestExecutorLaunchAndKillTask(t *testing.T) {
	var (
		mockDriver = &MockExecutorDriver{}
		registry   = newFakeRegistry()
		executor   = New(Config{
			Docker:    dockertools.ConnectToDockerOrDie("fake://", 0),
			NodeInfos: make(chan NodeInfo, 1),
			Registry:  registry,
		})
		mockKubeAPI  = &mockKubeAPI{}
		pod          = NewTestPod(1)
		executorinfo = &mesosproto.ExecutorInfo{}
	)
	executor.kubeAPI = mockKubeAPI
	executor.Init(mockDriver)
	executor.Registered(mockDriver, nil, nil, nil)

	podTask, err := podtask.New(
		api.NewDefaultContext(),
		podtask.Config{
			Prototype:        executorinfo,
			HostPortStrategy: hostport.StrategyWildcard,
		},
		pod,
	)
	assert.Equal(t, nil, err, "must be able to create a task from a pod")

	pod.Annotations = map[string]string{
		"k8s.mesosphere.io/taskId": podTask.ID,
	}

	podTask.Spec = &podtask.Spec{Executor: executorinfo}
	taskInfo, err := podTask.BuildTaskInfo()
	assert.Equal(t, nil, err, "must be able to build task info")

	data, err := runtime.Encode(testapi.Default.Codec(), pod)
	assert.Equal(t, nil, err, "must be able to encode a pod's spec data")
	taskInfo.Data = data

	var statusUpdateCalls sync.WaitGroup
	statusUpdateCalls.Add(1)
	statusUpdateDone := func(_ mock.Arguments) { statusUpdateCalls.Done() }

	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_STARTING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	statusUpdateCalls.Add(1)
	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_RUNNING,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	executor.LaunchTask(mockDriver, taskInfo)

	assertext.EventuallyTrue(t, wait.ForeverTestTimeout, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return !registry.empty()
	}, "executor must be able to create a task and a pod")

	// simulate a pod source update; normally this update is generated when binding a pod
	err = registry.phaseChange(pod, api.PodPending)
	assert.NoError(t, err)

	// simulate a pod source update; normally this update is generated by the kubelet once the pod is healthy
	err = registry.phaseChange(pod, api.PodRunning)
	assert.NoError(t, err)

	// Allow some time for asynchronous requests to the driver.
	finished := kmruntime.After(statusUpdateCalls.Wait)
	select {
	case <-finished:
	case <-time.After(wait.ForeverTestTimeout):
		t.Fatalf("timed out waiting for status update calls to finish")
	}

	statusUpdateCalls.Add(1)
	mockDriver.On(
		"SendStatusUpdate",
		mesosproto.TaskState_TASK_KILLED,
	).Return(mesosproto.Status_DRIVER_RUNNING, nil).Run(statusUpdateDone).Once()

	// simulate what happens when the apiserver is told to delete a pod
	mockKubeAPI.On("killPod", pod.Namespace, pod.Name).Return(nil).Run(func(_ mock.Arguments) {
		registry.Remove(podTask.ID)
	})

	executor.KillTask(mockDriver, taskInfo.TaskId)
	assertext.EventuallyTrue(t, wait.ForeverTestTimeout, func() bool {
		executor.lock.Lock()
		defer executor.lock.Unlock()
		return registry.empty()
	}, "executor must be able to kill a created task and pod")

	// Allow some time for asynchronous requests to the driver.
//... (remainder of this example omitted) ...