This page collects typical usage examples of the MakePodKey function from the Go package k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask. If you are unsure what MakePodKey does or how to call it, the hand-picked code samples below should help.
Eight code examples of MakePodKey are shown below, ordered by popularity by default.
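Every example below follows the same two-step pattern: wrap the pod's namespace in an api.Context, then pass that context and the pod name to MakePodKey to obtain the key under which the scheduler's task registry indexes the pod. The sketch below distills that pattern into a helper; keyForPod is a hypothetical name (not part of the scheduler package), and the import paths assume the Kubernetes source tree these examples were taken from.

import (
	"k8s.io/kubernetes/contrib/mesos/pkg/scheduler/podtask"
	"k8s.io/kubernetes/pkg/api"
)

// keyForPod shows the canonical MakePodKey call pattern used throughout the
// examples on this page; the helper itself is illustrative only.
func keyForPod(pod *api.Pod) (string, error) {
	// the default upstream scheduler passes pod.Name as binding.PodID
	ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
	return podtask.MakePodKey(ctx, pod.Name)
}

The resulting key is what the examples then hand to tasks().ForPod(podKey) or Tasks().ForPod(podKey) to look up the corresponding Mesos task.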
Example 1: handleSchedulingError
// implementation of scheduling plugin's Error func; see plugin/pkg/scheduler
func (k *errorHandler) handleSchedulingError(pod *api.Pod, schedulingErr error) {
	if schedulingErr == noSuchPodErr {
		log.V(2).Infof("Not rescheduling non-existent pod %v", pod.Name)
		return
	}
	log.Infof("Error scheduling %v: %v; retrying", pod.Name, schedulingErr)
	defer util.HandleCrash()
	// default upstream scheduler passes pod.Name as binding.PodID
	ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
	podKey, err := podtask.MakePodKey(ctx, pod.Name)
	if err != nil {
		log.Errorf("Failed to construct pod key, aborting scheduling for pod %v: %v", pod.Name, err)
		return
	}
	k.backoff.GC()
	k.api.Lock()
	defer k.api.Unlock()
	switch task, state := k.api.tasks().ForPod(podKey); state {
	case podtask.StateUnknown:
		// if we don't have a mapping here any more then someone deleted the pod
		log.V(2).Infof("Could not resolve pod to task, aborting pod reschedule: %s", podKey)
		return
	case podtask.StatePending:
		if task.Has(podtask.Launched) {
			log.V(2).Infof("Skipping re-scheduling for already-launched pod %v", podKey)
			return
		}
		breakoutEarly := queue.BreakChan(nil)
		if schedulingErr == noSuitableOffersErr {
			log.V(3).Infof("adding backoff breakout handler for pod %v", podKey)
			breakoutEarly = queue.BreakChan(k.api.offers().Listen(podKey, func(offer *mesos.Offer) bool {
				k.api.Lock()
				defer k.api.Unlock()
				switch task, state := k.api.tasks().Get(task.ID); state {
				case podtask.StatePending:
					// Assess fitness of pod with the current offer. The scheduler normally
					// "backs off" when it can't find an offer that matches up with a pod.
					// The backoff period for a pod can terminate sooner if an offer becomes
					// available that matches up.
					return !task.Has(podtask.Launched) && k.api.algorithm().FitPredicate()(task, offer, nil)
				default:
					// no point in continuing to check for matching offers
					return true
				}
			}))
		}
		delay := k.backoff.Get(podKey)
		log.V(3).Infof("requeuing pod %v with delay %v", podKey, delay)
		k.qr.requeue(&Pod{Pod: pod, delay: &delay, notify: breakoutEarly})
	default:
		log.V(2).Infof("Task is no longer pending, aborting reschedule for pod %v", podKey)
	}
}
Example 2: deleteOne
func (k *deleter) deleteOne(pod *Pod) error {
	ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
	podKey, err := podtask.MakePodKey(ctx, pod.Name)
	if err != nil {
		return err
	}
	log.V(2).Infof("pod deleted: %v", podKey)
	// order is important here: we want to make sure we have the lock before
	// removing the pod from the scheduling queue. this makes the concurrent
	// execution of scheduler-error-handling and delete-handling easier to
	// reason about.
	k.api.Lock()
	defer k.api.Unlock()
	// prevent the scheduler from attempting to pop this; it's also possible that
	// it's concurrently being scheduled (somewhere between pod scheduling and
	// binding) - if so, then we'll end up removing it from taskRegistry which
	// will abort Bind()ing
	k.qr.dequeue(pod.GetUID())
	switch task, state := k.api.tasks().ForPod(podKey); state {
	case podtask.StateUnknown:
		log.V(2).Infof("Could not resolve pod '%s' to task id", podKey)
		return noSuchPodErr
	// determine if the task has already been launched to mesos, if not then
	// cleanup is easier (unregister) since there's no state to sync
	case podtask.StatePending:
		if !task.Has(podtask.Launched) {
			// we've been invoked in between Schedule() and Bind()
			if task.HasAcceptedOffer() {
				task.Offer.Release()
				task.Reset()
				task.Set(podtask.Deleted)
				//TODO(jdef) probably want better handling here
				if err := k.api.tasks().Update(task); err != nil {
					return err
				}
			}
			k.api.tasks().Unregister(task)
			return nil
		}
		fallthrough
	case podtask.StateRunning:
		// signal to watchers that the related pod is going down
		task.Set(podtask.Deleted)
		if err := k.api.tasks().Update(task); err != nil {
			log.Errorf("failed to update task w/ Deleted status: %v", err)
		}
		return k.api.killTask(task.ID)
	default:
		log.Infof("cannot kill pod '%s': non-terminal task not found %v", podKey, task.ID)
		return noSuchTaskErr
	}
}
Example 3: reconcileTask
// this pod may be out of sync with respect to the API server registry:
//      this pod   |  apiserver registry
//    -------------|----------------------
//      host=.*    |  404           ; pod was deleted
//      host=.*    |  5xx           ; failed to sync, try again later?
//      host=""    |  host=""       ; perhaps no updates to process?
//      host=""    |  host="..."    ; pod has been scheduled and assigned, is there a task assigned? (check TaskIdKey in binding?)
//      host="..." |  host=""       ; pod is no longer scheduled, does it need to be re-queued?
//      host="..." |  host="..."    ; perhaps no updates to process?
//
// TODO(jdef) this needs an integration test
func (s *schedulingPlugin) reconcileTask(t *podtask.T) {
	log.V(1).Infof("reconcile pod %v, assigned to slave %q", t.Pod.Name, t.Spec.AssignedSlave)
	ctx := api.WithNamespace(api.NewDefaultContext(), t.Pod.Namespace)
	pod, err := s.client.Pods(api.NamespaceValue(ctx)).Get(t.Pod.Name)
	if err != nil {
		if errors.IsNotFound(err) {
			// attempt to delete
			if err = s.deleter.deleteOne(&Pod{Pod: &t.Pod}); err != nil && err != noSuchPodErr && err != noSuchTaskErr {
				log.Errorf("failed to delete pod: %v: %v", t.Pod.Name, err)
			}
		} else {
			//TODO(jdef) other errors should probably trigger a retry (w/ backoff).
			//For now, drop the pod on the floor
			log.Warningf("aborting reconciliation for pod %v: %v", t.Pod.Name, err)
		}
		return
	}
	log.Infof("pod %v scheduled on %q according to apiserver", pod.Name, pod.Spec.NodeName)
	if t.Spec.AssignedSlave != pod.Spec.NodeName {
		if pod.Spec.NodeName == "" {
			// pod is unscheduled.
			// it's possible that we dropped the pod in the scheduler error handler
			// because of task misalignment with the pod (task.Has(podtask.Launched) == true)
			podKey, err := podtask.MakePodKey(ctx, pod.Name)
			if err != nil {
				log.Error(err)
				return
			}
			s.api.Lock()
			defer s.api.Unlock()
			if _, state := s.api.tasks().ForPod(podKey); state != podtask.StateUnknown {
				//TODO(jdef) reconcile the task
				log.Errorf("task already registered for pod %v", pod.Name)
				return
			}
			now := time.Now()
			log.V(3).Infof("reoffering pod %v", podKey)
			s.qr.reoffer(&Pod{
				Pod:      pod,
				deadline: &now,
			})
		} else {
			// pod is scheduled.
			// not sure how this happened behind our backs. attempt to reconstruct
			// at least a partial podtask.T record.
			//TODO(jdef) reconcile the task
			log.Errorf("pod already scheduled: %v", pod.Name)
		}
	} else {
		//TODO(jdef) for now, ignore the fact that the rest of the spec may be different
		//and assume that our knowledge of the pod aligns with that of the apiserver
		log.Error("pod reconciliation does not support updates; not yet implemented")
	}
}
Example 4: Schedule
// Schedule implements the Scheduler interface of Kubernetes.
// It returns the selectedMachine's name and error (if there's any).
func (k *kubeScheduler) Schedule(pod *api.Pod, unused algorithm.MinionLister) (string, error) {
	log.Infof("Try to schedule pod %v\n", pod.Name)
	ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
	// default upstream scheduler passes pod.Name as binding.PodID
	podKey, err := podtask.MakePodKey(ctx, pod.Name)
	if err != nil {
		return "", err
	}
	k.api.Lock()
	defer k.api.Unlock()
	switch task, state := k.api.tasks().ForPod(podKey); state {
	case podtask.StateUnknown:
		// There's a bit of a potential race here, a pod could have been yielded() and
		// then before we get *here* it could be deleted.
		// We use meta to index the pod in the store since that's what k8s reflector does.
		podName, err := cache.MetaNamespaceKeyFunc(pod)
		if err != nil {
			log.Warningf("aborting Schedule, unable to understand pod object %+v", pod)
			return "", noSuchPodErr
		}
		if deleted := k.podUpdates.Poll(podName, queue.DELETE_EVENT); deleted {
			// avoid scheduling a pod that's been deleted between yieldPod() and Schedule()
			log.Infof("aborting Schedule, pod has been deleted %+v", pod)
			return "", noSuchPodErr
		}
		return k.doSchedule(k.api.tasks().Register(k.api.createPodTask(ctx, pod)))
	//TODO(jdef) it's possible that the pod state has diverged from what
	//we knew previously, we should probably update the task.Pod state here
	//before proceeding with scheduling
	case podtask.StatePending:
		if pod.UID != task.Pod.UID {
			// we're dealing with a brand new pod spec here, so the old one must have been
			// deleted -- and so our task store is out of sync w/ respect to reality
			//TODO(jdef) reconcile task
			return "", fmt.Errorf("task %v spec is out of sync with pod %v spec, aborting schedule", task.ID, pod.Name)
		} else if task.Has(podtask.Launched) {
			// task has been marked as "launched" but the pod binding creation may have failed in k8s,
			// but we're going to let someone else handle it, probably the mesos task error handler
			return "", fmt.Errorf("task %s has already been launched, aborting schedule", task.ID)
		} else {
			return k.doSchedule(task, nil)
		}
	default:
		return "", fmt.Errorf("task %s is not pending, nothing to schedule", task.ID)
	}
}
Example 5: Error
// implementation of scheduling plugin's Error func; see plugin/pkg/scheduler
func (k *errorHandler) Error(pod *api.Pod, schedulingErr error) {
	if schedulingErr == errors.NoSuchPodErr {
		log.V(2).Infof("Not rescheduling non-existent pod %v", pod.Name)
		return
	}
	log.Infof("Error scheduling %v: %v; retrying", pod.Name, schedulingErr)
	defer runtime.HandleCrash()
	// default upstream scheduler passes pod.Name as binding.PodID
	ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
	podKey, err := podtask.MakePodKey(ctx, pod.Name)
	if err != nil {
		log.Errorf("Failed to construct pod key, aborting scheduling for pod %v: %v", pod.Name, err)
		return
	}
	k.backoff.GC()
	k.sched.Lock()
	defer k.sched.Unlock()
	switch task, state := k.sched.Tasks().ForPod(podKey); state {
	case podtask.StateUnknown:
		// if we don't have a mapping here any more then someone deleted the pod
		log.V(2).Infof("Could not resolve pod to task, aborting pod reschedule: %s", podKey)
		return
	case podtask.StatePending:
		if task.Has(podtask.Launched) {
			log.V(2).Infof("Skipping re-scheduling for already-launched pod %v", podKey)
			return
		}
		breakoutEarly := queue.BreakChan(nil)
		if schedulingErr == errors.NoSuitableOffersErr {
			log.V(3).Infof("adding backoff breakout handler for pod %v", podKey)
			breakoutEarly = k.newBreakChan(podKey)
		}
		delay := k.backoff.Get(podKey)
		log.V(3).Infof("requeuing pod %v with delay %v", podKey, delay)
		k.qr.Requeue(queuer.NewPod(pod, queuer.Delay(delay), queuer.Notify(breakoutEarly)))
	default:
		log.V(2).Infof("Task is no longer pending, aborting reschedule for pod %v", podKey)
	}
}
Example 6: Bind
// implements binding.Registry, launches the pod-associated-task in mesos
func (b *binder) Bind(binding *api.Binding) error {
	ctx := api.WithNamespace(api.NewContext(), binding.Namespace)
	// default upstream scheduler passes pod.Name as binding.Name
	podKey, err := podtask.MakePodKey(ctx, binding.Name)
	if err != nil {
		return err
	}
	b.sched.Lock()
	defer b.sched.Unlock()
	switch task, state := b.sched.Tasks().ForPod(podKey); state {
	case podtask.StatePending:
		return b.bind(ctx, binding, task)
	default:
		// in this case it's likely that the pod has been deleted between Schedule
		// and Bind calls
		log.Infof("No pending task for pod %s", podKey)
		return errors.NoSuchPodErr //TODO(jdef) this error is somewhat misleading since the task could be running?!
	}
}
Example 7: TestPlugin_LifeCycle
//......... portions of this example are omitted .........
		return startPodWithOffers(pod, offers)
	}

	// start another pod
	pod, launchedTask, _ := startTestPod()

	// mock driver.KillTask, should be invoked when a pod is deleted
	mockDriver.On("KillTask", mAny("*mesosproto.TaskID")).Return(mesos.Status_DRIVER_RUNNING, nil).Run(func(args mock.Arguments) {
		killedTaskId := *(args.Get(0).(*mesos.TaskID))
		assert.Equal(*launchedTask.taskInfo.TaskId, killedTaskId, "expected same TaskID as during launch")
	})
	killTaskCalled := mockDriver.Upon()

	// stop it again via the apiserver mock
	podListWatch.Delete(pod, true) // notify watchers

	// and wait for the driver killTask call with the correct TaskId
	select {
	case <-killTaskCalled:
		// report back that the task is finished
		testScheduler.StatusUpdate(mockDriver, newTaskStatusForTask(launchedTask.taskInfo, mesos.TaskState_TASK_FINISHED))
	case <-time.After(5 * time.Second):
		t.Fatal("timed out waiting for KillTask")
	}

	// start a pod on a given NodeName and check that it is scheduled to the right host
	pod, i = NewTestPod()
	pod.Spec.NodeName = "hostname1"
	offers = []*mesos.Offer{}
	for j := 0; j < 3; j++ {
		offer := NewTestOffer(fmt.Sprintf("offer%d_%d", i, j))
		hostname := fmt.Sprintf("hostname%d", j)
		offer.Hostname = &hostname
		offers = append(offers, offer)
	}
	_, _, usedOffer := startPodWithOffers(pod, offers)
	assert.Equal(offers[1].Id.GetValue(), usedOffer.Id.GetValue())
	assert.Equal(pod.Spec.NodeName, *usedOffer.Hostname)

	testScheduler.OfferRescinded(mockDriver, offers[0].Id)
	testScheduler.OfferRescinded(mockDriver, offers[2].Id)

	// start pods:
	// - which are failing while binding,
	// - leading to reconciliation
	// - with different states on the apiserver
	failPodFromExecutor := func(task *mesos.TaskInfo) {
		beforePodLookups := testApiServer.Stats(pod.Name)
		status := newTaskStatusForTask(task, mesos.TaskState_TASK_FAILED)
		message := messages.CreateBindingFailure
		status.Message = &message
		testScheduler.StatusUpdate(mockDriver, status)

		// wait until pod is looked up at the apiserver
		assertext.EventuallyTrue(t, time.Second, func() bool {
			return testApiServer.Stats(pod.Name) == beforePodLookups+1
		}, "expect that reconcileTask will access apiserver for pod %v", pod.Name)
	}

	launchTestPod := func() (*api.Pod, *LaunchedTask, *mesos.Offer) {
		pod, i := NewTestPod()
		offers := []*mesos.Offer{NewTestOffer(fmt.Sprintf("offer%d", i))}
		return launchPodWithOffers(pod, offers)
	}

	// 1. with pod deleted from the apiserver
	//    expected: pod is removed from internal task registry
	pod, launchedTask, _ = launchTestPod()
	podListWatch.Delete(pod, false) // not notifying the watchers
	failPodFromExecutor(launchedTask.taskInfo)

	podKey, _ := podtask.MakePodKey(api.NewDefaultContext(), pod.Name)
	assertext.EventuallyTrue(t, time.Second, func() bool {
		t, _ := p.api.tasks().ForPod(podKey)
		return t == nil
	})

	// 2. with pod still on the apiserver, not bound
	//    expected: pod is rescheduled
	pod, launchedTask, _ = launchTestPod()
	failPodFromExecutor(launchedTask.taskInfo)

	retryOffers := []*mesos.Offer{NewTestOffer("retry-offer")}
	schedulePodWithOffers(pod, retryOffers)

	// 3. with pod still on the apiserver, bound, notified via ListWatch
	//    expected: nothing, pod updates not supported, compare ReconcileTask function
	pod, launchedTask, usedOffer = startTestPod()
	pod.Annotations = map[string]string{
		meta.BindingHostKey: *usedOffer.Hostname,
	}
	pod.Spec.NodeName = *usedOffer.Hostname
	podListWatch.Modify(pod, true) // notifying the watchers
	time.Sleep(time.Second / 2)
	failPodFromExecutor(launchedTask.taskInfo)
}
Example 8: TestScheduler_LifeCycle
//......... portions of this example are omitted .........
	// mock driver.KillTask, should be invoked when a pod is deleted
	lt.driver.On("KillTask",
		mock.AnythingOfType("*mesosproto.TaskID"),
	).Return(mesos.Status_DRIVER_RUNNING, nil).Run(func(args mock.Arguments) {
		killedTaskId := *(args.Get(0).(*mesos.TaskID))
		assert.Equal(*launchedTask.taskInfo.TaskId, killedTaskId, "expected same TaskID as during launch")
	})
	killTaskCalled := lt.driver.Upon()

	// stop it again via the apiserver mock
	lt.podsListWatch.Delete(pod, true) // notify watchers

	// and wait for the driver killTask call with the correct TaskId
	select {
	case <-killTaskCalled:
		// report back that the task is finished
		lt.framework.StatusUpdate(
			lt.driver,
			newTaskStatusForTask(launchedTask.taskInfo, mesos.TaskState_TASK_FINISHED),
		)
	case <-time.After(util.ForeverTestTimeout):
		t.Fatal("timed out waiting for KillTask")
	}

	// start a pod on a given NodeName and check that it is scheduled to the right host
	pod, i = NewTestPod()
	pod.Spec.NodeName = "hostname1"
	offers = []*mesos.Offer{}
	for j := 0; j < 3; j++ {
		offer := NewTestOffer(fmt.Sprintf("offer%d_%d", i, j))
		hostname := fmt.Sprintf("hostname%d", j)
		offer.Hostname = &hostname
		offers = append(offers, offer)
	}
	_, _, usedOffer := startPodWithOffers(pod, offers)
	assert.Equal(offers[1].Id.GetValue(), usedOffer.Id.GetValue())
	assert.Equal(pod.Spec.NodeName, *usedOffer.Hostname)

	lt.framework.OfferRescinded(lt.driver, offers[0].Id)
	lt.framework.OfferRescinded(lt.driver, offers[2].Id)

	// start pods:
	// - which are failing while binding,
	// - leading to reconciliation
	// - with different states on the apiserver
	failPodFromExecutor := func(task *mesos.TaskInfo) {
		beforePodLookups := lt.apiServer.Stats(pod.Name)
		status := newTaskStatusForTask(task, mesos.TaskState_TASK_FAILED)
		message := messages.CreateBindingFailure
		status.Message = &message
		lt.framework.StatusUpdate(lt.driver, status)

		// wait until pod is looked up at the apiserver
		assertext.EventuallyTrue(t, util.ForeverTestTimeout, func() bool {
			return lt.apiServer.Stats(pod.Name) == beforePodLookups+1
		}, "expect that reconcileTask will access apiserver for pod %v", pod.Name)
	}

	launchTestPod := func() (*api.Pod, *LaunchedTask, *mesos.Offer) {
		pod, i := NewTestPod()
		offers := []*mesos.Offer{NewTestOffer(fmt.Sprintf("offer%d", i))}
		return launchPodWithOffers(pod, offers)
	}

	// 1. with pod deleted from the apiserver
	//    expected: pod is removed from internal task registry
	pod, launchedTask, _ = launchTestPod()
	lt.podsListWatch.Delete(pod, false) // not notifying the watchers
	failPodFromExecutor(launchedTask.taskInfo)

	podKey, _ := podtask.MakePodKey(api.NewDefaultContext(), pod.Name)
	assertext.EventuallyTrue(t, util.ForeverTestTimeout, func() bool {
		t, _ := lt.sched.Tasks().ForPod(podKey)
		return t == nil
	})

	// 2. with pod still on the apiserver, not bound
	//    expected: pod is rescheduled
	pod, launchedTask, _ = launchTestPod()
	failPodFromExecutor(launchedTask.taskInfo)

	retryOffers := []*mesos.Offer{NewTestOffer("retry-offer")}
	schedulePodWithOffers(pod, retryOffers)

	// 3. with pod still on the apiserver, bound, notified via ListWatch
	//    expected: nothing, pod updates not supported, compare ReconcileTask function
	pod, launchedTask, usedOffer = startTestPod()
	pod.Annotations = map[string]string{
		meta.BindingHostKey: *usedOffer.Hostname,
	}
	pod.Spec.NodeName = *usedOffer.Hostname
	lt.podsListWatch.Modify(pod, true) // notifying the watchers
	time.Sleep(time.Second / 2)
	failPodFromExecutor(launchedTask.taskInfo)
}