This article collects typical usage examples of the Golang method k8s.io/kubernetes/contrib/mesos/pkg/offers.Perishable.Details. If you are wondering what Perishable.Details does, how to call it, or where to find examples, the curated code samples below should help. You can also explore the containing type, k8s.io/kubernetes/contrib/mesos/pkg/offers.Perishable, for more context.
The section below shows 4 code examples of the Perishable.Details method, sorted by popularity by default. You can vote up the examples you like or find useful; your ratings help the system recommend better Golang code examples.
Example 1: doSchedule
// doSchedule schedules the given task and returns the machine the task is scheduled on
// or an error if the scheduling failed.
func (k *kubeScheduler) doSchedule(task *podtask.T) (string, error) {
	var offer offers.Perishable
	var err error
	if task.HasAcceptedOffer() {
		// verify that the offer is still on the table
		var ok bool
		offer, ok = k.api.offers().Get(task.GetOfferId())
		if !ok || offer.HasExpired() {
			task.Offer.Release()
			task.Reset()
			if err = k.api.tasks().Update(task); err != nil {
				return "", err
			}
		}
	}
	if offer == nil {
		offer, err = k.api.algorithm().SchedulePod(k.api.offers(), k.api, task)
	}
	if err != nil {
		return "", err
	}
	details := offer.Details()
	if details == nil {
		return "", fmt.Errorf("offer already invalid/expired for task %v", task.ID)
	}
	slaveId := details.GetSlaveId().GetValue()
	slaveHostName := k.api.slaveHostNameFor(slaveId)
	if slaveHostName == "" {
		// not much sense in Release()ing the offer here since its owner died
		offer.Release()
		k.api.offers().Invalidate(details.Id.GetValue())
		return "", fmt.Errorf("Slave disappeared (%v) while scheduling task %v", slaveId, task.ID)
	}
	if task.Offer != nil && task.Offer != offer {
		return "", fmt.Errorf("task.offer assignment must be idempotent, task %+v: offer %+v", task, offer)
	}
	task.Offer = offer
	if err := k.api.algorithm().Procurement()(task, details); err != nil {
		offer.Release()
		task.Reset()
		return "", err
	}
	if err := k.api.tasks().Update(task); err != nil {
		offer.Release()
		return "", err
	}
	return slaveHostName, nil
}
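From the call sites in these examples, offers.Perishable exposes at least the methods below. The following is a sketch inferred purely from the usage above, not the authoritative definition in the offers package:

	// Inferred shape of offers.Perishable (a sketch; the real interface in
	// k8s.io/kubernetes/contrib/mesos/pkg/offers may declare more methods).
	// mesos.Offer refers to the protobuf type from github.com/mesos/mesos-go/mesosproto.
	type Perishable interface {
		Details() *mesos.Offer // the underlying Mesos offer; nil once invalid/expired
		HasExpired() bool      // reports whether the offer's lifetime has elapsed
		Release()              // releases the claim on the offer
	}

The nil return from Details() is what every example below guards against before touching the offer's slave ID or hostname.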
Example 2: doSchedule
// Call ScheduleFunc and subtract some resources, returning the name of the machine the task is scheduled on
func (k *kubeScheduler) doSchedule(task *podtask.T, err error) (string, error) {
	var offer offers.Perishable
	if task.HasAcceptedOffer() {
		// verify that the offer is still on the table
		offerId := task.GetOfferId()
		// o, not offer: a short declaration of "offer" here would shadow the outer variable
		if o, ok := k.api.offers().Get(offerId); ok && !o.HasExpired() {
			// skip tasks that already have an assigned offer
			offer = task.Offer
		} else {
			task.Offer.Release()
			task.Reset()
			if err = k.api.tasks().Update(task); err != nil {
				return "", err
			}
		}
	}
	if err == nil && offer == nil {
		offer, err = k.api.algorithm()(k.api.offers(), k.api, task)
	}
	if err != nil {
		return "", err
	}
	details := offer.Details()
	if details == nil {
		return "", fmt.Errorf("offer already invalid/expired for task %v", task.ID)
	}
	slaveId := details.GetSlaveId().GetValue()
	if slave, ok := k.api.slaveFor(slaveId); !ok {
		// not much sense in Release()ing the offer here since its owner died
		offer.Release()
		k.api.offers().Invalidate(details.Id.GetValue())
		return "", fmt.Errorf("Slave disappeared (%v) while scheduling task %v", slaveId, task.ID)
	} else {
		if task.Offer != nil && task.Offer != offer {
			return "", fmt.Errorf("task.offer assignment must be idempotent, task %+v: offer %+v", task, offer)
		}
		// write resource limits into the pod spec which is transferred to the executor. From here
		// on we can expect that the pod spec of a task has proper limits for CPU and memory.
		// TODO(sttts): For a later separation of the kubelet and the executor also patch the pod on the apiserver
		if unlimitedCPU := mresource.LimitPodCPU(&task.Pod, k.defaultContainerCPULimit); unlimitedCPU {
			log.Warningf("Pod %s/%s without cpu limits is admitted %.2f cpu shares", task.Pod.Namespace, task.Pod.Name, mresource.PodCPULimit(&task.Pod))
		}
		if unlimitedMem := mresource.LimitPodMem(&task.Pod, k.defaultContainerMemLimit); unlimitedMem {
			log.Warningf("Pod %s/%s without memory limits is admitted %.2f MB", task.Pod.Namespace, task.Pod.Name, mresource.PodMemLimit(&task.Pod))
		}
		task.Offer = offer
		task.FillFromDetails(details)
		if err := k.api.tasks().Update(task); err != nil {
			offer.Release()
			return "", err
		}
		return slave.HostName, nil
	}
}
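A Go subtlety in this variant deserves a note: inside the if statement, a short variable declaration such as `if offer, ok := k.api.offers().Get(offerId); ...` would create a new, block-scoped offer that shadows the one declared at the top of the function, leaving the outer variable nil. Renaming the inner variable (o above) avoids that. A minimal, self-contained illustration of the shadowing behavior:

	package main

	import "fmt"

	func lookup() (string, bool) { return "offer-1", true }

	func main() {
		var offer string
		if offer, ok := lookup(); ok {
			_ = offer // a new variable scoped to this if block, not the outer offer
		}
		fmt.Printf("outer offer: %q\n", offer) // prints "" — the outer variable was never assigned
	}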
Example 3: doSchedule
// Call ScheduleFunc and subtract some resources, returning the name of the machine the task is scheduled on
func (k *schedulerAlgorithm) doSchedule(task *podtask.T) (string, error) {
	var offer offers.Perishable
	var err error
	if task.HasAcceptedOffer() {
		// verify that the offer is still on the table
		var ok bool
		offer, ok = k.sched.Offers().Get(task.GetOfferId())
		if !ok || offer.HasExpired() {
			task.Offer.Release()
			task.Reset()
			if err = k.sched.Tasks().Update(task); err != nil {
				return "", err
			}
		}
	}
	if offer == nil {
		offer, err = k.podScheduler.SchedulePod(k.sched.Offers(), task)
	}
	if err != nil {
		return "", err
	}
	details := offer.Details()
	if details == nil {
		return "", fmt.Errorf("offer already invalid/expired for task %v", task.ID)
	}
	if task.Offer != nil && task.Offer != offer {
		return "", fmt.Errorf("task.offer assignment must be idempotent, task %+v: offer %+v", task, offer)
	}
	task.Offer = offer
	if err := k.podScheduler.Procurement()(task, details); err != nil {
		offer.Release()
		task.Reset()
		return "", err
	}
	if err := k.sched.Tasks().Update(task); err != nil {
		offer.Release()
		return "", err
	}
	return details.GetHostname(), nil
}
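Unlike the other variants, this one does not translate the slave ID into a hostname through the scheduler API; it reads the hostname directly from the offer details. All four examples share the same guard, though: Details() returning nil is the signal that the offer expired between selection and use. That guard could be factored into a small helper along these lines (a hypothetical function, not part of the offers package):

	// hostnameFromOffer extracts the target hostname from a still-valid offer,
	// mirroring the nil-check used in each example above.
	func hostnameFromOffer(offer offers.Perishable) (string, error) {
		details := offer.Details()
		if details == nil {
			return "", fmt.Errorf("offer already invalid/expired")
		}
		return details.GetHostname(), nil
	}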
Example 4: doSchedule
// Call ScheduleFunc and subtract some resources, returning the name of the machine the task is scheduled on
func (k *kubeScheduler) doSchedule(task *podtask.T, err error) (string, error) {
	var offer offers.Perishable
	if task.HasAcceptedOffer() {
		// verify that the offer is still on the table
		offerId := task.GetOfferId()
		// o, not offer: a short declaration of "offer" here would shadow the outer variable
		if o, ok := k.api.offers().Get(offerId); ok && !o.HasExpired() {
			// skip tasks that already have an assigned offer
			offer = task.Offer
		} else {
			task.Offer.Release()
			task.Reset()
			if err = k.api.tasks().Update(task); err != nil {
				return "", err
			}
		}
	}
	if err == nil && offer == nil {
		offer, err = k.api.algorithm().SchedulePod(k.api.offers(), k.api, task)
	}
	if err != nil {
		return "", err
	}
	details := offer.Details()
	if details == nil {
		return "", fmt.Errorf("offer already invalid/expired for task %v", task.ID)
	}
	slaveId := details.GetSlaveId().GetValue()
	if slave, ok := k.api.slaveFor(slaveId); !ok {
		// not much sense in Release()ing the offer here since its owner died
		offer.Release()
		k.api.offers().Invalidate(details.Id.GetValue())
		return "", fmt.Errorf("Slave disappeared (%v) while scheduling task %v", slaveId, task.ID)
	} else {
		if task.Offer != nil && task.Offer != offer {
			return "", fmt.Errorf("task.offer assignment must be idempotent, task %+v: offer %+v", task, offer)
		}
		task.Offer = offer
		k.api.algorithm().Procurement()(task, details) // TODO(jdef) why is nothing checking the error returned here?
		if err := k.api.tasks().Update(task); err != nil {
			offer.Release()
			return "", err
		}
		return slave.HostName, nil
	}
}
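The TODO in this variant points at a dropped error: the result of the Procurement() call is ignored. Examples 1 and 3 show the checked form; a sketch of the same treatment here, reusing the rollback pattern from those examples (and assuming the same receiver fields), would be:

	if err := k.api.algorithm().Procurement()(task, details); err != nil {
		offer.Release()
		task.Reset()
		return "", err
	}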