本文整理匯總了Golang中github.com/mesos/mesos-go/scheduler.SchedulerDriver.AcceptOffers方法的典型用法代碼示例。如果您正苦於以下問題:Golang SchedulerDriver.AcceptOffers方法的具體用法?Golang SchedulerDriver.AcceptOffers怎麽用?Golang SchedulerDriver.AcceptOffers使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類github.com/mesos/mesos-go/scheduler.SchedulerDriver
的用法示例。
在下文中一共展示了SchedulerDriver.AcceptOffers方法的2個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Golang代碼示例。
示例1: acceptOffer
// acceptOffer applies the given operations to a single offer.
//
// In compatibility mode (masters without the ACCEPT call), the tasks
// from every LAUNCH operation are collected and submitted through the
// legacy LaunchTasks API; otherwise the operations are submitted as-is
// via AcceptOffers. Both paths refuse the remainder of the offer for
// OFFER_INTERVAL seconds.
//
// The process panics if the call returns an error and exits fatally if
// the driver is not running.
func (sc *SchedulerCore) acceptOffer(driver sched.SchedulerDriver, offer *mesos.Offer, operations []*mesos.Offer_Operation) {
	log.Infof("Accepting OfferID: %+v, Operations: %+v", *offer.Id.Value, operations)
	var status mesos.Status
	var err error
	// Same refusal filter for both API paths.
	filters := &mesos.Filters{RefuseSeconds: proto.Float64(OFFER_INTERVAL)}
	if sc.compatibilityMode {
		// Append tasks from every LAUNCH operation; assigning instead of
		// appending would silently drop all but the last LAUNCH when more
		// than one is present.
		tasks := []*mesos.TaskInfo{}
		for _, operation := range operations {
			if *operation.Type == mesos.Offer_Operation_LAUNCH {
				tasks = append(tasks, operation.Launch.TaskInfos...)
			}
		}
		status, err = driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, filters)
	} else {
		status, err = driver.AcceptOffers([]*mesos.OfferID{offer.Id}, operations, filters)
	}
	// Check the call error before the driver-state check: log.Fatal exits
	// the process, which would otherwise mask a failed call's error.
	if err != nil {
		log.Panic("Failed to launch tasks: ", err)
	}
	if status != mesos.Status_DRIVER_RUNNING {
		log.Fatal("Driver not running, while trying to accept offers")
	}
}
示例2: ResourceOffers
//.........這裏部分代碼省略.........
MEM_PER_TASK <= reservedMem &&
DISK_PER_TASK <= reservedDisk &&
resourcesHaveVolume(offer.Resources, task.persistenceId) {
taskId := &mesos.TaskID{
Value: proto.String(task.name),
}
taskInfo := &mesos.TaskInfo{
Name: proto.String("go-task-" + taskId.GetValue()),
TaskId: taskId,
SlaveId: offer.SlaveId,
Executor: task.executor,
Resources: []*mesos.Resource{
util.NewScalarResourceWithReservation("cpus", CPUS_PER_TASK, *mesosAuthPrincipal, *role),
util.NewScalarResourceWithReservation("mem", MEM_PER_TASK, *mesosAuthPrincipal, *role),
util.NewVolumeResourceWithReservation(DISK_PER_TASK, task.containerPath, task.persistenceId, mesos.Volume_RW.Enum(), *mesosAuthPrincipal, *role),
},
}
taskInfosToLaunch = append(taskInfosToLaunch, taskInfo)
task.state = LaunchedState
reservedCpus = reservedCpus - CPUS_PER_TASK
reservedMem = reservedMem - MEM_PER_TASK
reservedDisk = reservedDisk - DISK_PER_TASK
log.Infof("Prepared task: %s with offer %s for launch\n", taskInfo.GetName(), offer.Id.GetValue())
}
case FinishedState:
resourcesToDestroy = append(resourcesToDestroy,
util.NewVolumeResourceWithReservation(DISK_PER_TASK, task.containerPath, task.persistenceId, mesos.Volume_RW.Enum(), *mesosAuthPrincipal, *role))
resourcesToUnreserve = append(resourcesToUnreserve, []*mesos.Resource{
util.NewScalarResourceWithReservation("cpus", CPUS_PER_TASK, *mesosAuthPrincipal, *role),
util.NewScalarResourceWithReservation("mem", MEM_PER_TASK, *mesosAuthPrincipal, *role),
util.NewScalarResourceWithReservation("disk", DISK_PER_TASK, *mesosAuthPrincipal, *role),
}...)
task.state = UnreservedState
case UnreservedState:
totalUnreserved = totalUnreserved + 1
}
}
// Clean up reservations we no longer need
if len(resourcesToReserve) == 0 && len(resourcesToCreate) == 0 && len(taskInfosToLaunch) == 0 {
if reservedCpus >= 0.0 {
resourcesToUnreserve = append(resourcesToUnreserve, util.NewScalarResourceWithReservation("cpus", reservedCpus, *mesosAuthPrincipal, *role))
}
if reservedMem >= 0.0 {
resourcesToUnreserve = append(resourcesToUnreserve, util.NewScalarResourceWithReservation("mem", reservedCpus, *mesosAuthPrincipal, *role))
}
if reservedDisk >= 0.0 {
filtered := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
return res.GetName() == "disk" &&
res.Reservation != nil &&
res.Disk != nil
})
for _, volume := range filtered {
resourcesToDestroy = append(resourcesToDestroy,
util.NewVolumeResourceWithReservation(
volume.GetScalar().GetValue(),
volume.Disk.Volume.GetContainerPath(),
volume.Disk.Persistence.GetId(),
volume.Disk.Volume.Mode,
*mesosAuthPrincipal,
*role))
}
resourcesToUnreserve = append(resourcesToUnreserve, util.NewScalarResourceWithReservation("mem", reservedDisk, *mesosAuthPrincipal, *role))
}
}
// Make a single operation per type
if len(resourcesToReserve) > 0 {
operations = append(operations, util.NewReserveOperation(resourcesToReserve))
}
if len(resourcesToCreate) > 0 {
operations = append(operations, util.NewCreateOperation(resourcesToCreate))
}
if len(resourcesToUnreserve) > 0 {
operations = append(operations, util.NewUnreserveOperation(resourcesToUnreserve))
}
if len(resourcesToDestroy) > 0 {
operations = append(operations, util.NewDestroyOperation(resourcesToDestroy))
}
if len(taskInfosToLaunch) > 0 {
operations = append(operations, util.NewLaunchOperation(taskInfosToLaunch))
}
log.Infoln("Accepting offers with ", len(operations), "operations for offer", offer.Id.GetValue())
refuseSeconds := 5.0
if len(operations) == 0 {
refuseSeconds = 5.0
}
driver.AcceptOffers([]*mesos.OfferID{offer.Id}, operations, &mesos.Filters{RefuseSeconds: proto.Float64(refuseSeconds)})
if totalUnreserved >= len(sched.tasks) {
log.Infoln("Total tasks completed and unreserved, stopping framework.")
driver.Stop(false)
}
}
}