本文整理匯總了Golang中github.com/mesosphere/mesos-go/mesos.SchedulerDriver.DeclineOffer方法的典型用法代碼示例。如果您正苦於以下問題:Golang SchedulerDriver.DeclineOffer方法的具體用法?Golang SchedulerDriver.DeclineOffer怎麽用?Golang SchedulerDriver.DeclineOffer使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類github.com/mesosphere/mesos-go/mesos.SchedulerDriver
的用法示例。
在下文中一共展示了SchedulerDriver.DeclineOffer方法的2個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: main
// ......... (part of the code is omitted here) .........
// NOTE(review): this snippet is truncated at both ends — the enclosing
// function header (apparently a maxTasksForOffer-style closure) and the
// tail of main are not visible, so the fragment is not compilable as-is.

// Deduct this task's resource requirements from the offer's remaining
// capacity (counting how many tasks fit in the offer).
cpus -= TASK_CPUS
mem -= TASK_MEM
}
return count
}
// Placeholder hook for reporting queue depths; currently a no-op.
printQueueStatistics := func() {
// TODO(nnielsen): Print queue lengths.
}
// Build the RENDLER scheduler driver: framework identity plus the set of
// callbacks invoked by the Mesos master.
driver := mesos.SchedulerDriver{
Master: *master,
Framework: mesos.FrameworkInfo{
Name: proto.String("RENDLER"),
User: proto.String(""),
},
Scheduler: &mesos.Scheduler{
// Invoked once the framework has registered with the master.
Registered: func(
driver *mesos.SchedulerDriver,
frameworkId mesos.FrameworkID,
masterInfo mesos.MasterInfo) {
log.Printf("Registered")
},
// For each resource offer: decline it when shutting down or when no
// work is queued; otherwise drain URLs from the crawl/render queues
// into tasks and launch them on the offer.
ResourceOffers: func(driver *mesos.SchedulerDriver, offers []mesos.Offer) {
printQueueStatistics()
for _, offer := range offers {
if shuttingDown {
fmt.Println("Shutting down: declining offer on [", offer.Hostname, "]")
driver.DeclineOffer(offer.Id)
continue
}
tasks := []mesos.TaskInfo{}
// Each iteration may schedule one crawl task AND one render task,
// hence the per-offer budget is divided by 2.
for i := 0; i < maxTasksForOffer(offer)/2; i++ {
if crawlQueue.Front() != nil {
// Pop the next URL from the crawl queue and wrap it in a task.
url := crawlQueue.Front().Value.(string)
crawlQueue.Remove(crawlQueue.Front())
task := makeCrawlTask(url, offer)
tasks = append(tasks, *task)
}
if renderQueue.Front() != nil {
// Likewise for the render queue.
url := renderQueue.Front().Value.(string)
renderQueue.Remove(renderQueue.Front())
task := makeRenderTask(url, offer)
tasks = append(tasks, *task)
}
}
if len(tasks) == 0 {
// Nothing to run: hand the offer back to the master.
driver.DeclineOffer(offer.Id)
} else {
driver.LaunchTasks(offer.Id, tasks)
}
}
},
// Logs every task status transition received from the master.
StatusUpdate: func(driver *mesos.SchedulerDriver, status mesos.TaskStatus) {
log.Printf("Received task status [%s] for task [%s]", rendler.NameFor(status.State), *status.TaskId.Value)
if *status.State == mesos.TaskState_TASK_RUNNING {
// NOTE(review): snippet is cut off here; the body of this branch and
// the remainder of the callback are not visible.
示例2: main
// ......... (part of the code is omitted here) .........
// NOTE(review): this snippet is truncated at the start — declarations for
// cluster, master, taskId, executor, updateInterval, defaultPort, and
// angstromPath are not visible here.

// Background refresher: re-sample cluster state every updateInterval.
// NOTE(review): no stop signal is visible — presumably this goroutine runs
// for the whole process lifetime; confirm against the omitted code.
for {
cluster.Update()
time.Sleep(updateInterval)
}
}()
// Seed the work list with one "host:port" entry per sampled slave.
slaves := list.New()
for _, slave := range cluster.Sample.Slaves {
slaveHostname := slave.Hostname + ":" + strconv.Itoa(slave.Port)
slaves.PushBack(slaveHostname)
}
// TODO(nnielsen): Move this and callbacks to dedicated scheduler package / struct.
// Maps a task name to the slave it was scheduled on.
taskToSlave := make(map[string]string)
// scheduleTask pops the next slave off the list and builds a monitoring
// task for it on the given offer; returns nil when no slaves remain.
scheduleTask := func(offer mesos.Offer) *mesos.TaskInfo {
slave := slaves.Front()
if slave == nil {
return nil
}
glog.V(2).Infof("Scheduling slave '%s'", slave.Value.(string))
slaves.Remove(slave)
task := "angstrom-task-" + strconv.Itoa(taskId)
taskToSlave[task] = slave.Value.(string)
return &mesos.TaskInfo{
Name: proto.String("angstrom-task"),
TaskId: &mesos.TaskID{
Value: proto.String(task),
},
SlaveId: offer.SlaveId,
Executor: executor,
// The executor payload tells the task which slave to monitor.
Data: []byte("{\"slave\": \"" + slave.Value.(string) + "\"}"),
Resources: []*mesos.Resource{
mesos.ScalarResource("cpus", 0.5),
mesos.ScalarResource("mem", 32),
},
}
}
// Build the Angstrom metrics scheduler driver: framework identity plus
// the callbacks invoked by the Mesos master.
driver := mesos.SchedulerDriver{
Master: *master,
Framework: mesos.FrameworkInfo{
Name: proto.String("Angstrom metrics"),
User: proto.String(""),
},
Scheduler: &mesos.Scheduler{
// For each offer: launch one task if a slave is pending, otherwise
// decline the offer.
ResourceOffers: func(driver *mesos.SchedulerDriver, offers []mesos.Offer) {
for _, offer := range offers {
taskId++
tasks := make([]mesos.TaskInfo, 0)
task := scheduleTask(offer)
if task != nil {
glog.V(2).Infof("Launching on offer %v", offer.Id)
tasks = append(tasks, *task)
driver.LaunchTasks(offer.Id, tasks)
} else {
glog.V(2).Infof("Declining offer %v", offer.Id)
driver.DeclineOffer(offer.Id)
}
}
},
// Decodes statistics JSON sent by an executor and feeds it into the
// cluster sample store. Malformed payloads are silently dropped.
FrameworkMessage: func(driver *mesos.SchedulerDriver, _executorId mesos.ExecutorID, slaveId mesos.SlaveID, data string) {
var target []payload.StatisticsInfo
err := json.Unmarshal([]byte(data), &target)
if err != nil {
return
}
cluster.AddSlaveSamples(slaveId, target)
},
// Currently ignores all status transitions (both branches are empty).
StatusUpdate: func(driver *mesos.SchedulerDriver, status mesos.TaskStatus) {
// TODO(nnielsen): Readd slave task to queue in case of any terminal state.
if *status.State == mesos.TaskState_TASK_RUNNING {
} else if *status.State == mesos.TaskState_TASK_FINISHED {
}
},
},
}
// Driver lifecycle: init, start, serve the HTTP endpoints, then block
// until the driver terminates; Destroy is deferred for cleanup.
driver.Init()
defer driver.Destroy()
driver.Start()
endpoints.Initialize(defaultPort, *angstromPath, cluster)
glog.V(2).Infof("Waiting for threads to join")
driver.Join()
}