This article collects typical usage examples of the Golang method github.com/mesosphere/mesos-go/mesos.SchedulerDriver.Init. If you have been asking yourself what SchedulerDriver.Init does in concrete terms, or how to call it, the curated examples below should help. You can also explore further usage examples of the type this method belongs to, github.com/mesosphere/mesos-go/mesos.SchedulerDriver.
Four code examples of the SchedulerDriver.Init method are shown below, sorted by popularity by default.
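All four examples follow the same driver lifecycle: construct a mesos.SchedulerDriver, call Init before anything else, defer Destroy, then Start the driver and block (via Join or a channel) until Stop. Below is a minimal sketch of that skeleton, assuming the legacy mesos-go bindings used throughout these examples; the proto import path, the master address, the framework name, and the empty callback set are illustrative assumptions, not part of the original examples.

package main

import (
	"code.google.com/p/goprotobuf/proto" // assumed proto path for this era of mesos-go
	"github.com/mesosphere/mesos-go/mesos"
)

func main() {
	driver := mesos.SchedulerDriver{
		Master: "localhost:5050", // hypothetical master address
		Framework: mesos.FrameworkInfo{
			Name: proto.String("LifecycleSketch"), // hypothetical framework name
			User: proto.String(""),                // empty user: run as the current user
		},
		Scheduler: &mesos.Scheduler{}, // no callbacks registered; real frameworks set them
	}

	driver.Init()          // initialize internal driver state; required before Start
	defer driver.Destroy() // release driver resources on the way out

	driver.Start()     // begin receiving callbacks from the master
	driver.Join()      // block until the driver is stopped
	driver.Stop(false) // false: unregister rather than fail over
}

The Init/Destroy pair brackets the driver's lifetime, which is why every example below defers Destroy immediately after calling Init.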
Example 1: Run
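In this example a resource manager builds the driver with its framework identity and pre-bound callback methods, calls Init, defers Destroy, and then runs until an exit signal arrives on self.exit.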
func (self *ResMan) Run(master string) {
	frameworkIdStr := FRAMEWORK_ID
	frameworkId := &mesos.FrameworkID{Value: &frameworkIdStr}
	driver := mesos.SchedulerDriver{
		Master: master,
		Framework: mesos.FrameworkInfo{
			Name:            proto.String("TyrantFramework"),
			User:            proto.String(""),
			FailoverTimeout: failoverTimeout,
			Id:              frameworkId,
		},
		Scheduler: &mesos.Scheduler{
			ResourceOffers: self.OnResourceOffers,
			StatusUpdate:   self.OnStatusUpdate,
			Error:          self.OnError,
			Disconnected:   self.OnDisconnected,
			Registered:     self.OnRegister,
			Reregistered:   self.OnReregister,
		},
	}

	driver.Init()
	defer driver.Destroy()

	go self.EventLoop()
	driver.Start()
	<-self.exit
	log.Debug("exit")
	driver.Stop(false)
}
Example 2: main
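This excerpt shows the tail of the RENDLER web crawler's main function: StatusUpdate tracks how many tasks are running, FrameworkMessage collects crawl and render results from the two executors, and a signal handler drains running tasks (waiting up to SHUTDOWN_TIMEOUT seconds) before stopping the driver.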
//......... portion of code omitted .........
				}
			},
			StatusUpdate: func(driver *mesos.SchedulerDriver, status mesos.TaskStatus) {
				log.Printf("Received task status [%s] for task [%s]", rendler.NameFor(status.State), *status.TaskId.Value)
				if *status.State == mesos.TaskState_TASK_RUNNING {
					tasksRunning++
				} else if rendler.IsTerminal(status.State) {
					tasksRunning--
				}
			},
			FrameworkMessage: func(
				driver *mesos.SchedulerDriver,
				executorId mesos.ExecutorID,
				slaveId mesos.SlaveID,
				message string) {
				switch *executorId.Value {
				case *crawlExecutor.ExecutorId.Value:
					log.Print("Received framework message from crawler")
					var result rendler.CrawlResult
					err := json.Unmarshal([]byte(message), &result)
					if err != nil {
						log.Printf("Error deserializing CrawlResult: [%s]", err)
					} else {
						for _, link := range result.Links {
							edge := rendler.Edge{From: result.URL, To: link}
							log.Printf("Appending [%s] to crawl results", edge)
							crawlResults.PushBack(edge)

							alreadyProcessed := false
							for e := processedURLs.Front(); e != nil && !alreadyProcessed; e = e.Next() {
								processedURL := e.Value.(string)
								if link == processedURL {
									alreadyProcessed = true
								}
							}
							if !alreadyProcessed {
								log.Printf("Enqueueing [%s]", link)
								crawlQueue.PushBack(link)
								renderQueue.PushBack(link)
								processedURLs.PushBack(link)
							}
						}
					}
				case *renderExecutor.ExecutorId.Value:
					log.Printf("Received framework message from renderer")
					var result rendler.RenderResult
					err := json.Unmarshal([]byte(message), &result)
					if err != nil {
						log.Printf("Error deserializing RenderResult: [%s]", err)
					} else {
						log.Printf(
							"Appending [%s] to render results",
							rendler.Edge{From: result.URL, To: result.ImageURL})
						renderResults[result.URL] = result.ImageURL
					}
				default:
					log.Printf("Received a framework message from some unknown source: %s", *executorId.Value)
				}
			},
		},
	}

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, os.Kill)
	go func(c chan os.Signal) {
		s := <-c
		fmt.Println("Got signal:", s)
		if s == os.Interrupt {
			fmt.Println("RENDLER is shutting down")
			shuttingDown = true
			waitStarted := time.Now()
			for tasksRunning > 0 && SHUTDOWN_TIMEOUT > int(time.Since(waitStarted).Seconds()) {
				time.Sleep(time.Second)
			}
			if tasksRunning > 0 {
				fmt.Println("Shutdown by timeout,", tasksRunning, "task(s) have not completed")
			}
			driver.Stop(false)
		}
	}(c)

	driver.Init()
	defer driver.Destroy()
	driver.Start()
	driver.Join()
	driver.Stop(false)

	rendler.WriteDOTFile(crawlResults, renderResults)
	os.Exit(0)
}
Example 3: main
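This test framework launches one task per resource offer against a bundled executor, counts finished tasks in StatusUpdate, and stops the driver once five tasks have completed.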
func main() {
	taskLimit := 5
	taskId := 0
	exit := make(chan bool)
	localExecutor, _ := executorPath()

	master := flag.String("master", "localhost:5050", "Location of leading Mesos master")
	executorUri := flag.String("executor-uri", localExecutor, "URI of executor executable")
	flag.Parse()

	executor := &mesos.ExecutorInfo{
		ExecutorId: &mesos.ExecutorID{Value: proto.String("default")},
		Command: &mesos.CommandInfo{
			Value: proto.String("./example_executor"),
			Uris: []*mesos.CommandInfo_URI{
				&mesos.CommandInfo_URI{Value: executorUri},
			},
		},
		Name:   proto.String("Test Executor (Go)"),
		Source: proto.String("go_test"),
	}

	driver := mesos.SchedulerDriver{
		Master: *master,
		Framework: mesos.FrameworkInfo{
			Name: proto.String("GoFramework"),
			User: proto.String(""),
		},
		Scheduler: &mesos.Scheduler{
			ResourceOffers: func(driver *mesos.SchedulerDriver, offers []mesos.Offer) {
				for _, offer := range offers {
					taskId++
					fmt.Printf("Launching task: %d\n", taskId)
					tasks := []mesos.TaskInfo{
						mesos.TaskInfo{
							Name: proto.String("go-task"),
							TaskId: &mesos.TaskID{
								Value: proto.String("go-task-" + strconv.Itoa(taskId)),
							},
							SlaveId:  offer.SlaveId,
							Executor: executor,
							Resources: []*mesos.Resource{
								mesos.ScalarResource("cpus", 1),
								mesos.ScalarResource("mem", 512),
							},
						},
					}
					driver.LaunchTasks(offer.Id, tasks)
				}
			},
			StatusUpdate: func(driver *mesos.SchedulerDriver, status mesos.TaskStatus) {
				fmt.Println("Received task status: " + *status.Message)
				if *status.State == mesos.TaskState_TASK_FINISHED {
					taskLimit--
					if taskLimit <= 0 {
						exit <- true
					}
				}
			},
		},
	}

	driver.Init()
	defer driver.Destroy()
	driver.Start()
	<-exit
	driver.Stop(false)
}
Example 4: main
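This excerpt from the Angstrom metrics framework assigns one monitoring task per sampled slave, declines offers it cannot use, and feeds statistics reported via framework messages back into its cluster model.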
//......... portion of code omitted .........
		for {
			cluster.Update()
			time.Sleep(updateInterval)
		}
	}()

	slaves := list.New()
	for _, slave := range cluster.Sample.Slaves {
		slaveHostname := slave.Hostname + ":" + strconv.Itoa(slave.Port)
		slaves.PushBack(slaveHostname)
	}

	// TODO(nnielsen): Move this and callbacks to dedicated scheduler package / struct.
	taskToSlave := make(map[string]string)
	scheduleTask := func(offer mesos.Offer) *mesos.TaskInfo {
		slave := slaves.Front()
		if slave == nil {
			return nil
		}
		glog.V(2).Infof("Scheduling slave '%s'", slave.Value.(string))
		slaves.Remove(slave)

		task := "angstrom-task-" + strconv.Itoa(taskId)
		taskToSlave[task] = slave.Value.(string)

		return &mesos.TaskInfo{
			Name: proto.String("angstrom-task"),
			TaskId: &mesos.TaskID{
				Value: proto.String(task),
			},
			SlaveId:  offer.SlaveId,
			Executor: executor,
			Data:     []byte("{\"slave\": \"" + slave.Value.(string) + "\"}"),
			Resources: []*mesos.Resource{
				mesos.ScalarResource("cpus", 0.5),
				mesos.ScalarResource("mem", 32),
			},
		}
	}

	driver := mesos.SchedulerDriver{
		Master: *master,
		Framework: mesos.FrameworkInfo{
			Name: proto.String("Angstrom metrics"),
			User: proto.String(""),
		},
		Scheduler: &mesos.Scheduler{
			ResourceOffers: func(driver *mesos.SchedulerDriver, offers []mesos.Offer) {
				for _, offer := range offers {
					taskId++
					tasks := make([]mesos.TaskInfo, 0)
					task := scheduleTask(offer)
					if task != nil {
						glog.V(2).Infof("Launching on offer %v", offer.Id)
						tasks = append(tasks, *task)
						driver.LaunchTasks(offer.Id, tasks)
					} else {
						glog.V(2).Infof("Declining offer %v", offer.Id)
						driver.DeclineOffer(offer.Id)
					}
				}
			},
			FrameworkMessage: func(driver *mesos.SchedulerDriver, _executorId mesos.ExecutorID, slaveId mesos.SlaveID, data string) {
				var target []payload.StatisticsInfo
				err := json.Unmarshal([]byte(data), &target)
				if err != nil {
					return
				}
				cluster.AddSlaveSamples(slaveId, target)
			},
			StatusUpdate: func(driver *mesos.SchedulerDriver, status mesos.TaskStatus) {
				// TODO(nnielsen): Re-add slave task to queue in case of any terminal state.
				if *status.State == mesos.TaskState_TASK_RUNNING {
				} else if *status.State == mesos.TaskState_TASK_FINISHED {
				}
			},
		},
	}

	driver.Init()
	defer driver.Destroy()
	driver.Start()

	endpoints.Initialize(defaultPort, *angstromPath, cluster)

	glog.V(2).Infof("Waiting for threads to join")
	driver.Join()
}