This article collects typical usage examples of the Golang method github.com/mesos/mesos-go/scheduler.SchedulerDriver.LaunchTasks. If you are wondering how SchedulerDriver.LaunchTasks is used in practice, or are looking for concrete examples of calling it, the curated code samples here may help. You can also explore further usage examples of the type this method belongs to, github.com/mesos/mesos-go/scheduler.SchedulerDriver.
Below are 15 code examples of the SchedulerDriver.LaunchTasks method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Golang code examples.
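Before the individual examples, here is a minimal, self-contained sketch of the call itself. It only illustrates the signature as it is used in the examples below (a slice of offer IDs, a slice of task infos, and a Filters message, returning a driver status and an error); the helper name launchOne, the import paths, and the one-second refusal filter are illustrative assumptions, not taken from any of the examples.

package example

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	mesos "github.com/mesos/mesos-go/mesosproto"
	sched "github.com/mesos/mesos-go/scheduler"
)

// launchOne asks the driver to start a single prepared TaskInfo against a single offer,
// refusing the remainder of the offer's resources for one second.
func launchOne(driver sched.SchedulerDriver, offer *mesos.Offer, task *mesos.TaskInfo) error {
	status, err := driver.LaunchTasks(
		[]*mesos.OfferID{offer.Id},
		[]*mesos.TaskInfo{task},
		&mesos.Filters{RefuseSeconds: proto.Float64(1)},
	)
	if err != nil {
		return err
	}
	if status != mesos.Status_DRIVER_RUNNING {
		return fmt.Errorf("driver not running while launching task: %v", status)
	}
	return nil
}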
Example 1: acceptOffer
func (sc *SchedulerCore) acceptOffer(driver sched.SchedulerDriver, offer *mesos.Offer, operations []*mesos.Offer_Operation) {
	log.Infof("Accepting OfferID: %+v, Operations: %+v", *offer.Id.Value, operations)

	var status mesos.Status
	var err error

	if sc.compatibilityMode {
		tasks := []*mesos.TaskInfo{}
		for _, operation := range operations {
			if *operation.Type == mesos.Offer_Operation_LAUNCH {
				tasks = operation.Launch.TaskInfos
			}
		}
		status, err = driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(OFFER_INTERVAL)})
	} else {
		status, err = driver.AcceptOffers([]*mesos.OfferID{offer.Id}, operations, &mesos.Filters{RefuseSeconds: proto.Float64(OFFER_INTERVAL)})
	}

	if status != mesos.Status_DRIVER_RUNNING {
		log.Fatal("Driver not running, while trying to accept offers")
	}
	if err != nil {
		log.Panic("Failed to launch tasks: ", err)
	}
}
Example 2: launchTask
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syslog-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) //shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))

	tcpPort := uint64(s.getPort(Config.TcpPort, offer, -1))
	udpPort := uint64(s.getPort(Config.UdpPort, offer, int(tcpPort)))

	task := &mesos.TaskInfo{
		Name: proto.String(taskName),
		TaskId: taskId,
		SlaveId: offer.GetSlaveId(),
		Executor: s.createExecutor(offer, tcpPort, udpPort),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(tcpPort, tcpPort)}),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(udpPort, udpPort)}),
		},
		Data: data,
		Labels: utils.StringToLabels(s.labels),
	}

	s.cluster.Add(offer.GetSlaveId().GetValue(), task)

	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
Example 3: ResourceOffers
func (sched *Scheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	logOffers(offers)

	jobs, err := getLaunchableJobs()
	if err != nil {
		log.Errorf("Unable to get pending jobs! %s\n", err.Error())
		return
	}

	offersAndTasks, err := packJobsInOffers(jobs, offers)
	if err != nil {
		log.Errorf("Unable to pack jobs into offers! %s\n", err.Error())
		return
	}

	for _, ot := range offersAndTasks {
		if len(ot.Tasks) == 0 {
			log.Infof("Declining unused offer %s", ot.Offer.Id.GetValue())
			driver.DeclineOffer(ot.Offer.Id, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
			continue
		} else {
			log.Infof("Launching %d tasks for offer %s\n", len(ot.Tasks), ot.Offer.Id.GetValue())
			driver.LaunchTasks([]*mesos.OfferID{ot.Offer.Id}, ot.Tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
			sched.tasksLaunched = sched.tasksLaunched + len(ot.Tasks)
		}
	}
}
Example 4: ResourceOffers
func (s *MemcacheScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	log.Printf("Received %d resource offers", len(offers))

	for _, offer := range offers {
		select {
		case <-s.shutdown:
			log.Println("Shutting down: declining offer on [", offer.Hostname, "]")
			driver.DeclineOffer(offer.Id, defaultFilter)
			if s.tasksRunning == 0 {
				close(s.done)
			}
			continue
		default:
		}

		tasks := []*mesos.TaskInfo{}

		if canLaunchNewTask(offer) && s.shouldLaunchNewTask() {
			fmt.Println("Accepting Offer: ", offer)
			task := s.newMemcacheTask(offer)
			tasks = append(tasks, task)
		}

		if len(tasks) == 0 {
			driver.DeclineOffer(offer.Id, defaultFilter)
		} else {
			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, defaultFilter)
			s.lastTaskCreatedAt = time.Now()
			return // limit one at a time
		}
	}
}
Example 5: launchTask
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syscol-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) //shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))

	task := &mesos.TaskInfo{
		Name: proto.String(taskName),
		TaskId: taskId,
		SlaveId: offer.GetSlaveId(),
		Executor: s.createExecutor(offer.GetSlaveId().GetValue()),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
		},
		Data: data,
	}

	s.cluster.Add(offer.GetSlaveId().GetValue(), task)

	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
Example 6: ResourceOffers
// ResourceOffers handles the Resource Offers
func (s *Scheduler) ResourceOffers(driver mesossched.SchedulerDriver, offers []*mesosproto.Offer) {
	logrus.WithField("offers", len(offers)).Debug("Received offers")
	var offer *mesosproto.Offer

loop:
	for len(offers) > 0 {
		select {
		case <-s.shutdown:
			logrus.Info("Shutting down: declining offers")
			break loop
		case tid := <-s.tasks:
			logrus.WithField("task_id", tid).Debug("Trying to find offer to launch task with")
			t, _ := s.database.ReadUnmaskedTask(tid)

			if t.IsTerminating() {
				logrus.Debug("Dropping terminating task.")
				t.UpdateStatus(eremetic.Status{
					Status: eremetic.TaskKilled,
					Time: time.Now().Unix(),
				})
				s.database.PutTask(&t)
				continue
			}

			offer, offers = matchOffer(t, offers)
			if offer == nil {
				logrus.WithField("task_id", tid).Warn("Unable to find a matching offer")
				tasksDelayed.Inc()
				go func() { s.tasks <- tid }()
				break loop
			}

			logrus.WithFields(logrus.Fields{
				"task_id": tid,
				"offer_id": offer.Id.GetValue(),
			}).Debug("Preparing to launch task")

			t, task := createTaskInfo(t, offer)
			t.UpdateStatus(eremetic.Status{
				Status: eremetic.TaskStaging,
				Time: time.Now().Unix(),
			})
			s.database.PutTask(&t)
			driver.LaunchTasks([]*mesosproto.OfferID{offer.Id}, []*mesosproto.TaskInfo{task}, defaultFilter)
			tasksLaunched.Inc()
			queueSize.Dec()

			continue
		default:
			break loop
		}
	}

	logrus.Debug("No tasks to launch. Declining offers.")
	for _, offer := range offers {
		driver.DeclineOffer(offer.Id, defaultFilter)
	}
}
Example 7: ResourceOffers
func (s *rancherScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	task := tasks.GetNextTask()
	if task == nil {
		for _, of := range offers {
			driver.DeclineOffer(of.Id, defaultFilter)
		}
		return
	}

	if task.RegistrationUrl == "" {
		tasks.AddTask(task)
		for _, of := range offers {
			driver.DeclineOffer(of.Id, defaultFilter)
		}
		return
	}

	taskBytes, err := task.Marshal()
	if err != nil {
		log.WithFields(log.Fields{
			"err": err,
		}).Error("Error Marshalling task")
		for _, of := range offers {
			driver.DeclineOffer(of.Id, defaultFilter)
		}
		return
	}

	for _, offer := range offers {
		inadequate := false
		for _, res := range offer.GetResources() {
			if res.GetName() == "cpus" && *res.GetScalar().Value < taskCPUs {
				driver.DeclineOffer(offer.Id, defaultFilter)
				inadequate = true
				continue
			}
			if res.GetName() == "mem" && *res.GetScalar().Value < taskMem {
				driver.DeclineOffer(offer.Id, defaultFilter)
				inadequate = true
				continue
			}
		}
		if inadequate {
			continue
		}

		mesosTask := &mesos.TaskInfo{
			TaskId: &mesos.TaskID{
				Value: proto.String(task.HostUuid),
			},
			SlaveId: offer.SlaveId,
			Resources: []*mesos.Resource{
				mesosutil.NewScalarResource("cpus", taskCPUs),
				mesosutil.NewScalarResource("mem", taskMem),
			},
			Data: taskBytes,
			Name: &task.Name,
			Executor: s.rancherExecutor,
		}

		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, []*mesos.TaskInfo{mesosTask}, defaultFilter)
	}
}
Example 8: ResourceOffers
func (sched *ExampleScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	logOffers(offers)

	for _, offer := range offers {
		remainingCpus := getOfferCpu(offer)
		remainingMems := getOfferMem(offer)

		var tasks []*mesos.TaskInfo
		for sched.cpuPerTask <= remainingCpus &&
			sched.memPerTask <= remainingMems &&
			sched.tasksLaunched < sched.totalTasks {

			fmt.Printf("Tasks launched: %v Total tasks: %v\n", sched.tasksLaunched, sched.totalTasks)
			sched.tasksLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
			}

			dockerInfo := &mesos.ContainerInfo_DockerInfo{
				Image: &sched.DockerImage,
				PortMappings: sched.DockerPorts,
			}

			containerType := mesos.ContainerInfo_DOCKER
			containerInfo := &mesos.ContainerInfo{
				Type: &containerType,
				Docker: dockerInfo,
			}

			commandInfo := &mesos.CommandInfo{
				Value: &sched.DockerCommand,
			}

			task := &mesos.TaskInfo{
				Name: proto.String("go-task-" + taskId.GetValue()),
				TaskId: taskId,
				SlaveId: offer.SlaveId,
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", sched.cpuPerTask),
					util.NewScalarResource("mem", sched.memPerTask),
				},
				Container: containerInfo,
				Command: commandInfo,
			}
			fmt.Printf("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= sched.cpuPerTask
			remainingMems -= sched.memPerTask
		}
		// fmt.Println("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
Example 9: ResourceOffers
func (sched *Scheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		taskId := &mesos.TaskID{
			Value: proto.String(fmt.Sprintf("basicdocker-task-%d", time.Now().Unix())),
		}

		ports := util.FilterResources(
			offer.Resources,
			func(res *mesos.Resource) bool {
				return res.GetName() == "ports"
			},
		)
		if len(ports) > 0 && len(ports[0].GetRanges().GetRange()) > 0 {
		} else {
			return
		}

		task := &mesos.TaskInfo{
			Name: proto.String(taskId.GetValue()),
			TaskId: taskId,
			SlaveId: offer.SlaveId,
			Container: &mesos.ContainerInfo{
				Type: mesos.ContainerInfo_DOCKER.Enum(),
				Volumes: nil,
				Hostname: nil,
				Docker: &mesos.ContainerInfo_DockerInfo{
					Image: &DOCKER_IMAGE_DEFAULT,
					Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
				},
			},
			Command: &mesos.CommandInfo{
				Shell: proto.Bool(true),
				Value: proto.String("set -x ; /bin/date ; /bin/hostname ; sleep 200 ; echo done"),
			},
			Executor: nil,
			Resources: []*mesos.Resource{
				util.NewScalarResource("cpus", getOfferCpu(offer)),
				util.NewScalarResource("mem", getOfferMem(offer)),
				util.NewRangesResource("ports", []*mesos.Value_Range{
					util.NewValueRange(
						*ports[0].GetRanges().GetRange()[0].Begin,
						*ports[0].GetRanges().GetRange()[0].Begin+1,
					),
				}),
			},
		}

		log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())
		var tasks []*mesos.TaskInfo = []*mesos.TaskInfo{task}
		log.Infoln("Launching ", len(tasks), " tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		sched.tasksLaunched++
		time.Sleep(time.Second)
	}
}
Example 10: ResourceOffers
func (sched *ExampleScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "cpus"
		})
		cpus := 0.0
		for _, res := range cpuResources {
			cpus += res.GetScalar().GetValue()
		}

		memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
			return res.GetName() == "mem"
		})
		mems := 0.0
		for _, res := range memResources {
			mems += res.GetScalar().GetValue()
		}

		log.Infoln("Received Offer <", offer.Id.GetValue(), "> with cpus=", cpus, " mem=", mems)

		remainingCpus := cpus
		remainingMems := mems

		var tasks []*mesos.TaskInfo
		for sched.tasksLaunched < sched.totalTasks &&
			CPUS_PER_TASK <= remainingCpus &&
			MEM_PER_TASK <= remainingMems {

			sched.tasksLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
			}

			task := &mesos.TaskInfo{
				Name: proto.String("go-task-" + taskId.GetValue()),
				TaskId: taskId,
				SlaveId: offer.SlaveId,
				Executor: sched.executor,
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", CPUS_PER_TASK),
					util.NewScalarResource("mem", MEM_PER_TASK),
				},
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= CPUS_PER_TASK
			remainingMems -= MEM_PER_TASK
		}
		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
Example 11: LaunchTask
func (ctx *RunOnceApplicationContext) LaunchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) error {
	ctx.lock.Lock()
	defer ctx.lock.Unlock()

	ctx.InstancesLeftToRun--
	taskInfo := ctx.newTaskInfo(offer)
	ctx.tasks = append(ctx.tasks, newRunOnceTask(offer, taskInfo.GetTaskId().GetValue()))

	_, err := driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{taskInfo}, &mesos.Filters{RefuseSeconds: proto.Float64(10)})
	return err
}
Example 12: ResourceOffers
// mesos.Scheduler interface method.
// Invoked when resources have been offered to this framework.
func (this *ElodinaTransportScheduler) ResourceOffers(driver scheduler.SchedulerDriver, offers []*mesos.Offer) {
	log.Logger.Info("Received offers")
	offersAndTasks := make(map[*mesos.Offer][]*mesos.TaskInfo)

	remainingPartitions, err := this.GetTopicPartitions()
	if err != nil {
		return
	}
	remainingPartitions.RemoveAll(this.TakenTopicPartitions.GetArray())
	log.Logger.Debug("%v", remainingPartitions)
	tps := remainingPartitions.GetArray()
	offersAndResources := this.wrapInOfferAndResources(offers)

	for !remainingPartitions.IsEmpty() {
		log.Logger.Debug("Iteration %v", remainingPartitions)
		if this.hasEnoughInstances() {
			for _, transfer := range this.taskIdToTaskState {
				if len(transfer.assignment) < this.config.ThreadsPerTask {
					transfer.assignment = append(transfer.assignment, tps[0])
					remainingPartitions.Remove(tps[0])
					this.TakenTopicPartitions.Add(tps[0])
					if len(tps) > 1 {
						tps = tps[1:]
					} else {
						tps = []consumer.TopicAndPartition{}
					}
				}
			}
		} else {
			log.Logger.Debug("Trying to launch new task")
			offer, task := this.launchNewTask(offersAndResources)
			if offer != nil && task != nil {
				offersAndTasks[offer] = append(offersAndTasks[offer], task)
			} else {
				for _, offer := range offers {
					if _, exists := offersAndTasks[offer]; !exists {
						offersAndTasks[offer] = make([]*mesos.TaskInfo, 0)
					}
				}
				break
			}
		}
	}

	this.assignPendingPartitions()

	for _, offer := range offers {
		if tasks, ok := offersAndTasks[offer]; ok {
			driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		} else {
			driver.DeclineOffer(offer.Id, &mesos.Filters{RefuseSeconds: proto.Float64(10)})
		}
	}
}
Example 13: ResourceOffers
func (sc *SchedulerCore) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	sc.lock.Lock()
	defer sc.lock.Unlock()
	log.Info("Received resource offers: ", offers)

	launchTasks := make(map[string][]*mesos.TaskInfo)
	toBeScheduled := []*FrameworkRiakNode{}
	for _, cluster := range sc.schedulerState.Clusters {
		for _, riakNode := range cluster.Nodes {
			if riakNode.NeedsToBeScheduled() {
				log.Infof("Adding Riak node for scheduling: %+v", riakNode)
				// We need to schedule this task I guess?
				toBeScheduled = append(toBeScheduled, riakNode)
			}
		}
	}

	// Populate a mutable slice of offer resources
	allResources := [][]*mesos.Resource{}
	for _, offer := range offers {
		allResources = append(allResources, offer.Resources)
	}

	launchTasks, err := sc.spreadNodesAcrossOffers(offers, allResources, toBeScheduled, 0, 0, launchTasks)
	if err != nil {
		log.Error(err)
	}

	for _, offer := range offers {
		tasks := launchTasks[*offer.Id.Value]
		if tasks == nil {
			tasks = []*mesos.TaskInfo{}
		}
		// This is somewhat of a hack, to avoid synchronously calling the mesos-go SDK
		// to avoid a deadlock situation.
		// TODO: Fix and make actually queues around driver interactions
		// This is a massive hack
		// -Sargun Dhillon 2015-10-01
		go func(offer *mesos.Offer) {
			log.Infof("Launching Tasks: %v for offer %v", tasks, *offer.Id.Value)
			status, err := driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(OFFER_INTERVAL)})
			if status != mesos.Status_DRIVER_RUNNING {
				log.Fatal("Driver not running, while trying to launch tasks")
			}
			if err != nil {
				log.Panic("Failed to launch tasks: ", err)
			}
		}(offer)
	}
}
Example 14: ResourceOffers
func (s *DiegoScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	logOffers(offers)

	s.offersLock.Lock()
	defer s.offersLock.Unlock()

	if s.holdOffer {
		s.offers = append(s.offers, offers...)
	} else {
		offerIds := extractOfferIds(offers)
		// Launching with a nil task list releases the offers, applying the 30s refusal filter.
		driver.LaunchTasks(offerIds, nil, &mesos.Filters{RefuseSeconds: proto.Float64(30)})
	}
}
Example 15: ResourceOffers
func (sched *ScraperScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	logOffers(offers)

	for _, offer := range offers {
		if sched.tasksLaunched >= sched.totalTasks || len(sched.urls) == 0 {
			log.Infof("Declining offer %s", offer.Id.GetValue())
			driver.DeclineOffer(offer.Id, &mesos.Filters{})
			continue
		}

		remainingCpus := getOfferCpu(offer)
		remainingMems := getOfferMem(offer)

		var tasks []*mesos.TaskInfo
		for sched.cpuPerTask <= remainingCpus &&
			sched.memPerTask <= remainingMems &&
			sched.tasksLaunched < sched.totalTasks {

			log.Infof("Processing url %v of %v\n", sched.tasksLaunched, sched.totalTasks)
			log.Infof("Total Tasks: %d", sched.totalTasks)
			log.Infof("Tasks Launched: %d", sched.tasksLaunched)
			uri := sched.urls[sched.tasksLaunched]
			log.Infof("URI: %s", uri)
			sched.tasksLaunched++

			taskId := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
			}

			task := &mesos.TaskInfo{
				Name: proto.String("go-task-" + taskId.GetValue()),
				TaskId: taskId,
				SlaveId: offer.SlaveId,
				Executor: sched.executor,
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", sched.cpuPerTask),
					util.NewScalarResource("mem", sched.memPerTask),
				},
				Data: []byte(uri),
			}
			log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())

			tasks = append(tasks, task)
			remainingCpus -= sched.cpuPerTask
			remainingMems -= sched.memPerTask
		}
		log.Infoln("Launching ", len(tasks), "tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}