本文整理匯總了Golang中github.com/mesos/mesos-go/scheduler.SchedulerDriver類的典型用法代碼示例。如果您正苦於以下問題:Golang SchedulerDriver類的具體用法?Golang SchedulerDriver怎麽用?Golang SchedulerDriver使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
在下文中一共展示了SchedulerDriver類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: Reregistered
// Reregistered is invoked when the framework re-registers with a (possibly
// new) Mesos master. It immediately requests task reconciliation so the
// scheduler can resynchronize its view of task state.
func (sched *ExampleScheduler) Reregistered(driver sched.SchedulerDriver, masterInfo *mesos.MasterInfo) {
	log.Infoln("Framework Re-Registered with Master ", masterInfo)
	if _, err := driver.ReconcileTasks([]*mesos.TaskStatus{}); err != nil {
		log.Errorf("failed to request task reconciliation: %v", err)
	}
}
示例2: StatusUpdate
// StatusUpdate handles task status updates from Mesos. Every update is
// forwarded on eventCh; finished tasks are counted and the driver is stopped
// once all tasks have completed. A task landing in a terminal failure state
// (lost/killed/failed/error) sets a non-zero exit status and also stops the
// driver.
func (sched *MesosRunonceScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	log.V(1).Infoln("Status update: task", status.TaskId.GetValue(), " is in state ", status.State.Enum().String())
	eventCh <- status

	state := status.GetState()
	if state == mesos.TaskState_TASK_FINISHED {
		sched.tasksFinished++
	}
	if sched.tasksFinished >= sched.totalTasks {
		log.V(1).Infoln("Total tasks completed, stopping framework.")
		driver.Stop(false)
	}

	switch state {
	case mesos.TaskState_TASK_LOST,
		mesos.TaskState_TASK_KILLED,
		mesos.TaskState_TASK_FAILED,
		mesos.TaskState_TASK_ERROR:
		exitStatus = 1
		log.Warningf("mesos TaskStatus: %v", status)
		driver.Stop(false)
		log.Errorln(
			"Aborting because task", status.TaskId.GetValue(),
			"is in unexpected state", status.State.String(),
			"with message.", status.GetMessage(),
		)
	}
}
示例3: ResourceOffers
// ResourceOffers handles incoming resource offers: pending jobs are packed
// into the offers and launched; any offer that ends up with no tasks is
// declined so other frameworks can use it.
func (sched *Scheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	logOffers(offers)

	jobs, err := getLaunchableJobs()
	if err != nil {
		log.Errorf("Unable to get pending jobs! %s\n", err.Error())
		return
	}

	offersAndTasks, err := packJobsInOffers(jobs, offers)
	if err != nil {
		log.Errorf("Unable to pack jobs into offers! %s\n", err.Error())
		return
	}

	for _, ot := range offersAndTasks {
		// Idiom fix: the original had a redundant `else` after `continue`;
		// the guard clause keeps the launch path un-nested.
		if len(ot.Tasks) == 0 {
			log.Infof("Declining unused offer %s", ot.Offer.Id.GetValue())
			driver.DeclineOffer(ot.Offer.Id, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
			continue
		}
		log.Infof("Launching %d tasks for offer %s\n", len(ot.Tasks), ot.Offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{ot.Offer.Id}, ot.Tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		sched.tasksLaunched += len(ot.Tasks)
	}
}
示例4: acceptOffer
// acceptOffer executes the given operations against an offer. In
// compatibility mode (masters without the ACCEPT call) the tasks from all
// LAUNCH operations are launched via LaunchTasks; otherwise the operations
// are sent as-is via AcceptOffers. A non-running driver is fatal; a launch
// error panics.
func (sc *SchedulerCore) acceptOffer(driver sched.SchedulerDriver, offer *mesos.Offer, operations []*mesos.Offer_Operation) {
	log.Infof("Accepting OfferID: %+v, Operations: %+v", *offer.Id.Value, operations)
	var status mesos.Status
	var err error
	if sc.compatibilityMode {
		// BUG FIX: the original assigned `tasks = operation.Launch.TaskInfos`
		// on each matching operation, silently dropping the tasks of every
		// LAUNCH operation except the last. Accumulate them all instead.
		tasks := []*mesos.TaskInfo{}
		for _, operation := range operations {
			if *operation.Type == mesos.Offer_Operation_LAUNCH {
				tasks = append(tasks, operation.Launch.TaskInfos...)
			}
		}
		status, err = driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(OFFER_INTERVAL)})
	} else {
		status, err = driver.AcceptOffers([]*mesos.OfferID{offer.Id}, operations, &mesos.Filters{RefuseSeconds: proto.Float64(OFFER_INTERVAL)})
	}
	if status != mesos.Status_DRIVER_RUNNING {
		log.Fatal("Driver not running, while trying to accept offers")
	}
	if err != nil {
		log.Panic("Failed to launch tasks: ", err)
	}
}
示例5: onInitialRegistration
// onInitialRegistration performs one-time initialization actions upon the
// first registration event received from Mesos:
//  1. if failover is enabled, start a background loop that periodically
//     persists the framework ID, and block until the first store attempt;
//  2. compose and start the task/pod registry reconcilers;
//  3. install the debug HTTP handlers.
// Closing k.registration (deferred) signals other components that
// registration has completed.
func (k *framework) onInitialRegistration(driver bindings.SchedulerDriver) {
	defer close(k.registration)
	if k.failoverTimeout > 0 {
		// Refresh the stored framework ID frequently enough that it cannot
		// lapse while we are alive: cap the refresh interval at half the
		// failover timeout (minimum 1s) when the configured interval is
		// longer than the failover timeout.
		refreshInterval := k.schedulerConfig.FrameworkIdRefreshInterval.Duration
		if k.failoverTimeout < k.schedulerConfig.FrameworkIdRefreshInterval.Duration.Seconds() {
			refreshInterval = time.Duration(math.Max(1, k.failoverTimeout/2)) * time.Second
		}
		// wait until we've written the framework ID at least once before proceeding
		firstStore := make(chan struct{})
		go runtime.Until(func() {
			// only close firstStore once; on later iterations the receive
			// succeeds and the deferred close is skipped
			select {
			case <-firstStore:
			default:
				defer close(firstStore)
			}
			err := k.storeFrameworkId(context.TODO(), k.frameworkId.GetValue())
			if err != nil {
				log.Errorf("failed to store framework ID: %v", err)
				if err == frameworkid.ErrMismatch {
					// we detected a framework ID in storage that doesn't match what we're trying
					// to save. this is a dangerous state:
					// (1) perhaps we failed to initially recover the framework ID and so mesos
					// issued us a new one. now that we're trying to save it there's a mismatch.
					// (2) we've somehow bungled the framework ID and we're out of alignment with
					// what mesos is expecting.
					// (3) multiple schedulers were launched at the same time, and both have
					// registered with mesos (because when they each checked, there was no ID in
					// storage, so they asked for a new one). one of them has already written the
					// ID to storage -- we lose.
					log.Error("aborting due to framework ID mismatch")
					driver.Abort()
				}
			}
		}, refreshInterval, k.terminate)
		// wait for the first store attempt of the framework ID
		select {
		case <-firstStore:
		case <-k.terminate:
		}
	}
	// Combine the task- and pod-registry reconcilers into one explicit
	// reconciler and run it until the framework terminates.
	r1 := k.makeTaskRegistryReconciler()
	r2 := k.makePodRegistryReconciler()
	k.tasksReconciler = taskreconciler.New(k.asRegisteredMaster, taskreconciler.MakeComposite(k.terminate, r1, r2),
		k.reconcileCooldown, k.schedulerConfig.ExplicitReconciliationAbortTimeout.Duration, k.terminate)
	go k.tasksReconciler.Run(driver, k.terminate)
	if k.reconcileInterval > 0 {
		// After an initial delay, request implicit reconciliation from Mesos
		// on a fixed interval until terminated.
		ri := time.Duration(k.reconcileInterval) * time.Second
		time.AfterFunc(k.schedulerConfig.InitialImplicitReconciliationDelay.Duration, func() { runtime.Until(k.tasksReconciler.RequestImplicit, ri, k.terminate) })
		log.Infof("will perform implicit task reconciliation at interval: %v after %v", ri, k.schedulerConfig.InitialImplicitReconciliationDelay.Duration)
	}
	k.installDebugHandlers(k.mux)
}
示例6: launchTask
// launchTask builds a syslog TaskInfo for the given offer — allocating a TCP
// and a UDP port from the offer's port resources — registers it in the
// cluster state, and asks the driver to launch it.
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syslog-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	// The executor receives the full config as serialized task data.
	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) //shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))

	tcpPort := uint64(s.getPort(Config.TcpPort, offer, -1))
	// Exclude the already-chosen TCP port when picking the UDP port.
	udpPort := uint64(s.getPort(Config.UdpPort, offer, int(tcpPort)))

	task := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: s.createExecutor(offer, tcpPort, udpPort),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(tcpPort, tcpPort)}),
			util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(udpPort, udpPort)}),
		},
		Data:   data,
		Labels: utils.StringToLabels(s.labels),
	}

	s.cluster.Add(offer.GetSlaveId().GetValue(), task)
	// ROBUSTNESS FIX: the original discarded the LaunchTasks error; surface
	// launch failures in the log instead of dropping them silently.
	if _, err := driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)}); err != nil {
		Logger.Errorf("Failed to launch task %s: %s", taskName, err)
	}
}
示例7: StatusUpdate
// StatusUpdate routes task status updates to the CommandHandler: running,
// finished and failed states each trigger the corresponding handler calls.
// Repeated updates for the same state are ignored. Once the command queue is
// closed and no tasks remain running, the framework is stopped.
func (sched *NoneScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	taskId := status.GetTaskId().GetValue()
	log.Infoln("Status update: task", taskId, "is in state", status.State.Enum().String())

	c := sched.queue.GetCommandById(taskId)
	if c == nil {
		log.Errorln("Unable to find command for task", taskId)
		driver.Abort()
		// BUG FIX: the original fell through after Abort and dereferenced
		// the nil command below, causing a nil-pointer panic.
		return
	}
	if c.Status.GetState() == status.GetState() {
		// ignore repeated status updates
		return
	}
	c.Status = status

	// send status update to CommandHandler
	if status.GetState() == mesos.TaskState_TASK_RUNNING {
		sched.handler.CommandRunning(c)
	} else if status.GetState() == mesos.TaskState_TASK_FINISHED {
		sched.handler.CommandEnded(c)
		sched.handler.CommandFinished(c)
	} else if status.GetState() == mesos.TaskState_TASK_FAILED ||
		status.GetState() == mesos.TaskState_TASK_LOST ||
		status.GetState() == mesos.TaskState_TASK_KILLED {
		sched.handler.CommandEnded(c)
		sched.handler.CommandFailed(c)
	}

	// stop if Commands channel was closed and all tasks are finished
	if sched.queue.Closed() && !sched.handler.HasRunningTasks() {
		log.Infoln("All tasks finished, stopping framework.")
		sched.handler.FinishAllCommands()
		driver.Stop(false)
	}
}
示例8: launchTask
// launchTask builds a syscol TaskInfo for the given offer, registers it in
// the cluster state, and asks the driver to launch it on the offer's slave.
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	taskName := fmt.Sprintf("syscol-%s", offer.GetSlaveId().GetValue())
	taskId := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	// The executor receives the full config as serialized task data.
	data, err := json.Marshal(Config)
	if err != nil {
		panic(err) //shouldn't happen
	}
	Logger.Debugf("Task data: %s", string(data))

	task := &mesos.TaskInfo{
		Name:     proto.String(taskName),
		TaskId:   taskId,
		SlaveId:  offer.GetSlaveId(),
		Executor: s.createExecutor(offer.GetSlaveId().GetValue()),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", Config.Cpus),
			util.NewScalarResource("mem", Config.Mem),
		},
		Data: data,
	}

	s.cluster.Add(offer.GetSlaveId().GetValue(), task)
	// ROBUSTNESS FIX: the original discarded the LaunchTasks error; surface
	// launch failures in the log instead of dropping them silently.
	if _, err := driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)}); err != nil {
		Logger.Errorf("Failed to launch task %s: %s", taskName, err)
	}
}
示例9: StatusUpdate
// StatusUpdate logs every task state change, counts finished tasks, resets
// the launch counters once all tasks have completed (the framework keeps
// running instead of stopping), and aborts the driver when a task lands in
// an unexpected terminal state.
func (sched *SdcScheduler) StatusUpdate(driver sched.SchedulerDriver, status *mesos.TaskStatus) {
	log.Infoln("Status update: task", status.TaskId.GetValue(), " is in state ", status.State.Enum().String())

	state := status.GetState()
	if state == mesos.TaskState_TASK_FINISHED {
		sched.tasksFinished++
		// Running KillTask here makes Mesos report TASK_LOST and brings the
		// framework down, so it is intentionally disabled.
		// driver.KillTask(status.TaskId)
		// log.Infoln("!! Status update: task", status.TaskId.GetValue(), " is in state ", status.State.Enum().String())
		// return
	}

	if sched.tasksFinished >= sched.totalTasks {
		// log.Infoln("Total tasks completed, stopping framework.")
		log.Infoln("Total tasks completed.")
		// Reset the counters instead of stopping the driver so the
		// framework stays up and can accept further work.
		sched.tasksFinished = 0
		sched.totalTasks = 0
		sched.tasksLaunched = 0
		// driver.Stop(false)
	}

	switch state {
	case mesos.TaskState_TASK_LOST,
		mesos.TaskState_TASK_KILLED,
		mesos.TaskState_TASK_FAILED,
		mesos.TaskState_TASK_ERROR:
		log.Infoln(
			"Aborting because task", status.TaskId.GetValue(),
			"is in unexpected state", status.State.String(),
			"with message", status.GetMessage(),
		)
		driver.Abort()
	}
}
示例10: ResourceOffers
// ResourceOffers handles the Resource Offers. It drains the pending-task
// channel, matching each task against the available offers and launching it
// on the first match. The loop exits — and all remaining offers are
// declined — when the scheduler is shutting down, when a task finds no
// matching offer (it is re-queued), or when no tasks are pending.
func (s *Scheduler) ResourceOffers(driver mesossched.SchedulerDriver, offers []*mesosproto.Offer) {
	logrus.WithField("offers", len(offers)).Debug("Received offers")
	var offer *mesosproto.Offer
loop:
	for len(offers) > 0 {
		select {
		case <-s.shutdown:
			logrus.Info("Shutting down: declining offers")
			break loop
		case tid := <-s.tasks:
			logrus.WithField("task_id", tid).Debug("Trying to find offer to launch task with")
			// NOTE(review): the ReadUnmaskedTask error is discarded here;
			// presumably a missing task yields a zero-value t — confirm
			// against the database implementation.
			t, _ := s.database.ReadUnmaskedTask(tid)
			if t.IsTerminating() {
				// A kill was requested before the task ever launched;
				// record it as killed and drop it.
				logrus.Debug("Dropping terminating task.")
				t.UpdateStatus(eremetic.Status{
					Status: eremetic.TaskKilled,
					Time:   time.Now().Unix(),
				})
				s.database.PutTask(&t)
				continue
			}
			// matchOffer returns the matched offer (nil if none) and the
			// remaining unmatched offers.
			offer, offers = matchOffer(t, offers)
			if offer == nil {
				logrus.WithField("task_id", tid).Warn("Unable to find a matching offer")
				tasksDelayed.Inc()
				// Re-queue asynchronously so this handler doesn't block on a
				// full channel.
				go func() { s.tasks <- tid }()
				break loop
			}
			logrus.WithFields(logrus.Fields{
				"task_id":  tid,
				"offer_id": offer.Id.GetValue(),
			}).Debug("Preparing to launch task")
			t, task := createTaskInfo(t, offer)
			t.UpdateStatus(eremetic.Status{
				Status: eremetic.TaskStaging,
				Time:   time.Now().Unix(),
			})
			s.database.PutTask(&t)
			driver.LaunchTasks([]*mesosproto.OfferID{offer.Id}, []*mesosproto.TaskInfo{task}, defaultFilter)
			tasksLaunched.Inc()
			queueSize.Dec()
			continue
		default:
			// No pending tasks right now; stop consuming offers.
			break loop
		}
	}
	logrus.Debug("No tasks to launch. Declining offers.")
	for _, offer := range offers {
		driver.DeclineOffer(offer.Id, defaultFilter)
	}
}
示例11: failover
// failover performs a graceful scheduler failover: it stops the driver with
// failover enabled (so Mesos keeps the framework registration alive), then
// re-execs the current binary as a detached child process with the same
// flags and exits. It only returns on a driver-stop error; otherwise the
// process terminates via os.Exit.
func (s *SchedulerServer) failover(driver bindings.SchedulerDriver, hks hyperkube.Interface) error {
	if driver != nil {
		// Stop(true) preserves the framework registration for the successor.
		stat, err := driver.Stop(true)
		if stat != mesos.Status_DRIVER_STOPPED {
			return fmt.Errorf("failed to stop driver for failover, received unexpected status code: %v", stat)
		} else if err != nil {
			return err
		}
	}

	// there's no guarantee that all goroutines are actually programmed intelligently with 'done'
	// signals, so we'll need to restart if we want to really stop everything

	// run the same command that we were launched with
	//TODO(jdef) assumption here is that the scheduler is the only service running in this process, we should probably validate that somehow
	args := []string{}
	flags := pflag.CommandLine
	if hks != nil {
		args = append(args, hks.Name())
		flags = hks.Flags()
	}
	// Copy every explicitly-set flag except the server lists, which are
	// re-appended below from the canonical in-memory values.
	flags.Visit(func(flag *pflag.Flag) {
		if flag.Name != "api-servers" && flag.Name != "etcd-servers" {
			args = append(args, fmt.Sprintf("--%s=%s", flag.Name, flag.Value.String()))
		}
	})
	// NOTE(review): the child is always started with --graceful; when the
	// flag was already set, flags.Visit copied it above, so it is only added
	// here when it was absent.
	if !s.Graceful {
		args = append(args, "--graceful")
	}
	if len(s.APIServerList) > 0 {
		args = append(args, "--api-servers="+strings.Join(s.APIServerList, ","))
	}
	if len(s.EtcdServerList) > 0 {
		args = append(args, "--etcd-servers="+strings.Join(s.EtcdServerList, ","))
	}
	args = append(args, flags.Args()...)

	log.V(1).Infof("spawning scheduler for graceful failover: %s %+v", s.executable, args)

	// Detach the child from this process so it survives our exit.
	cmd := exec.Command(s.executable, args...)
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.SysProcAttr = makeDisownedProcAttr()

	// TODO(jdef) pass in a pipe FD so that we can block, waiting for the child proc to be ready
	//cmd.ExtraFiles = []*os.File{}

	exitcode := 0
	log.Flush() // TODO(jdef) it would be really nice to ensure that no one else in our process was still logging
	if err := cmd.Start(); err != nil {
		//log to stdtout here to avoid conflicts with normal stderr logging
		fmt.Fprintf(os.Stdout, "failed to spawn failover process: %v\n", err)
		os.Exit(1)
	}
	os.Exit(exitcode)
	select {} // will never reach here
}
示例12: Registered
// Registered is called when the Scheduler is Registered with the master. On
// the very first registration it triggers a full task reconciliation and
// marks the scheduler initialised; later registrations delegate to Reconcile.
func (s *eremeticScheduler) Registered(driver sched.SchedulerDriver, frameworkID *mesos.FrameworkID, masterInfo *mesos.MasterInfo) {
	log.Debugf("Framework %s registered with master %s", frameworkID.GetValue(), masterInfo.GetHostname())
	if s.initialised {
		s.Reconcile(driver)
		return
	}
	driver.ReconcileTasks([]*mesos.TaskStatus{})
	s.initialised = true
}
示例13: Reregistered
// Reregistered is called when the Scheduler is Reregistered with a master.
// The first (re)registration triggers a full task reconciliation and marks
// the scheduler initialised; subsequent ones delegate to Reconcile.
func (s *eremeticScheduler) Reregistered(driver sched.SchedulerDriver, masterInfo *mesos.MasterInfo) {
	log.Debugf("Framework re-registered with master %s", masterInfo)
	if s.initialised {
		s.Reconcile(driver)
		return
	}
	driver.ReconcileTasks([]*mesos.TaskStatus{})
	s.initialised = true
}
示例14: ResourceOffers
// ResourceOffers packs as many Docker tasks into each offer as its CPU and
// memory resources allow, up to the scheduler's configured total, and
// launches the batch on the offer.
func (sched *ExampleScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	logOffers(offers)
	for _, offer := range offers {
		availCpu := getOfferCpu(offer)
		availMem := getOfferMem(offer)

		var batch []*mesos.TaskInfo
		for sched.cpuPerTask <= availCpu &&
			sched.memPerTask <= availMem &&
			sched.tasksLaunched < sched.totalTasks {
			fmt.Printf("Tasks launched: %v Total tasks: %v\n", sched.tasksLaunched, sched.totalTasks)
			sched.tasksLaunched++

			// Task IDs are simply the running launch count.
			tid := &mesos.TaskID{
				Value: proto.String(strconv.Itoa(sched.tasksLaunched)),
			}
			ctype := mesos.ContainerInfo_DOCKER
			info := &mesos.TaskInfo{
				Name:    proto.String("go-task-" + tid.GetValue()),
				TaskId:  tid,
				SlaveId: offer.SlaveId,
				Resources: []*mesos.Resource{
					util.NewScalarResource("cpus", sched.cpuPerTask),
					util.NewScalarResource("mem", sched.memPerTask),
				},
				Container: &mesos.ContainerInfo{
					Type: &ctype,
					Docker: &mesos.ContainerInfo_DockerInfo{
						Image:        &sched.DockerImage,
						PortMappings: sched.DockerPorts,
					},
				},
				Command: &mesos.CommandInfo{
					Value: &sched.DockerCommand,
				},
			}
			fmt.Printf("Prepared task: %s with offer %s for launch\n", info.GetName(), offer.Id.GetValue())

			batch = append(batch, info)
			availCpu -= sched.cpuPerTask
			availMem -= sched.memPerTask
		}
		// An empty batch still goes through LaunchTasks, matching the
		// original behavior for offers with no capacity.
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, batch, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
	}
}
示例15: ResourceOffers
// ResourceOffers launches one Docker "sleep" task per offer, consuming all
// of the offer's cpu/mem plus a two-port range carved from its first
// "ports" resource.
func (sched *Scheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		taskId := &mesos.TaskID{
			Value: proto.String(fmt.Sprintf("basicdocker-task-%d", time.Now().Unix())),
		}

		ports := util.FilterResources(
			offer.Resources,
			func(res *mesos.Resource) bool {
				return res.GetName() == "ports"
			},
		)
		// IDIOM FIX: the original used an empty if-body with an else-return;
		// guard directly on the negated condition instead. Note this bails
		// out of the whole callback (as before), leaving any remaining
		// offers neither used nor declined.
		if len(ports) == 0 || len(ports[0].GetRanges().GetRange()) == 0 {
			return
		}

		task := &mesos.TaskInfo{
			Name:    proto.String(taskId.GetValue()),
			TaskId:  taskId,
			SlaveId: offer.SlaveId,
			Container: &mesos.ContainerInfo{
				Type:     mesos.ContainerInfo_DOCKER.Enum(),
				Volumes:  nil,
				Hostname: nil,
				Docker: &mesos.ContainerInfo_DockerInfo{
					Image:   &DOCKER_IMAGE_DEFAULT,
					Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
				},
			},
			Command: &mesos.CommandInfo{
				Shell: proto.Bool(true),
				Value: proto.String("set -x ; /bin/date ; /bin/hostname ; sleep 200 ; echo done"),
			},
			Executor: nil,
			Resources: []*mesos.Resource{
				util.NewScalarResource("cpus", getOfferCpu(offer)),
				util.NewScalarResource("mem", getOfferMem(offer)),
				// Claim a two-port range starting at the first offered port.
				util.NewRangesResource("ports", []*mesos.Value_Range{
					util.NewValueRange(
						*ports[0].GetRanges().GetRange()[0].Begin,
						*ports[0].GetRanges().GetRange()[0].Begin+1,
					),
				}),
			},
		}

		log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())
		var tasks []*mesos.TaskInfo = []*mesos.TaskInfo{task}
		log.Infoln("Launching ", len(tasks), " tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		sched.tasksLaunched++
		// Throttle launches; also makes the Unix-timestamp task IDs unique
		// across iterations.
		time.Sleep(time.Second)
	}
}