This article collects typical usage examples of the Golang TaskID.GetValue method from github.com/mesos/mesos-go/mesosproto. If you have been wondering what TaskID.GetValue does, how to call it, or what it looks like in real code, the curated examples here may help. You can also explore further usage examples of the type the method belongs to, github.com/mesos/mesos-go/mesosproto.TaskID.
Nine code examples of the TaskID.GetValue method are shown below, sorted by popularity by default.
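As a quick orientation before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of constructing a mesos.TaskID and reading it back with GetValue. The task ID string "example-task-1" is a placeholder, and the proto import path is an assumption (older mesos-go code typically vendored gogo/protobuf); adjust it to whatever your build actually uses.

package main

import (
    "fmt"

    proto "github.com/gogo/protobuf/proto" // assumed import path; some setups use github.com/golang/protobuf/proto
    mesos "github.com/mesos/mesos-go/mesosproto"
)

func main() {
    // TaskID.Value is a *string in the generated protobuf code, so it is
    // set with proto.String and read back with the generated GetValue getter.
    taskId := &mesos.TaskID{Value: proto.String("example-task-1")}
    fmt.Println(taskId.GetValue()) // "example-task-1"

    // Generated getters are nil-safe: a nil *TaskID yields the empty string.
    var missing *mesos.TaskID
    fmt.Println(missing.GetValue() == "") // true
}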
Example 1: tryKillTask
func (this *ElodinaTransportScheduler) tryKillTask(driver scheduler.SchedulerDriver, taskId *mesos.TaskID) error {
    log.Logger.Info("Trying to kill task %s", taskId.GetValue())
    var err error
    for i := 0; i <= this.config.KillTaskRetries; i++ {
        if _, err = driver.KillTask(taskId); err == nil {
            return nil
        }
    }
    return err
}
Example 2: tryKillTask
func (this *TransformScheduler) tryKillTask(driver scheduler.SchedulerDriver, taskId *mesos.TaskID) error {
    fmt.Printf("Trying to kill task %s\n", taskId.GetValue())
    var err error
    for i := 0; i <= this.config.KillTaskRetries; i++ {
        if _, err = driver.KillTask(taskId); err == nil {
            return nil
        }
    }
    return err
}
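Examples 1 and 2 are structurally identical: they wrap SchedulerDriver.KillTask in a bounded retry loop driven by a KillTaskRetries setting. For reference, a generalized sketch of the same pattern (a hypothetical helper, not taken from either project) could look like this:

// killWithRetries keeps calling driver.KillTask until it succeeds or the
// retry budget is exhausted, and returns the last error seen.
func killWithRetries(driver scheduler.SchedulerDriver, taskId *mesos.TaskID, retries int) error {
    var err error
    for i := 0; i <= retries; i++ {
        if _, err = driver.KillTask(taskId); err == nil {
            return nil
        }
    }
    return err
}

Note that driver.KillTask only submits the kill request; whether the task actually terminated is reported later through a status update.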
Example 3: KillTask
// KillTask is called when the executor receives a request to kill a task.
func (k *KubernetesExecutor) KillTask(driver bindings.ExecutorDriver, taskId *mesos.TaskID) {
    if k.isDone() {
        return
    }
    log.Infof("Kill task %v\n", taskId)
    if !k.isConnected() {
        //TODO(jdefelice) sent TASK_LOST here?
        log.Warningf("Ignore kill task because the executor is disconnected\n")
        return
    }
    k.lock.Lock()
    defer k.lock.Unlock()
    k.removePodTask(driver, taskId.GetValue(), messages.TaskKilled, mesos.TaskState_TASK_KILLED)
}
Example 4: KillTask
// KillTask is called when the executor receives a request to kill a task.
func (k *Executor) KillTask(driver bindings.ExecutorDriver, taskId *mesos.TaskID) {
    k.killPodTask(driver, taskId.GetValue())
}
Example 5: KillTask
func (e *Executor) KillTask(driver executor.ExecutorDriver, id *mesos.TaskID) {
    Logger.Infof("[KillTask] %s", id.GetValue())
    e.stop()
}
Example 6: removeTask
func (this *ElodinaTransportScheduler) removeTask(id *mesos.TaskID) {
    delete(this.taskIdToTaskState, id.GetValue())
}
Example 7: KillTask
func (e *Executor) KillTask(driver executor.ExecutorDriver, id *mesos.TaskID) {
    Logger.Infof("[KillTask] %s", id.GetValue())
    e.producer.Stop()
    e.close <- struct{}{}
}
Example 8: KillTask
func (e *MirrorMakerExecutor) KillTask(driver executor.ExecutorDriver, id *mesos.TaskID) {
    Logger.Infof("[KillTask] %s", id.GetValue())
    e.mirrorMaker.Stop()
}
Example 9: ResourceOffers
func (s *MinerScheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
    for i, offer := range offers {
        memResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
            return res.GetName() == "mem"
        })
        mems := 0.0
        for _, res := range memResources {
            mems += res.GetScalar().GetValue()
        }
        cpuResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
            return res.GetName() == "cpus"
        })
        cpus := 0.0
        for _, res := range cpuResources {
            cpus += res.GetScalar().GetValue()
        }
        portsResources := util.FilterResources(offer.Resources, func(res *mesos.Resource) bool {
            return res.GetName() == "ports"
        })
        var ports uint64
        for _, res := range portsResources {
            port_ranges := res.GetRanges().GetRange()
            for _, port_range := range port_ranges {
                ports += port_range.GetEnd() - port_range.GetBegin()
            }
        }
        // If a miner server is running, we start a new miner daemon. Otherwise, we start a new miner server.
        tasks := make([]*mesos.TaskInfo, 0)
        if !s.minerServerRunning && mems >= MEM_PER_SERVER_TASK && cpus >= CPU_PER_SERVER_TASK && ports >= 2 {
            var taskId *mesos.TaskID
            var task *mesos.TaskInfo
            // we need two ports
            var p2pool_port uint64
            var worker_port uint64
            // A rather stupid algorithm for picking two ports
            // The difficulty here is that a range might only include one port,
            // in which case we will need to pick another port from another range.
            for _, res := range portsResources {
                r := res.GetRanges().GetRange()[0]
                begin := r.GetBegin()
                end := r.GetEnd()
                if p2pool_port == 0 {
                    p2pool_port = begin
                    if worker_port == 0 && (begin+1) <= end {
                        worker_port = begin + 1
                        break
                    }
                    continue
                }
                if worker_port == 0 {
                    worker_port = begin
                    break
                }
            }
            taskId = &mesos.TaskID{
                Value: proto.String("miner-server-" + strconv.Itoa(i)),
            }
            containerType := mesos.ContainerInfo_DOCKER
            task = &mesos.TaskInfo{
                Name:    proto.String("task-" + taskId.GetValue()),
                TaskId:  taskId,
                SlaveId: offer.SlaveId,
                Container: &mesos.ContainerInfo{
                    Type: &containerType,
                    Docker: &mesos.ContainerInfo_DockerInfo{
                        Image: proto.String(MINER_SERVER_DOCKER_IMAGE),
                    },
                },
                Command: &mesos.CommandInfo{
                    Shell: proto.Bool(false),
                    Arguments: []string{
                        // these arguments will be passed to run_p2pool.py
                        "--bitcoind-address", *bitcoindAddr,
                        "--p2pool-port", strconv.Itoa(int(p2pool_port)),
                        "-w", strconv.Itoa(int(worker_port)),
                        s.rpc_user, s.rpc_pass,
                    },
                },
                Resources: []*mesos.Resource{
                    util.NewScalarResource("cpus", CPU_PER_SERVER_TASK),
                    util.NewScalarResource("mem", MEM_PER_SERVER_TASK),
                },
            }
            log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())
            cpus -= CPU_PER_SERVER_TASK
            mems -= MEM_PER_SERVER_TASK
            // update state
            s.minerServerHostname = offer.GetHostname()
            s.minerServerRunning = true
            s.minerServerPort = int(worker_port)
            tasks = append(tasks, task)
//......... (the rest of this example is omitted) .........
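Example 9's port-picking loop is, as its own comment admits, a fairly crude way to pull two ports out of the offered ranges, and it only inspects the first range of each resource. For comparison, here is a stand-alone sketch (a hypothetical helper, not part of the original scheduler) that collects up to n ports from the offered "ports" resources using the same mesosproto accessors seen above; note that Mesos ranges are inclusive on both ends.

// pickPorts returns up to n ports taken from the given "ports" resources.
// A range [begin, end] is inclusive, so it contributes end-begin+1 ports.
func pickPorts(portsResources []*mesos.Resource, n int) []uint64 {
    picked := make([]uint64, 0, n)
    for _, res := range portsResources {
        for _, r := range res.GetRanges().GetRange() {
            for p := r.GetBegin(); p <= r.GetEnd(); p++ {
                picked = append(picked, p)
                if len(picked) == n {
                    return picked
                }
            }
        }
    }
    return picked // fewer than n ports were available in the offer
}

With a helper like this, the scheduler body could call pickPorts(portsResources, 2) and proceed only when it gets back both ports.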