本文整理匯總了Golang中github.com/mesos/mesos-go/mesosutil.NewRangesResource函數的典型用法代碼示例。如果您正苦於以下問題:Golang NewRangesResource函數的具體用法?Golang NewRangesResource怎麽用?Golang NewRangesResource使用的例子?那麽, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了NewRangesResource函數的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: AskForPorts
// AskForPorts returns a ResourceAsker that carves portCount ports out of the
// "ports" range resource of an offer. The closure returns the remaining
// resources, the carved-out ask, and a success flag (false when no ports
// resource holds at least portCount ports).
func AskForPorts(portCount int) ResourceAsker {
	return func(resources []*mesos.Resource) ([]*mesos.Resource, *mesos.Resource, bool) {
		remaining := make([]*mesos.Resource, len(resources))
		copy(remaining, resources)
		for i, res := range resources {
			if res.GetName() != "ports" {
				continue
			}
			ports := RangesToArray(res.GetRanges().GetRange())
			if len(ports) < portCount {
				continue
			}
			// Pick a random start for the window of ports we take.
			start := 0
			if spare := len(ports) - portCount; spare > 0 {
				start = rand.Intn(spare)
			}
			taken := make([]int64, portCount)
			copy(taken, ports[start:start+portCount])
			// Everything outside the taken window stays in the offer.
			left := make([]int64, len(ports)-portCount)
			copy(left, ports[:start])
			copy(left[start:], ports[start+portCount:])
			remaining[i] = util.NewRangesResource("ports", ArrayToRanges(left))
			ask := util.NewRangesResource("ports", ArrayToRanges(taken))
			return remaining, ask, true
		}
		return resources, nil, false
	}
}
示例2: launchTask
// launchTask builds a syslog TaskInfo for the given offer, registers it with
// the cluster state, and launches it via the driver.
func (s *Scheduler) launchTask(driver scheduler.SchedulerDriver, offer *mesos.Offer) {
	slaveValue := offer.GetSlaveId().GetValue()
	taskName := fmt.Sprintf("syslog-%s", slaveValue)
	id := &mesos.TaskID{
		Value: proto.String(fmt.Sprintf("%s-%s", taskName, uuid())),
	}

	data, err := json.Marshal(Config)
	if err != nil {
		// Config is always serializable; failing here is a programming error.
		panic(err)
	}
	Logger.Debugf("Task data: %s", string(data))

	// The TCP port is picked first; the UDP pick excludes it.
	tcpPort := uint64(s.getPort(Config.TcpPort, offer, -1))
	udpPort := uint64(s.getPort(Config.UdpPort, offer, int(tcpPort)))

	resources := []*mesos.Resource{
		util.NewScalarResource("cpus", Config.Cpus),
		util.NewScalarResource("mem", Config.Mem),
		util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(tcpPort, tcpPort)}),
		util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(udpPort, udpPort)}),
	}

	task := &mesos.TaskInfo{
		Name:      proto.String(taskName),
		TaskId:    id,
		SlaveId:   offer.GetSlaveId(),
		Executor:  s.createExecutor(offer, tcpPort, udpPort),
		Resources: resources,
		Data:      data,
		Labels:    utils.StringToLabels(s.labels),
	}

	s.cluster.Add(slaveValue, task)
	driver.LaunchTasks([]*mesos.OfferID{offer.GetId()}, []*mesos.TaskInfo{task}, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
}
示例3: apply
// apply subtracts the requested cpus, mem, disk, and port count from the
// ResourceGroup `against` (mutating it in place) and returns the matching
// list of mesos Resources to ask for.
//
// When both principal and role are non-empty, reserved resources are built
// instead of plain ones; for disk, a persistent volume is built when
// containerPath and persistenceID are also supplied.
func (offerHelper *OfferHelper) apply(against *ResourceGroup, cpus float64, mem float64, disk float64, ports int,
	principal string, role string, persistenceID string, containerPath string) []*mesos.Resource {
	ask := []*mesos.Resource{}
	if cpus > 0 {
		against.Cpus = against.Cpus - cpus
		if principal != "" && role != "" {
			ask = append(ask, util.NewScalarResourceWithReservation("cpus", cpus, principal, role))
		} else {
			ask = append(ask, util.NewScalarResource("cpus", cpus))
		}
	}
	if mem > 0 {
		against.Mem = against.Mem - mem
		if principal != "" && role != "" {
			ask = append(ask, util.NewScalarResourceWithReservation("mem", mem, principal, role))
		} else {
			ask = append(ask, util.NewScalarResource("mem", mem))
		}
	}
	if disk > 0 {
		against.Disk = against.Disk - disk
		// Precedence: persistent volume > plain reservation > plain scalar,
		// depending on which optional arguments are present.
		if principal != "" && role != "" && containerPath != "" && persistenceID != "" {
			ask = append(ask, util.NewVolumeResourceWithReservation(disk, containerPath, persistenceID, mesos.Volume_RW.Enum(), principal, role))
		} else if principal != "" && role != "" {
			ask = append(ask, util.NewScalarResourceWithReservation("disk", disk, principal, role))
		} else {
			ask = append(ask, util.NewScalarResource("disk", disk))
		}
	}
	if ports > 0 {
		// Take `ports` consecutive entries starting at a random offset.
		// NOTE(review): assumes len(against.Ports) >= ports — the slice
		// expressions below panic otherwise; confirm callers guard this.
		sliceLoc := 0
		if len(against.Ports)-ports > 0 {
			sliceLoc = rand.Intn(len(against.Ports) - ports)
		}
		takingPorts := make([]int64, ports)
		copy(takingPorts, against.Ports[sliceLoc:(sliceLoc+ports)])
		// leavingPorts = everything before and after the taken window.
		leavingPorts := make([]int64, len(against.Ports)-ports)
		copy(leavingPorts, against.Ports[:sliceLoc])
		copy(leavingPorts[sliceLoc:], against.Ports[(sliceLoc+ports):])
		against.Ports = leavingPorts
		if principal != "" && role != "" {
			ask = append(ask, util.AddResourceReservation(util.NewRangesResource("ports", ArrayToRanges(takingPorts)), principal, role))
		} else {
			ask = append(ask, util.NewRangesResource("ports", ArrayToRanges(takingPorts)))
		}
	}
	return ask
}
示例4: TestGoodPortAsk
// TestGoodPortAsk verifies that asking for 100 ports from the standard test
// offer succeeds, yields a deterministic range (fixed seed), and leaves the
// complement of that range behind in the remaining resources.
func TestGoodPortAsk(t *testing.T) {
	rand.Seed(10)
	assert := assert.New(t)

	remaining, ask, ok := AskForPorts(100)(generateResourceOffer())
	assert.Equal(true, ok)

	wantAsk := util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31105, 31204)})
	assert.Equal(wantAsk, ask)

	leftoverPorts := util.FilterResources(remaining, func(res *mesos.Resource) bool {
		return res.GetName() == "ports"
	})
	wantLeft := []*mesos.Resource{util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 31104), util.NewValueRange(31205, 32000)})}
	assert.Equal(wantLeft, leftoverPorts)
}
示例5: createTaskInfo
// createTaskInfo copies agent/framework details from the offer onto the
// eremetic.Task and builds the matching mesosproto.TaskInfo for launching it
// as a bridged Docker container.
func createTaskInfo(task eremetic.Task, offer *mesosproto.Offer) (eremetic.Task, *mesosproto.TaskInfo) {
	task.FrameworkID = *offer.FrameworkId.Value
	task.SlaveID = *offer.SlaveId.Value
	task.Hostname = *offer.Hostname
	task.AgentIP = offer.GetUrl().GetAddress().GetIp()
	task.AgentPort = offer.GetUrl().GetAddress().GetPort()

	portMapping, portResources := buildPorts(task, offer)
	env := buildEnvironment(task, portMapping)

	docker := &mesosproto.ContainerInfo_DockerInfo{
		Image:          proto.String(task.Image),
		ForcePullImage: proto.Bool(task.ForcePullImage),
		PortMappings:   portMapping,
		Network:        mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum(),
	}
	container := &mesosproto.ContainerInfo{
		Type:    mesosproto.ContainerInfo_DOCKER.Enum(),
		Docker:  docker,
		Volumes: buildVolumes(task),
	}

	info := &mesosproto.TaskInfo{
		TaskId:    &mesosproto.TaskID{Value: proto.String(task.ID)},
		SlaveId:   offer.SlaveId,
		Name:      proto.String(task.Name),
		Command:   buildCommandInfo(task, env),
		Container: container,
		Resources: []*mesosproto.Resource{
			mesosutil.NewScalarResource("cpus", task.TaskCPUs),
			mesosutil.NewScalarResource("mem", task.TaskMem),
			mesosutil.NewRangesResource("ports", portResources),
		},
	}
	return task, info
}
示例6: newTaskPrototype
// newTaskPrototype builds a memcache TaskInfo skeleton for the given offer,
// pinning a single host port (the first port of the offer's range).
func (s *MemcacheScheduler) newTaskPrototype(offer *mesos.Offer) *mesos.TaskInfo {
	id := s.tasksCreated
	s.tasksCreated++

	// Collapse the offered port range to its first port.
	portRange := getPortRange(offer)
	portRange.End = portRange.Begin

	docker := &mesos.ContainerInfo_DockerInfo{
		Image:   &util.MEMCACHE_CONTAINER,
		Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
		PortMappings: []*mesos.ContainerInfo_DockerInfo_PortMapping{
			{
				ContainerPort: &MEMCACHE_PORT,
				Protocol:      &MEMCACHE_PROTOCOL,
				HostPort:      &MEMCACHE_HOST_PORT,
			},
		},
	}

	return &mesos.TaskInfo{
		TaskId: &mesos.TaskID{
			Value: proto.String(fmt.Sprintf("Memcache-%d", id)),
		},
		SlaveId: offer.SlaveId,
		Resources: []*mesos.Resource{
			mesosutil.NewScalarResource("cpus", TASK_CPUS),
			mesosutil.NewScalarResource("mem", TASK_MEM),
			mesosutil.NewRangesResource("ports", []*mesos.Value_Range{portRange}),
		},
		Container: &mesos.ContainerInfo{
			Type:   mesos.ContainerInfo_DOCKER.Enum(),
			Docker: docker,
		},
	}
}
示例7: TestResource
// TestResource checks the string rendering of scalar and ranges resources.
func TestResource(t *testing.T) {
	if got := Resource(util.NewScalarResource("mem", 512)); got != "mem:512.00" {
		t.Errorf(`Resource(util.NewScalarResource("mem", 512)) != "mem:512.00"; actual %s`, got)
	}
	if got := Resource(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})); got != "ports:[31000..32000]" {
		t.Errorf(`Resource(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)})) != "ports:[31000..32000]"; actual %s`, got)
	}
	if got := Resource(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(4000, 7000), util.NewValueRange(31000, 32000)})); got != "ports:[4000..7000][31000..32000]" {
		t.Errorf(`Resource(util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(4000, 7000), util.NewValueRange(31000, 32000)})) != "ports:[4000..7000][31000..32000]"; actual %s`, got)
	}
}
示例8: TestBadPortAsk
// TestBadPortAsk verifies the ask fails when the offer holds too few ports.
func TestBadPortAsk(t *testing.T) {
	assert := assert.New(t)
	singlePort := []*mesos.Resource{util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 31000)})}
	_, _, ok := AskForPorts(100)(singlePort)
	assert.Equal(false, ok)
}
示例9: TestTotalPortAsk
// TestTotalPortAsk verifies that asking for every available port (a single
// one here) succeeds and fully drains the remaining "ports" resource.
//
// Fixes: the original contained a dead expression statement
// (`newOffer[0].GetRanges().GetRange()` with its result discarded) and only
// asserted success after indexing into newOffer; success is now checked
// first so a failed ask reports cleanly instead of risking a bad index.
func TestTotalPortAsk(t *testing.T) {
	assert := assert.New(t)
	askfun := AskForPorts(1)
	offer := []*mesos.Resource{util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 31000)})}
	newOffer, _, success := askfun(offer)
	assert.Equal(true, success)
	assert.Equal(0, len(newOffer[0].GetRanges().GetRange()))
}
示例10: generateResourceOffer
// generateResourceOffer returns a fixture resource list: 3 cpus, 73590 disk,
// 1985 mem, and the port range 31000-32000.
func generateResourceOffer() []*mesos.Resource {
	return []*mesos.Resource{
		util.NewScalarResource("cpus", 3),
		util.NewScalarResource("disk", 73590),
		util.NewScalarResource("mem", 1985),
		util.NewRangesResource("ports", []*mesos.Value_Range{util.NewValueRange(31000, 32000)}),
	}
}
示例11: ResourceOffers
// ResourceOffers launches one basicdocker task per received offer. When an
// offer carries no usable port range the handler returns (abandoning the
// rest of the batch — preserved from the original behavior).
//
// Fixes: the port check was written as an empty if-branch with the work in
// the else (staticcheck SA9003); the condition is inverted into a guard with
// identical semantics.
func (sched *Scheduler) ResourceOffers(driver sched.SchedulerDriver, offers []*mesos.Offer) {
	for _, offer := range offers {
		taskId := &mesos.TaskID{
			Value: proto.String(fmt.Sprintf("basicdocker-task-%d", time.Now().Unix())),
		}
		ports := util.FilterResources(
			offer.Resources,
			func(res *mesos.Resource) bool {
				return res.GetName() == "ports"
			},
		)
		// Guard: we need at least one port range to build the task resources.
		if len(ports) == 0 || len(ports[0].GetRanges().GetRange()) == 0 {
			return
		}
		task := &mesos.TaskInfo{
			Name:    proto.String(taskId.GetValue()),
			TaskId:  taskId,
			SlaveId: offer.SlaveId,
			Container: &mesos.ContainerInfo{
				Type:     mesos.ContainerInfo_DOCKER.Enum(),
				Volumes:  nil,
				Hostname: nil,
				Docker: &mesos.ContainerInfo_DockerInfo{
					Image:   &DOCKER_IMAGE_DEFAULT,
					Network: mesos.ContainerInfo_DockerInfo_BRIDGE.Enum(),
				},
			},
			Command: &mesos.CommandInfo{
				Shell: proto.Bool(true),
				Value: proto.String("set -x ; /bin/date ; /bin/hostname ; sleep 200 ; echo done"),
			},
			Executor: nil,
			Resources: []*mesos.Resource{
				util.NewScalarResource("cpus", getOfferCpu(offer)),
				util.NewScalarResource("mem", getOfferMem(offer)),
				// Take the first two ports of the first offered range.
				util.NewRangesResource("ports", []*mesos.Value_Range{
					util.NewValueRange(
						*ports[0].GetRanges().GetRange()[0].Begin,
						*ports[0].GetRanges().GetRange()[0].Begin+1,
					),
				}),
			},
		}
		log.Infof("Prepared task: %s with offer %s for launch\n", task.GetName(), offer.Id.GetValue())
		var tasks []*mesos.TaskInfo = []*mesos.TaskInfo{task}
		log.Infoln("Launching ", len(tasks), " tasks for offer", offer.Id.GetValue())
		driver.LaunchTasks([]*mesos.OfferID{offer.Id}, tasks, &mesos.Filters{RefuseSeconds: proto.Float64(1)})
		sched.tasksLaunched++
		time.Sleep(time.Second)
	}
}
示例12: NewTestOffer
// Offering some cpus and memory and the 8000-9000 port range
// NewTestOffer builds a test offer with 3.75 cpus, 940 mem, and the
// 8000-9000 port range, hosted on a fixed hostname.
func NewTestOffer(id string) *mesos.Offer {
	hostname := "some_hostname"
	var begin uint64 = 8000
	var end uint64 = 9000
	portRange := mesos.Value_Range{Begin: &begin, End: &end}
	return &mesos.Offer{
		Id:       util.NewOfferID(id),
		Hostname: &hostname,
		SlaveId:  util.NewSlaveID(hostname),
		Resources: []*mesos.Resource{
			util.NewScalarResource("cpus", 3.75),
			util.NewScalarResource("mem", 940),
			util.NewRangesResource("ports", []*mesos.Value_Range{&portRange}),
		},
	}
}
示例13: NewOffer
// NewOffer builds a localhost test offer for the test-etcd-framework with
// 1 cpu, 256 mem, 4096 disk, and the full 0-65535 port range.
func NewOffer(id string) *mesos.Offer {
	resources := []*mesos.Resource{
		util.NewScalarResource("cpus", 1),
		util.NewScalarResource("mem", 256),
		util.NewScalarResource("disk", 4096),
		util.NewRangesResource("ports", []*mesos.Value_Range{
			util.NewValueRange(uint64(0), uint64(65535)),
		}),
	}
	return &mesos.Offer{
		Id:          util.NewOfferID(id),
		FrameworkId: util.NewFrameworkID("test-etcd-framework"),
		SlaveId:     util.NewSlaveID("slave-" + id),
		Hostname:    proto.String("localhost"),
		Resources:   resources,
	}
}
示例14: launchNewTask
// launchNewTask finds the first offer with enough resources, builds a mirror
// task against it, registers the transport, and returns the offer/task pair.
// Returns (nil, nil) when no offer fits or config marshalling fails.
//
// Fixes: the "Prepared task" debug line was logged twice verbatim (the
// duplicate is removed), and it formatted taskPort — a *mesos.Value_Range,
// not a string — with %s; it now uses %v.
func (this *ElodinaTransportScheduler) launchNewTask(offers []*OfferAndResources) (*mesos.Offer, *mesos.TaskInfo) {
	for _, offer := range offers {
		configBlob, err := json.Marshal(this.config.ConsumerConfig)
		if err != nil {
			break
		}
		log.Logger.Debug("%v", offer)
		if !this.hasEnoughResources(offer) {
			log.Logger.Info("Not enough CPU and memory")
			continue
		}
		port := this.takePort(&offer.RemainingPorts)
		taskPort := &mesos.Value_Range{Begin: port, End: port}
		taskId := &mesos.TaskID{
			Value: proto.String(fmt.Sprintf("elodina-mirror-%s-%d", *offer.Offer.Hostname, *port)),
		}
		cpuTaken := this.config.CpuPerTask * float64(this.config.ThreadsPerTask)
		memoryTaken := this.config.MemPerTask * float64(this.config.ThreadsPerTask)
		task := &mesos.TaskInfo{
			Name:     proto.String(taskId.GetValue()),
			TaskId:   taskId,
			SlaveId:  offer.Offer.SlaveId,
			Executor: this.createExecutor(len(this.taskIdToTaskState), *port),
			Resources: []*mesos.Resource{
				util.NewScalarResource("cpus", cpuTaken),
				util.NewScalarResource("mem", memoryTaken),
				util.NewRangesResource("ports", []*mesos.Value_Range{taskPort}),
			},
			Data: configBlob,
		}
		log.Logger.Debug("Prepared task: %s with offer %s for launch. Ports: %v", task.GetName(), offer.Offer.Id.GetValue(), taskPort)
		transport := NewElodinaTransport(fmt.Sprintf("http://%s:%d/assign", *offer.Offer.Hostname, *port), task, this.config.StaleDuration)
		this.taskIdToTaskState[taskId.GetValue()] = transport
		// Account the consumed resources against the offer.
		offer.RemainingPorts = offer.RemainingPorts[1:]
		offer.RemainingCpu -= cpuTaken
		offer.RemainingMemory -= memoryTaken
		return offer.Offer, task
	}
	return nil, nil
}
示例15: build
// build populates the task's mesos CommandInfo, ContainerInfo, and Resources
// from its Docker config, targeting the given slave.
//
// Fixes: removed two leftover fmt.Println debug statements; the second one
// indexed containerInfo[1] unconditionally and would panic on a port spec
// without a "/<protocol>" suffix (the later code guards that index). The
// binding-port loop variable is also renamed (portSpec) so the parsed
// numeric containerPort no longer shadows it.
func (t *task) build(slaveID string) {
	t.Command = &mesosproto.CommandInfo{Shell: proto.Bool(false)}

	t.Container = &mesosproto.ContainerInfo{
		Type: mesosproto.ContainerInfo_DOCKER.Enum(),
		Docker: &mesosproto.ContainerInfo_DockerInfo{
			Image: &t.config.Image,
		},
	}

	switch t.config.HostConfig.NetworkMode {
	case "none":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_NONE.Enum()
	case "host":
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_HOST.Enum()
	case "bridge", "":
		for portSpec, bindings := range t.config.HostConfig.PortBindings {
			for _, binding := range bindings {
				// portSpec looks like "<port>[/<protocol>]", e.g. "80/tcp".
				containerInfo := strings.SplitN(portSpec, "/", 2)
				containerPort, err := strconv.ParseUint(containerInfo[0], 10, 32)
				if err != nil {
					log.Warn(err)
					continue
				}
				hostPort, err := strconv.ParseUint(binding.HostPort, 10, 32)
				if err != nil {
					log.Warn(err)
					continue
				}
				protocol := "tcp"
				if len(containerInfo) == 2 {
					protocol = containerInfo[1]
				}
				t.Container.Docker.PortMappings = append(t.Container.Docker.PortMappings, &mesosproto.ContainerInfo_DockerInfo_PortMapping{
					HostPort:      proto.Uint32(uint32(hostPort)),
					ContainerPort: proto.Uint32(uint32(containerPort)),
					Protocol:      proto.String(protocol),
				})
				t.Resources = append(t.Resources, mesosutil.NewRangesResource("ports", []*mesosproto.Value_Range{mesosutil.NewValueRange(hostPort, hostPort)}))
			}
		}
		// TODO handle -P here
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	default:
		log.Errorf("Unsupported network mode %q", t.config.HostConfig.NetworkMode)
		t.Container.Docker.Network = mesosproto.ContainerInfo_DockerInfo_BRIDGE.Enum()
	}

	if cpus := t.config.CpuShares; cpus > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("cpus", float64(cpus)))
	}
	// config.Memory appears to be bytes; converted to MiB here — TODO confirm.
	if mem := t.config.Memory; mem > 0 {
		t.Resources = append(t.Resources, mesosutil.NewScalarResource("mem", float64(mem/1024/1024)))
	}

	if len(t.config.Cmd) > 0 && t.config.Cmd[0] != "" {
		t.Command.Value = &t.config.Cmd[0]
	}
	if len(t.config.Cmd) > 1 {
		t.Command.Arguments = t.config.Cmd[1:]
	}

	for key, value := range t.config.Labels {
		t.Container.Docker.Parameters = append(t.Container.Docker.Parameters, &mesosproto.Parameter{Key: proto.String("label"), Value: proto.String(fmt.Sprintf("%s=%s", key, value))})
	}

	t.SlaveId = &mesosproto.SlaveID{Value: &slaveID}
}