本文整理汇总了Golang中github.com/mesos/mesos-go/mesosutil.NewMasterInfo函数的典型用法代码示例。如果您正苦于以下问题:Golang NewMasterInfo函数的具体用法?Golang NewMasterInfo怎么用?Golang NewMasterInfo使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了NewMasterInfo函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Golang代码示例。
示例1: CreateMasterInfo
// CreateMasterInfo attempts to build a mesos.MasterInfo from an upid.UPID
// specification. An attempt is made to determine the IP address of the
// UPID's Host and any errors during such resolution will result in a nil
// returned result. A nil result is also returned upon errors parsing the Port
// specification of the UPID.
//
// TODO(jdef) make this a func of upid.UPID so that callers can invoke somePid.MasterInfo()?
//
func CreateMasterInfo(pid *upid.UPID) *mesos.MasterInfo {
	if pid == nil {
		return nil
	}
	port, err := strconv.Atoi(pid.Port)
	if err != nil {
		log.Errorf("failed to parse port: %v", err)
		return nil
	}
	//TODO(jdef) what about (future) ipv6 support?
	var ipv4 net.IP
	if ipv4 = net.ParseIP(pid.Host); ipv4 != nil {
		// Host is already a literal IP; handle it explicitly instead of via
		// net.LookupIP, whose cross-compiled (e.g. macos->linux) version
		// fails to resolve plain IPs. See https://github.com/mesos/mesos-go/pull/117
		//
		// ParseIP returns the 16-byte representation for IPv4 literals, so
		// normalize to the 4-byte form before packing; To4 returns nil for
		// genuine IPv6 addresses, which we cannot represent here.
		if ipv4 = ipv4.To4(); ipv4 == nil {
			log.Errorf("host is not an IPv4 address: %v", pid.Host)
			return nil
		}
	} else if addrs, err := net.LookupIP(pid.Host); err == nil {
		// Pick the first IPv4 address the resolver returns.
		for _, ip := range addrs {
			if ip = ip.To4(); ip != nil {
				ipv4 = ip
				break
			}
		}
		if ipv4 == nil {
			log.Errorf("host does not resolve to an IPv4 address: %v", pid.Host)
			return nil
		}
	} else {
		log.Errorf("failed to lookup IPs for host '%v': %v", pid.Host, err)
		return nil
	}
	// ipv4 is guaranteed to be in 4-byte form here, so the packed value is
	// the address itself in network (big-endian) byte order.
	packedip := binary.BigEndian.Uint32(ipv4)
	mi := util.NewMasterInfo(pid.ID, packedip, uint32(port))
	mi.Pid = proto.String(pid.String())
	if pid.Host != "" {
		mi.Hostname = proto.String(pid.Host)
	}
	return mi
}
示例2: TestOnLeaderChangeIp
// TestOnLeaderChangeIp verifies that a MasterInfo carrying a packed IPv4
// address (0x01020304 == 1.2.3.4) and port 5050 is published on the
// detector's newLeader channel as "ip:port".
func TestOnLeaderChangeIp(t *testing.T) {
	d := NewZkLeaderDetector()
	mi := util.NewMasterInfo("id", 0x01020304, 5050)
	d.onLeaderChange(mi)
	leader := <-d.newLeader
	// testify's Equal takes (t, expected, actual); the original passed them
	// reversed, which yields misleading failure diffs.
	assert.Equal(t, "1.2.3.4:5050", *leader)
}
示例3: makeTestMasterInfo
// makeTestMasterInfo builds a serialized MasterInfo protobuf for use as a
// test fixture. A marshaling failure indicates a broken fixture, so it
// panics rather than returning an error.
// NOTE(review): the id string looks like it was mangled by the example
// site's email obfuscation — confirm against the original repository.
func makeTestMasterInfo() []byte {
	info := util.NewMasterInfo("[email protected]:5050", 123456789, 400)
	serialized, err := proto.Marshal(info)
	if err != nil {
		panic(err)
	}
	return serialized
}
示例4: newTestMasterInfo
// newTestMasterInfo builds a serialized MasterInfo protobuf whose id embeds
// the given numeric suffix, for tests that need several distinct masters.
// Marshaling failures are fatal since they indicate a broken fixture.
func newTestMasterInfo(id int) []byte {
	masterID := fmt.Sprintf("master(%d)@localhost:5050", id)
	info := util.NewMasterInfo(masterID, 123456789, 400)
	serialized, err := proto.Marshal(info)
	if err != nil {
		panic(err)
	}
	return serialized
}
示例5: makeMasterInfo
// makeMasterInfo builds a serialized MasterInfo protobuf, with the Pid field
// populated, for the mock detector to hand to test code.
// NOTE(review): the Pid string looks like it was mangled by the example
// site's email obfuscation — confirm against the original repository.
func (m *MockMasterDetector) makeMasterInfo() []byte {
	info := util.NewMasterInfo("master", 123456789, 400)
	info.Pid = proto.String("[email protected]:5050")
	serialized, err := proto.Marshal(info)
	if err != nil {
		panic(err)
	}
	return serialized
}
示例6: TestOnLeaderChangeHostname
// TestOnLeaderChangeHostname verifies that an explicit Hostname on the
// MasterInfo takes precedence over the packed IP (0x01020304 == 1.2.3.4)
// when forming the leader address published on the newLeader channel.
func TestOnLeaderChangeHostname(t *testing.T) {
	host := "2.3.4.5"
	d := NewZkLeaderDetector()
	mi := util.NewMasterInfo("id", 0x01020304, 5050)
	mi.Hostname = &host
	d.onLeaderChange(mi)
	leader := <-d.newLeader
	// testify's Equal takes (t, expected, actual); the original passed them
	// reversed, which yields misleading failure diffs.
	assert.Equal(t, "2.3.4.5:5050", *leader)
}
示例7: TestStartup
// TestStartup verifies the scheduler's registration behavior: on Registered()
// it must enter the Immutable state, clear its running-node list, and issue
// the expected ReconcileTasks calls on the driver.
func TestStartup(t *gotesting.T) {
	mockdriver := &MockSchedulerDriver{}
	testScheduler := NewEtcdScheduler(1, 0, 0, false, []*mesos.CommandInfo_URI{}, false, 4096, 1, 256)
	// Pre-populate the running list so we can observe that registration
	// clears it (to be rebuilt from reconciled task statuses).
	testScheduler.running = map[string]*config.Node{
		"etcd-1": nil,
		"etcd-2": nil,
	}
	reconciliation := map[string]string{
		"etcd-1": "slave-1",
		"etcd-2": "slave-2",
	}
	// Stub out persistence: reads return the map above, writes replace it.
	testScheduler.reconciliationInfoFunc = func([]string, string, string) (map[string]string, error) {
		return reconciliation, nil
	}
	testScheduler.updateReconciliationInfoFunc = func(info map[string]string, _ []string, _ string, _ string) error {
		reconciliation = info
		return nil
	}
	// On registration, ReconcileTasks should be called.
	// NOTE(review): the expectations are keyed on the integers 0 and 2 —
	// presumably the mock matches on the number of TaskStatus arguments;
	// confirm against MockSchedulerDriver's matcher.
	mockdriver.Lock()
	mockdriver.On(
		"ReconcileTasks",
		0,
	).Return(mesos.Status_DRIVER_RUNNING, nil).Once()
	mockdriver.On(
		"ReconcileTasks",
		2,
	).Return(mesos.Status_DRIVER_RUNNING, nil).Once()
	mockdriver.Unlock()
	masterInfo := util.NewMasterInfo("master-1", 0, 0)
	masterInfo.Hostname = proto.String("test-host")
	testScheduler.Registered(
		mockdriver,
		util.NewFrameworkID("framework-1"),
		masterInfo,
	)
	assert.Equal(t, Immutable, testScheduler.state,
		"Scheduler should be placed in the Immutable state after registration "+
			"as we wait for status updates to arrive in response to ReconcileTasks.")
	assert.Equal(t, 0, len(testScheduler.running),
		"Scheduler's running list should be cleared on registration, "+
			"to be populated by ReconcileTasks.")
	// Registration triggers reconciliation asynchronously; give it a moment
	// before asserting that the mocked expectations were satisfied.
	time.Sleep(50 * time.Millisecond)
	mockdriver.Lock()
	defer mockdriver.Unlock()
	mockdriver.AssertExpectations(t)
}
示例8: CreateMasterInfo
// CreateMasterInfo attempts to build a mesos.MasterInfo from an upid.UPID
// specification. An attempt is made to determine the IP address of the
// UPID's Host and any errors during such resolution will result in a nil
// returned result. A nil result is also returned upon errors parsing the Port
// specification of the UPID.
//
// TODO(jdef) make this a func of upid.UPID so that callers can invoke somePid.MasterInfo()?
//
func CreateMasterInfo(pid *upid.UPID) *mesos.MasterInfo {
	if pid == nil {
		return nil
	}
	port, err := strconv.Atoi(pid.Port)
	if err != nil {
		log.Errorf("failed to parse port: %v", err)
		return nil
	}
	//TODO(jdef) what about (future) ipv6 support?
	var ipv4 net.IP
	if ipv4 = net.ParseIP(pid.Host); ipv4 != nil {
		// This is needed for the people cross-compiling from macos to linux.
		// The cross-compiled version of net.LookupIP() fails to handle plain IPs.
		// See https://github.com/mesos/mesos-go/pull/117
		//
		// BUG FIX: net.ParseIP returns the 16-byte representation for IPv4
		// literals, so packing it directly below would read the leading zero
		// bytes and yield 0.0.0.0. Normalize to the 4-byte form first; To4
		// returns nil for genuine IPv6 addresses, which we cannot represent.
		if ipv4 = ipv4.To4(); ipv4 == nil {
			log.Errorf("host is not an IPv4 address: %v", pid.Host)
			return nil
		}
	} else if addrs, err := net.LookupIP(pid.Host); err == nil {
		// Pick the first IPv4 address the resolver returns.
		for _, ip := range addrs {
			if ip = ip.To4(); ip != nil {
				ipv4 = ip
				break
			}
		}
		if ipv4 == nil {
			log.Errorf("host does not resolve to an IPv4 address: %v", pid.Host)
			return nil
		}
	} else {
		log.Errorf("failed to lookup IPs for host '%v': %v", pid.Host, err)
		return nil
	}
	// ipv4 is guaranteed to be 4 bytes here; network byte order is big-endian.
	packedip := binary.BigEndian.Uint32(ipv4)
	mi := util.NewMasterInfo(pid.ID, packedip, uint32(port))
	mi.Pid = proto.String(pid.String())
	if pid.Host != "" {
		mi.Hostname = proto.String(pid.Host)
	}
	return mi
}
示例9: Detect
// Detect immediately notifies cb of a fixed test master and stores the
// callback on the detector.
//
// NOTE(review): the value receiver means `md.callback = cb` mutates a copy
// of the detector that is discarded when Detect returns; if anything later
// reads md.callback, this needs a pointer receiver. Confirm against callers
// before changing, since that would alter the type's method set.
func (md FakeMasterDetector) Detect(cb detector.MasterChanged) error {
	md.callback = cb
	leadingMaster := mesosutil.NewMasterInfo(TEST_MASTER_ID, TEST_MASTER_IP, TEST_MASTER_PORT)
	cb.OnMasterChanged(leadingMaster)
	return nil
}
示例10: TestPlugin_LifeCycle
//.........这里部分代码省略.........
offerId mesos.OfferID
taskInfo *mesos.TaskInfo
}
launchedTasks := make(chan LaunchedTask, 1)
launchTasksCalledFunc := func(args mock.Arguments) {
offerIDs := args.Get(0).([]*mesos.OfferID)
taskInfos := args.Get(1).([]*mesos.TaskInfo)
assert.Equal(1, len(offerIDs))
assert.Equal(1, len(taskInfos))
launchedTasks <- LaunchedTask{
offerId: *offerIDs[0],
taskInfo: taskInfos[0],
}
}
mockDriver.On("LaunchTasks", mAny("[]*mesosproto.OfferID"), mAny("[]*mesosproto.TaskInfo"), mAny("*mesosproto.Filters")).
Return(mesos.Status_DRIVER_RUNNING, nil).Run(launchTasksCalledFunc)
mockDriver.On("DeclineOffer", mAny("*mesosproto.OfferID"), mAny("*mesosproto.Filters")).
Return(mesos.Status_DRIVER_RUNNING, nil)
// elect master with mock driver
driverFactory := ha.DriverFactory(func() (bindings.SchedulerDriver, error) {
return mockDriver, nil
})
schedulerProcess.Elect(driverFactory)
elected := schedulerProcess.Elected()
// driver will be started
<-started
// tell scheduler to be registered
testScheduler.Registered(
mockDriver,
util.NewFrameworkID("kubernetes-id"),
util.NewMasterInfo("master-id", (192<<24)+(168<<16)+(0<<8)+1, 5050),
)
// wait for being elected
<-elected
//TODO(jdef) refactor things above here into a test suite setup of some sort
// fake new, unscheduled pod
pod, i := NewTestPod()
podListWatch.Add(pod, true) // notify watchers
// wait for failedScheduling event because there is no offer
assert.EventWithReason(eventObserver, "failedScheduling", "failedScheduling event not received")
// add some matching offer
offers := []*mesos.Offer{NewTestOffer(fmt.Sprintf("offer%d", i))}
testScheduler.ResourceOffers(nil, offers)
// and wait for scheduled pod
assert.EventWithReason(eventObserver, "scheduled")
select {
case launchedTask := <-launchedTasks:
// report back that the task has been staged, and then started by mesos
testScheduler.StatusUpdate(mockDriver, newTaskStatusForTask(launchedTask.taskInfo, mesos.TaskState_TASK_STAGING))
testScheduler.StatusUpdate(mockDriver, newTaskStatusForTask(launchedTask.taskInfo, mesos.TaskState_TASK_RUNNING))
// check that ExecutorInfo.data has the static pod data
assert.Len(launchedTask.taskInfo.Executor.Data, 3)
// report back that the task has been lost
mockDriver.AssertNumberOfCalls(t, "SendFrameworkMessage", 0)
testScheduler.StatusUpdate(mockDriver, newTaskStatusForTask(launchedTask.taskInfo, mesos.TaskState_TASK_LOST))
示例11: Start
// Start wires the lifecycleTest fixture together: it runs the scheduler,
// initializes the framework, installs the mock-driver expectations, elects
// the mock driver as leader, and registers the framework. It returns a
// channel delivering each task the scheduler launches.
func (lt lifecycleTest) Start() <-chan LaunchedTask {
	// Shadows the package-level assert with a fixture-bound asserter.
	assert := &EventAssertions{*assert.New(lt.t)}
	lt.sched.Run(lt.schedulerProc.Terminal())
	// init framework
	err := lt.framework.Init(
		lt.sched,
		lt.schedulerProc.Master(),
		http.DefaultServeMux,
	)
	assert.NoError(err)
	// Start must be called exactly once; Upon() yields a channel closed when
	// the driver starts, which we use below to sequence registration.
	lt.driver.On("Start").Return(mesos.Status_DRIVER_RUNNING, nil).Once()
	started := lt.driver.Upon()
	lt.driver.On("ReconcileTasks",
		mock.AnythingOfType("[]*mesosproto.TaskStatus"),
	).Return(mesos.Status_DRIVER_RUNNING, nil)
	lt.driver.On("SendFrameworkMessage",
		mock.AnythingOfType("*mesosproto.ExecutorID"),
		mock.AnythingOfType("*mesosproto.SlaveID"),
		mock.AnythingOfType("string"),
	).Return(mesos.Status_DRIVER_RUNNING, nil)
	// Capture every LaunchTasks call on a buffered channel so the mock
	// callback never blocks; each call must carry exactly one offer/task.
	launchedTasks := make(chan LaunchedTask, 1)
	launchTasksFunc := func(args mock.Arguments) {
		offerIDs := args.Get(0).([]*mesos.OfferID)
		taskInfos := args.Get(1).([]*mesos.TaskInfo)
		assert.Equal(1, len(offerIDs))
		assert.Equal(1, len(taskInfos))
		launchedTasks <- LaunchedTask{
			offerId:  *offerIDs[0],
			taskInfo: taskInfos[0],
		}
	}
	lt.driver.On("LaunchTasks",
		mock.AnythingOfType("[]*mesosproto.OfferID"),
		mock.AnythingOfType("[]*mesosproto.TaskInfo"),
		mock.AnythingOfType("*mesosproto.Filters"),
	).Return(mesos.Status_DRIVER_RUNNING, nil).Run(launchTasksFunc)
	lt.driver.On("DeclineOffer",
		mock.AnythingOfType("*mesosproto.OfferID"),
		mock.AnythingOfType("*mesosproto.Filters"),
	).Return(mesos.Status_DRIVER_RUNNING, nil)
	// elect master with mock driver
	driverFactory := ha.DriverFactory(func() (bindings.SchedulerDriver, error) {
		return lt.driver, nil
	})
	lt.schedulerProc.Elect(driverFactory)
	elected := lt.schedulerProc.Elected()
	// driver will be started
	<-started
	// tell scheduler to be registered; the shifted expression packs the
	// IPv4 address 192.168.0.1 into a uint32.
	lt.framework.Registered(
		lt.driver,
		mesosutil.NewFrameworkID("kubernetes-id"),
		mesosutil.NewMasterInfo("master-id", (192<<24)+(168<<16)+(0<<8)+1, 5050),
	)
	// wait for being elected
	<-elected
	return launchedTasks
}
示例12: configureServerWithRegisteredFramework
}
return ok
}
// configureServerWithRegisteredFramework sets up the mock master so the
// driver re-registers under a previously assigned framework id: clearing
// suite.framework.Id makes the driver emit a registration message, while
// the stashed id is what the server is configured to hand back.
func (suite *SchedulerIntegrationTestSuite) configureServerWithRegisteredFramework() bool {
	previousID := suite.framework.Id
	suite.framework.Id = nil
	suite.registeredFrameworkId = previousID
	return suite.configure(previousID)
}
var defaultMockServerConfigurator = mockServerConfigurator(func(frameworkId *mesos.FrameworkID, suite *SchedulerIntegrationTestSuite) {
t := suite.T()
masterInfo := util.NewMasterInfo("master", 123456, 1234)
suite.server.On("/master/mesos.internal.RegisterFrameworkMessage").Do(func(rsp http.ResponseWriter, req *http.Request) {
if suite.validator != nil {
t.Logf("validating registration request")
suite.validator(rsp, req)
} else {
ioutil.ReadAll(req.Body)
defer req.Body.Close()
rsp.WriteHeader(http.StatusAccepted)
}
// this is what the mocked scheduler is expecting to receive
suite.driver.FrameworkRegistered(suite.driver.Context(), suite.driver.MasterPID(), &mesos.FrameworkRegisteredMessage{
FrameworkId: frameworkId,
MasterInfo: masterInfo,
})
})
示例13: TestGrowToDesiredAfterReconciliation
func TestGrowToDesiredAfterReconciliation(t *gotesting.T) {
testScheduler := NewEtcdScheduler(3, 0, 0, true, []*mesos.CommandInfo_URI{}, false, 4096, 1, 256)
reconciliation := map[string]string{
"etcd-1": "slave-1",
"etcd-2": "slave-2",
}
testScheduler.reconciliationInfoFunc = func([]string, string, string) (map[string]string, error) {
return reconciliation, nil
}
testScheduler.updateReconciliationInfoFunc = func(info map[string]string, _ []string, _ string, _ string) error {
reconciliation = info
return nil
}
testScheduler.masterInfo = util.NewMasterInfo("master-1", 0, 0)
mockdriver := &MockSchedulerDriver{
runningStatuses: make(chan *mesos.TaskStatus, 10),
scheduler: testScheduler,
}
testScheduler.state = Mutable
testScheduler.healthCheck = func(map[string]*config.Node) error {
return nil
}
// Push more than enough offers to shoot self in foot if unchecked.
for _, offer := range []*mesos.Offer{
NewOffer("1"),
NewOffer("2"),
NewOffer("3"),
} {
testScheduler.offerCache.Push(offer)
}
memberList := config.ClusterMemberList{
Members: []httptypes.Member{
{
ID: "1",
Name: "etcd-1",
PeerURLs: nil,
ClientURLs: nil,
},
{
ID: "2",
Name: "etcd-2",
PeerURLs: nil,
ClientURLs: nil,
},
},
}
_, port1, err := emtesting.NewTestEtcdServer(t, memberList)
if err != nil {
t.Fatalf("Failed to create test etcd server: %s", err)
}
_, port2, err := emtesting.NewTestEtcdServer(t, memberList)
if err != nil {
t.Fatalf("Failed to create test etcd server: %s", err)
}
// Valid reconciled tasks should be added to the running list.
mockdriver.On(
"ReconcileTasks",
0,
).Return(mesos.Status_DRIVER_RUNNING, nil).Once()
for _, taskStatus := range []*mesos.TaskStatus{
util.NewTaskStatus(
util.NewTaskID("etcd-1 localhost 0 "+strconv.Itoa(int(port1))+" 0"),
mesos.TaskState_TASK_RUNNING,
),
util.NewTaskStatus(
util.NewTaskID("etcd-2 localhost 0 "+strconv.Itoa(int(port2))+" 0"),
mesos.TaskState_TASK_RUNNING,
),
} {
mockdriver.runningStatuses <- taskStatus
}
// Scheduler should grow cluster to desired number of nodes.
offer := NewOffer("1")
mockdriver.On(
"LaunchTasks",
[]*mesos.OfferID{
offer.Id,
},
[]*mesos.TaskInfo{
{
Resources: []*mesos.Resource{
util.NewScalarResource("cpus", 1),
util.NewScalarResource("mem", 256),
util.NewScalarResource("disk", 4096),
util.NewRangesResource("ports", []*mesos.Value_Range{
util.NewValueRange(uint64(0), uint64(2)),
}),
},
},
},
&mesos.Filters{
RefuseSeconds: proto.Float64(1),
//.........这里部分代码省略.........
示例14: TestReconciliationOnStartup
// TestReconciliationOnStartup verifies that tasks reported as RUNNING during
// the post-registration reconciliation pass are added to the scheduler's
// running list.
func TestReconciliationOnStartup(t *gotesting.T) {
	testScheduler := NewEtcdScheduler(3, 0, 0, true, []*mesos.CommandInfo_URI{}, false, 4096, 1, 256)
	// runningStatuses is the channel the mock driver drains to simulate
	// status updates arriving in response to ReconcileTasks.
	mockdriver := &MockSchedulerDriver{
		runningStatuses: make(chan *mesos.TaskStatus, 10),
		scheduler:       testScheduler,
	}
	reconciliation := map[string]string{
		"etcd-1": "slave-1",
		"etcd-2": "slave-2",
		"etcd-3": "slave-3",
	}
	// Stub out persistence: reads return the map above, writes replace it.
	testScheduler.reconciliationInfoFunc = func([]string, string, string) (map[string]string, error) {
		return reconciliation, nil
	}
	testScheduler.updateReconciliationInfoFunc = func(info map[string]string, _ []string, _ string, _ string) error {
		reconciliation = info
		return nil
	}
	// Valid reconciled tasks should be added to the running list.
	// NOTE(review): task ids appear to be space-separated encodings of
	// "<name> <host> ..." fields — confirm the format against the scheduler.
	for _, taskStatus := range []*mesos.TaskStatus{
		util.NewTaskStatus(
			util.NewTaskID("etcd-1 localhost 0 0 0"),
			mesos.TaskState_TASK_RUNNING,
		),
		util.NewTaskStatus(
			util.NewTaskID("etcd-2 localhost 0 0 0"),
			mesos.TaskState_TASK_RUNNING,
		),
		util.NewTaskStatus(
			util.NewTaskID("etcd-3 localhost 0 0 0"),
			mesos.TaskState_TASK_RUNNING,
		),
	} {
		mockdriver.runningStatuses <- taskStatus
	}
	// Expect an initial ReconcileTasks call followed by one for the 3 known
	// tasks (the integers presumably match argument counts in the mock).
	mockdriver.Lock()
	mockdriver.On(
		"ReconcileTasks",
		0,
	).Return(mesos.Status_DRIVER_RUNNING, nil).Once()
	mockdriver.On(
		"ReconcileTasks",
		3,
	).Return(mesos.Status_DRIVER_RUNNING, nil).Once()
	mockdriver.Unlock()
	masterInfo := util.NewMasterInfo("master-1", 0, 0)
	masterInfo.Hostname = proto.String("test-host")
	testScheduler.Registered(
		mockdriver,
		util.NewFrameworkID("framework-1"),
		masterInfo,
	)
	// Registration triggers reconciliation asynchronously; give it a moment
	// before inspecting the scheduler's state under the driver lock.
	time.Sleep(50 * time.Millisecond)
	mockdriver.Lock()
	defer mockdriver.Unlock()
	assert.Equal(t, 3, len(testScheduler.running),
		"Scheduler should reconcile tasks properly.")
	mockdriver.AssertExpectations(t)
}
示例15: TestPlugin_LifeCycle
// Test to create the scheduler plugin with the config returned by the scheduler,
// and play through the whole life cycle of the plugin while creating pods, deleting
// and failing them.
func TestPlugin_LifeCycle(t *testing.T) {
t.Skip("disabled due to flakiness; see #10795")
assert := &EventAssertions{*assert.New(t)}
// create a fake pod watch. We use that below to submit new pods to the scheduler
podListWatch := NewMockPodsListWatch(api.PodList{})
// create fake apiserver
testApiServer := NewTestServer(t, api.NamespaceDefault, podListWatch)
defer testApiServer.server.Close()
// create executor with some data for static pods if set
executor := util.NewExecutorInfo(
util.NewExecutorID("executor-id"),
util.NewCommandInfo("executor-cmd"),
)
executor.Data = []byte{0, 1, 2}
// create scheduler
testScheduler := New(Config{
Executor: executor,
Client: client.NewOrDie(&client.Config{Host: testApiServer.server.URL, Version: testapi.Version()}),
ScheduleFunc: FCFSScheduleFunc,
Schedcfg: *schedcfg.CreateDefaultConfig(),
})
assert.NotNil(testScheduler.client, "client is nil")
assert.NotNil(testScheduler.executor, "executor is nil")
assert.NotNil(testScheduler.offers, "offer registry is nil")
// create scheduler process
schedulerProcess := ha.New(testScheduler)
// get plugin config from it
c := testScheduler.NewPluginConfig(schedulerProcess.Terminal(), http.DefaultServeMux, &podListWatch.ListWatch)
assert.NotNil(c)
// make events observable
eventObserver := NewEventObserver()
c.Recorder = eventObserver
// create plugin
p := NewPlugin(c)
assert.NotNil(p)
// run plugin
p.Run(schedulerProcess.Terminal())
defer schedulerProcess.End()
// init scheduler
err := testScheduler.Init(schedulerProcess.Master(), p, http.DefaultServeMux)
assert.NoError(err)
// create mock mesos scheduler driver
mockDriver := &joinableDriver{}
mockDriver.On("Start").Return(mesos.Status_DRIVER_RUNNING, nil).Once()
started := mockDriver.Upon()
mAny := mock.AnythingOfType
mockDriver.On("ReconcileTasks", mAny("[]*mesosproto.TaskStatus")).Return(mesos.Status_DRIVER_RUNNING, nil)
mockDriver.On("SendFrameworkMessage", mAny("*mesosproto.ExecutorID"), mAny("*mesosproto.SlaveID"), mAny("string")).
Return(mesos.Status_DRIVER_RUNNING, nil)
launchedTasks := make(chan *mesos.TaskInfo, 1)
launchTasksCalledFunc := func(args mock.Arguments) {
taskInfos := args.Get(1).([]*mesos.TaskInfo)
assert.Equal(1, len(taskInfos))
launchedTasks <- taskInfos[0]
}
mockDriver.On("LaunchTasks", mAny("[]*mesosproto.OfferID"), mAny("[]*mesosproto.TaskInfo"), mAny("*mesosproto.Filters")).
Return(mesos.Status_DRIVER_RUNNING, nil).Run(launchTasksCalledFunc)
// elect master with mock driver
driverFactory := ha.DriverFactory(func() (bindings.SchedulerDriver, error) {
return mockDriver, nil
})
schedulerProcess.Elect(driverFactory)
elected := schedulerProcess.Elected()
// driver will be started
<-started
// tell scheduler to be registered
testScheduler.Registered(
mockDriver,
util.NewFrameworkID("kubernetes-id"),
util.NewMasterInfo("master-id", (192<<24)+(168<<16)+(0<<8)+1, 5050),
)
// wait for being elected
<-elected
//TODO(jdef) refactor things above here into a test suite setup of some sort
// fake new, unscheduled pod
pod1 := NewTestPod(1)
podListWatch.Add(pod1, true) // notify watchers
//.........这里部分代码省略.........