This article collects typical usage examples of the Machine.Role method from the Go package github.com/NetSys/quilt/db. If you are unsure what Machine.Role does or how to use it, the curated examples below may help. For more context, you can also look at the enclosing type, github.com/NetSys/quilt/db.Machine.
Three code examples of Machine.Role are shown below.
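Before the full examples, here is a minimal, self-contained sketch of the typical pattern: parse a role string with db.ParseRole and assign the result to Machine.Role. This assumes the quilt repository is on your GOPATH, and that "Master" is one of the role strings db.ParseRole accepts (inferred from Example 1 below).

package main

import (
	"fmt"

	"github.com/NetSys/quilt/db"
)

func main() {
	// Parse a user-supplied role string. As in Example 1 below,
	// db.ParseRole returns an error for unrecognized roles.
	role, err := db.ParseRole("Master")
	if err != nil {
		fmt.Println("invalid role:", err)
		return
	}

	// Assign the parsed role to a machine record.
	var m db.Machine
	m.Role = role
	fmt.Println(m.Role == db.Master) // prints "true"
}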
Example 1: toDBMachine
// toDBMachine converts machines specified in the Stitch into db.Machines that can
// be compared against what's already in the db.
// Specifically, it sets the role of the db.Machine, the size (which may depend
// on RAM and CPU constraints), and the provider.
// Additionally, it skips machines with invalid roles, sizes or providers.
func toDBMachine(machines []stitch.Machine, maxPrice float64) []db.Machine {
	var hasMaster, hasWorker bool
	var dbMachines []db.Machine
	for _, stitchm := range machines {
		var m db.Machine

		role, err := db.ParseRole(stitchm.Role)
		if err != nil {
			log.WithError(err).Error("Error parsing role.")
			continue
		}
		m.Role = role

		hasMaster = hasMaster || role == db.Master
		hasWorker = hasWorker || role == db.Worker

		p, err := db.ParseProvider(stitchm.Provider)
		if err != nil {
			log.WithError(err).Error("Error parsing provider.")
			continue
		}
		m.Provider = p

		m.Size = stitchm.Size
		if m.Size == "" {
			providerInst := provider.New(p)
			m.Size = providerInst.ChooseSize(
				stitchm.RAM, stitchm.CPU, maxPrice)
			if m.Size == "" {
				log.Errorf("No valid size for %v, skipping.", m)
				continue
			}
		}

		m.DiskSize = stitchm.DiskSize
		if m.DiskSize == 0 {
			m.DiskSize = defaultDiskSize
		}

		m.SSHKeys = stitchm.SSHKeys
		m.Region = stitchm.Region
		dbMachines = append(dbMachines, provider.DefaultRegion(m))
	}
	if !hasMaster && hasWorker {
		log.Warning("A Worker was specified but no masters.")
		return nil
	} else if hasMaster && !hasWorker {
		log.Warning("A Master was specified but no workers.")
		return nil
	}

	return dbMachines
}
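A hedged usage sketch for toDBMachine, written as it might appear at a call site in the same package (so toDBMachine and its imports are assumed to be in scope). The stitch.Machine fields used here (Role, Provider, Size) are exactly the ones the function reads above; the "Amazon" provider string and the price cap are illustrative assumptions, not taken from the quilt source.

func exampleToDBMachine() []db.Machine {
	// Sizes are set explicitly so provider.ChooseSize is never consulted.
	machines := []stitch.Machine{
		{Role: "Master", Provider: "Amazon", Size: "m4.large"},
		{Role: "Worker", Provider: "Amazon", Size: "m4.large"},
	}
	// 1.0 is an assumed max hourly price; it only matters when a size
	// must be chosen from RAM/CPU constraints.
	return toDBMachine(machines, 1.0)
}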
Example 2: TestConfigConsistency
func TestConfigConsistency(t *testing.T) {
	masterRole := db.RoleToPB(db.Master)
	workerRole := db.RoleToPB(db.Worker)

	fm, _ := startTest()

	var master, worker db.Machine
	fm.conn.Transact(func(view db.Database) error {
		master = view.InsertMachine()
		master.PublicIP = "1.1.1.1"
		master.PrivateIP = master.PublicIP
		master.CloudID = "ID1"
		view.Commit(master)

		worker = view.InsertMachine()
		worker.PublicIP = "2.2.2.2"
		worker.PrivateIP = worker.PublicIP
		worker.CloudID = "ID2"
		view.Commit(worker)

		return nil
	})

	fm.init()

	fm.conn.Transact(func(view db.Database) error {
		master.Role = db.Master
		worker.Role = db.Worker
		view.Commit(master)
		view.Commit(worker)
		return nil
	})

	fm.runOnce()

	checkRoles := func(fore foreman) {
		r := fore.minions["1.1.1.1"].client.(*fakeClient).mc.Role
		if r != masterRole {
			t.Errorf("Master has role %v, should be %v", r, masterRole)
		}

		r = fore.minions["2.2.2.2"].client.(*fakeClient).mc.Role
		if r != workerRole {
			t.Errorf("Worker has role %v, should be %v", r, workerRole)
		}
	}
	checkRoles(fm)

	fm.stop()

	newfm, clients := startTest()
	newfm.conn = fm.conn

	// Insert the clients into the client list to simulate fetching
	// from the remote cluster.
	clients.clients["1.1.1.1"] = &fakeClient{clients, "1.1.1.1",
		pb.MinionConfig{Role: masterRole}, pb.EtcdMembers{}}
	clients.clients["2.2.2.2"] = &fakeClient{clients, "2.2.2.2",
		pb.MinionConfig{Role: workerRole}, pb.EtcdMembers{}}

	newfm.init()
	newfm.runOnce()
	checkRoles(newfm)

	// After many runs, the roles should never change.
	for i := 0; i < 25; i++ {
		newfm.runOnce()
	}
	checkRoles(newfm)

	// Ensure that the DB machines have the correct roles as well.
	newfm.conn.Transact(func(view db.Database) error {
		machines := view.SelectFromMachine(nil)
		for _, m := range machines {
			if m.PublicIP == "1.1.1.1" && m.Role != db.Master {
				t.Errorf("db Master had role %v, expected %v", m.Role,
					db.Master)
			}
			if m.PublicIP == "2.2.2.2" && m.Role != db.Worker {
				t.Errorf("db Worker had role %v, expected %v", m.Role,
					db.Worker)
			}
		}
		return nil
	})
}
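Both versions of this test lean on db.RoleToPB to translate a database role into the protobuf representation carried by pb.MinionConfig. A two-line sketch of that conversion, using only identifiers that appear in the tests themselves:

// db.Role -> protobuf role, then embedded in a minion config,
// as stored in fakeClient.mc above.
masterRole := db.RoleToPB(db.Master)
minionCfg := pb.MinionConfig{Role: masterRole}
_ = minionCfg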
Example 3: TestConfigConsistency
func TestConfigConsistency(t *testing.T) {
	masterRole := db.RoleToPB(db.Master)
	workerRole := db.RoleToPB(db.Worker)

	conn, clients := startTest()

	var master, worker db.Machine
	conn.Transact(func(view db.Database) error {
		master = view.InsertMachine()
		master.PublicIP = "1.1.1.1"
		master.PrivateIP = master.PublicIP
		master.CloudID = "ID1"
		view.Commit(master)

		worker = view.InsertMachine()
		worker.PublicIP = "2.2.2.2"
		worker.PrivateIP = worker.PublicIP
		worker.CloudID = "ID2"
		view.Commit(worker)

		return nil
	})

	Init(conn)

	conn.Transact(func(view db.Database) error {
		master.Role = db.Master
		worker.Role = db.Worker
		view.Commit(master)
		view.Commit(worker)
		return nil
	})

	RunOnce(conn)

	checkRoles := func() {
		r := minions["1.1.1.1"].client.(*fakeClient).mc.Role
		assert.Equal(t, masterRole, r)

		r = minions["2.2.2.2"].client.(*fakeClient).mc.Role
		assert.Equal(t, workerRole, r)
	}
	checkRoles()

	minions = map[string]*minion{}

	// Insert the clients into the client list to simulate fetching
	// from the remote cluster.
	clients.clients["1.1.1.1"] = &fakeClient{clients, "1.1.1.1",
		pb.MinionConfig{Role: masterRole}}
	clients.clients["2.2.2.2"] = &fakeClient{clients, "2.2.2.2",
		pb.MinionConfig{Role: workerRole}}

	Init(conn)
	RunOnce(conn)
	checkRoles()

	// After many runs, the roles should never change.
	for i := 0; i < 25; i++ {
		RunOnce(conn)
	}
	checkRoles()

	// Ensure that the DB machines have the correct roles as well.
	conn.Transact(func(view db.Database) error {
		machines := view.SelectFromMachine(nil)
		for _, m := range machines {
			if m.PublicIP == "1.1.1.1" {
				assert.Equal(t, db.Role(db.Master), m.Role)
			}
			if m.PublicIP == "2.2.2.2" {
				assert.Equal(t, db.Role(db.Worker), m.Role)
			}
		}
		return nil
	})
}