本文整理匯總了Golang中github.com/NetSys/quilt/db.Conn類的典型用法代碼示例。如果您正苦於以下問題:Golang Conn類的具體用法?Golang Conn怎麽用?Golang Conn使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
在下文中一共展示了Conn類的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Golang代碼示例。
示例1: Init
// Init the first time the foreman operates on a new namespace. It queries the currently
// running VMs for their previously assigned roles, and writes them to the database.
func Init(conn db.Conn) {
	// Tear down any client connections left over from a previous namespace;
	// the minion map is rebuilt from scratch below.
	for _, m := range minions {
		m.client.Close()
	}
	minions = map[string]*minion{}

	conn.Transact(func(view db.Database) error {
		// Only machines that have fully booted (public IP, private IP, and
		// cloud ID all assigned) can be queried for their role.
		machines := view.SelectFromMachine(func(m db.Machine) bool {
			return m.PublicIP != "" && m.PrivateIP != "" && m.CloudID != ""
		})

		updateMinionMap(machines)

		// Refresh each minion's config over its client connection; a failed
		// RPC marks the minion as disconnected.
		forEachMinion(func(m *minion) {
			var err error
			m.config, err = m.client.getMinion()
			m.connected = err == nil
		})

		// Persist the roles reported by reachable minions back to the
		// database. Minions that are unreachable, or that reported no role,
		// are left untouched.
		for _, m := range minions {
			role := db.PBToRole(m.config.Role)
			if m.connected && role != db.None {
				m.machine.Role = role
				m.machine.Connected = m.connected
				view.Commit(m.machine)
			}
		}
		return nil
	})
}
示例2: Run
// Run blocks implementing the scheduler module.
func Run(conn db.Conn, dk docker.Client) {
	// Don't bother scheduling until at least one worker has booted.
	bootWait(conn)

	subnet := getMinionSubnet(conn)
	if err := dk.ConfigureNetwork(plugin.NetworkName, subnet); err != nil {
		log.WithError(err).Fatal("Failed to configure network plugin")
	}

	loopLog := util.NewEventTimer("Scheduler")
	tick := conn.TriggerTick(60, db.MinionTable, db.ContainerTable,
		db.PlacementTable, db.EtcdTable).C
	for range tick {
		loopLog.LogStart()

		minion, err := conn.MinionSelf()
		if err != nil {
			log.WithError(err).Warn("Missing self in the minion table.")
			continue
		}

		// Dispatch on this minion's role for the current iteration.
		switch minion.Role {
		case db.Worker:
			subnet = updateNetwork(conn, dk, subnet)
			runWorker(conn, dk, minion.PrivateIP, subnet)
		case db.Master:
			runMaster(conn)
		}

		loopLog.LogEnd()
	}
}
示例3: testContainerTxn
// testContainerTxn verifies that applying 'spec' results in exactly the
// expected set of containers in the database.
func testContainerTxn(t *testing.T, conn db.Conn, spec string) {
	compiled, err := stitch.FromJavascript(spec, stitch.DefaultImportGetter)
	assert.Nil(t, err)

	var dbContainers []db.Container
	conn.Transact(func(view db.Database) error {
		updatePolicy(view, db.Master, compiled.String())
		dbContainers = view.SelectFromContainer(nil)
		return nil
	})

	// Match each expected container against the database rows, removing
	// matches as we go so duplicates must each find their own row.
	for _, expected := range queryContainers(compiled) {
		matchIdx := -1
		for i, actual := range dbContainers {
			if expected.Image == actual.Image &&
				reflect.DeepEqual(expected.Command, actual.Command) &&
				util.EditDistance(actual.Labels, expected.Labels) == 0 {
				matchIdx = i
				break
			}
		}
		assert.True(t, matchIdx != -1)
		if matchIdx != -1 {
			dbContainers = append(
				dbContainers[:matchIdx], dbContainers[matchIdx+1:]...)
		}
	}

	// Every database row must have been claimed by some expected container.
	assert.Empty(t, dbContainers)
}
示例4: selectACL
// selectACL reads the ACL row from the database inside a transaction.
func selectACL(conn db.Conn) (db.ACL, error) {
	var acl db.ACL
	err := conn.Transact(func(view db.Database) error {
		var txnErr error
		acl, txnErr = view.GetACL()
		return txnErr
	})
	return acl, err
}
示例5: watchLeader
// watchLeader keeps the etcd table's LeaderIP in sync with the leader key in
// the cluster store, waking on either a store watch or a periodic tick.
func watchLeader(conn db.Conn, store Store) {
	// Poll at least every 30 seconds, or faster if the election TTL is short.
	tickRate := electionTTL
	if tickRate > 30 {
		tickRate = 30
	}

	watch := store.Watch(leaderKey, 1*time.Second)
	trigger := conn.TriggerTick(tickRate, db.EtcdTable)
	for {
		currentLeader, _ := store.Get(leaderKey)
		conn.Transact(func(view db.Database) error {
			rows := view.SelectFromEtcd(nil)
			// Only commit when exactly one etcd row exists.
			if len(rows) != 1 {
				return nil
			}
			rows[0].LeaderIP = currentLeader
			view.Commit(rows[0])
			return nil
		})

		// Block until either the store changes or the tick fires.
		select {
		case <-watch:
		case <-trigger.C:
		}
	}
}
示例6: runNetworkWorker
// runNetworkWorker mirrors the label and container directories from the
// cluster store into the database each time the wake channel fires.
func runNetworkWorker(conn db.Conn, store Store) {
	// If the directories don't exist, create them so we may watch them. If they
	// exist already these will return an error that we won't log, but that's ok
	// cause the loop will error too.
	store.Mkdir(labelDir)
	store.Mkdir(containerDir)

	for range wakeChan(conn, store) {
		labels, labelErr := getDirectory(store, labelDir)
		containers, containerErr := getDirectory(store, containerDir)

		err := labelErr
		if containerErr != nil {
			err = containerErr
		}
		if err != nil {
			log.WithError(err).Warn("Failed to read from cluster store.")
			continue
		}

		conn.Transact(func(view db.Database) error {
			readContainerTransact(view, containers)
			readLabelTransact(view, labels)
			return nil
		})
	}
}
示例7: testConnectionTxn
// testConnectionTxn verifies that applying 'spec' results in exactly the
// expected set of connections in the database.
func testConnectionTxn(t *testing.T, conn db.Conn, spec string) {
	compiled, err := stitch.FromJavascript(spec, stitch.DefaultImportGetter)
	assert.Nil(t, err)

	var dbConns []db.Connection
	conn.Transact(func(view db.Database) error {
		updatePolicy(view, db.Master, compiled.String())
		dbConns = view.SelectFromConnection(nil)
		return nil
	})

	// Match each expected connection against the database rows, removing
	// matches as we go so duplicates must each find their own row.
	for _, expected := range compiled.Connections {
		matchIdx := -1
		for i, actual := range dbConns {
			if expected.From == actual.From && expected.To == actual.To &&
				expected.MinPort == actual.MinPort &&
				expected.MaxPort == actual.MaxPort {
				matchIdx = i
				break
			}
		}
		assert.True(t, matchIdx != -1)
		if matchIdx != -1 {
			dbConns = append(dbConns[:matchIdx], dbConns[matchIdx+1:]...)
		}
	}

	// Every database row must have been claimed by some expected connection.
	assert.Empty(t, dbConns)
}
示例8: runMaster
// runMaster performs the master's scheduling duty: container placement,
// which only the etcd leader is allowed to do.
func runMaster(conn db.Conn) {
	conn.Transact(func(view db.Database) error {
		if !view.EtcdLeader() {
			return nil
		}
		placeContainers(view)
		return nil
	})
}
示例9: syncAuthorizedKeys
// syncAuthorizedKeys re-syncs SSH keys whenever the minion table changes,
// and at least every 30 seconds.
func syncAuthorizedKeys(conn db.Conn) {
	waitForMinion(conn)

	trigger := conn.TriggerTick(30, db.MinionTable).C
	for range trigger {
		err := runOnce(conn)
		if err == nil {
			continue
		}
		log.WithError(err).Error("Failed to sync keys")
	}
}
示例10: waitForMinion
// waitForMinion polls until this minion's own row appears in the database.
func waitForMinion(conn db.Conn) {
	for {
		_, err := conn.MinionSelf()
		if err == nil {
			return
		}
		time.Sleep(500 * time.Millisecond)
	}
}
示例11: Run
// Run continually checks 'conn' for cluster changes and recreates the cluster as
// needed.
func Run(conn db.Conn) {
	var clst *cluster
	tick := conn.TriggerTick(30, db.ClusterTable, db.MachineTable, db.ACLTable).C
	for range tick {
		clst = updateCluster(conn, clst)

		// Somewhat of a crude rate-limit of once every five seconds to avoid
		// stressing out the cloud providers with too many API calls.
		sleep(5 * time.Second)
	}
}
示例12: runNetwork
// runNetwork synchronizes network state between etcd and the database on each
// wakeup: it reads the shared etcd state, updates the worker's view, (if this
// minion is the etcd leader) writes authoritative container state back to etcd,
// and refreshes the label table — all inside a single DB transaction.
func runNetwork(conn db.Conn, store Store) {
	for range wakeChan(conn, store) {
		// If the etcd read failed, we only want to update the db if it
		// failed because a key was missing (has not been created yet).
		// In all other cases, we skip this iteration.
		etcdData, err := readEtcd(store)
		if err != nil {
			etcdErr, ok := err.(client.Error)
			if !ok || etcdErr.Code != client.ErrorCodeKeyNotFound {
				log.WithError(err).Error("Etcd transaction failed.")
				continue
			}
			log.WithError(err).Debug()
		}
		leader := false
		var containers []db.Container
		conn.Transact(func(view db.Database) error {
			leader = view.EtcdLeader()
			// Only containers already scheduled onto a minion are relevant.
			containers = view.SelectFromContainer(func(c db.Container) bool {
				return c.Minion != ""
			})
			// If we can identify ourselves as a worker, fold the etcd data
			// into our local worker state.
			minion, err := view.MinionSelf()
			if err == nil && minion.Role == db.Worker {
				updateWorker(view, minion, store, etcdData)
			}
			ipMap, err := loadMinionIPs(store)
			if err != nil {
				log.WithError(err).Error("Etcd read minion IPs failed")
				return nil
			}
			// It would likely be more efficient to perform the etcd write
			// outside of the DB transact. But, if we perform the writes
			// after the transact, there is no way to ensure that the writes
			// were successful before updating the DB with the information
			// produced by the updateEtcd* functions (not considering the
			// etcd writes they perform).
			if leader {
				// Note: updateEtcd both writes to etcd and returns the
				// refreshed etcdData used by the updates below.
				etcdData, err = updateEtcd(store, etcdData, containers)
				if err != nil {
					log.WithError(err).Error("Etcd update failed.")
					return nil
				}
				updateLeaderDBC(view, containers, etcdData, ipMap)
			}
			updateDBLabels(view, etcdData, ipMap)
			return nil
		})
	}
}
示例13: Run
// Run blocks implementing the network services.
func Run(conn db.Conn, dk docker.Client) {
	loopLog := util.NewEventTimer("Network")

	trigger := conn.TriggerTick(30, db.MinionTable, db.ContainerTable,
		db.ConnectionTable, db.LabelTable, db.EtcdTable).C
	for range trigger {
		loopLog.LogStart()
		runWorker(conn, dk)
		runMaster(conn)
		loopLog.LogEnd()
	}
}
示例14: UpdatePolicy
// UpdatePolicy executes transactions on 'conn' to make it reflect a new policy,
// 'stitch'.
func UpdatePolicy(conn db.Conn, stitch stitch.Stitch) error {
	return conn.Transact(func(view db.Database) error {
		return updateTxn(view, stitch)
	})
}
示例15: bootWait
// bootWait blocks until at least one worker minion appears in the database.
// The scheduler has nowhere to place containers before a worker exists.
//
// Fix: the original slept 30 seconds unconditionally at the end of every
// iteration, including the one in which a worker was finally found (the loop
// condition was only re-checked after the sleep), delaying startup by up to
// 30 seconds. We now check for workers before sleeping and return immediately
// once one is present.
func bootWait(conn db.Conn) {
	for {
		workerCount := 0
		for _, m := range conn.SelectFromMinion(nil) {
			if m.Role == db.Worker {
				workerCount++
			}
		}
		if workerCount > 0 {
			return
		}

		// No workers yet; poll slowly since minions take a while to boot.
		time.Sleep(30 * time.Second)
	}
}