This article collects typical code examples of the Golang method github.com/NetSys/quilt/db.Conn.TriggerTick. If you have been wondering what exactly Conn.TriggerTick does, how to call it, or what real uses of Conn.TriggerTick look like, the curated method examples here may help. You can also explore further usage examples of the type github.com/NetSys/quilt/db.Conn, which defines this method.
The sections below present 12 code examples of Conn.TriggerTick, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Golang code examples.
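Before the examples, here is a minimal sketch of the pattern they all share, pieced together from how the examples below use the API: conn.TriggerTick(interval, tables...) returns a trigger whose channel C fires whenever one of the listed database tables changes, and at least once per interval (the examples pass 30 or 60, apparently seconds); Stop() releases the trigger. This sketch assumes the db package is imported from github.com/NetSys/quilt/db; the function name pollMinions and the doWork helper are illustrative placeholders, not part of quilt.

func pollMinions(conn db.Conn) {
    // Fire when db.MinionTable changes, or at least every 30 seconds.
    trig := conn.TriggerTick(30, db.MinionTable)
    defer trig.Stop() // release the trigger when the loop exits

    for range trig.C {
        doWork(conn) // placeholder for the actual reconciliation work
    }
}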
Example 1: Run
// Run blocks implementing the scheduler module.
func Run(conn db.Conn, dk docker.Client) {
    bootWait(conn)
    subnet := getMinionSubnet(conn)
    err := dk.ConfigureNetwork(plugin.NetworkName, subnet)
    if err != nil {
        log.WithError(err).Fatal("Failed to configure network plugin")
    }
    loopLog := util.NewEventTimer("Scheduler")
    trig := conn.TriggerTick(60, db.MinionTable, db.ContainerTable,
        db.PlacementTable, db.EtcdTable).C
    for range trig {
        loopLog.LogStart()
        minion, err := conn.MinionSelf()
        if err != nil {
            log.WithError(err).Warn("Missing self in the minion table.")
            continue
        }
        if minion.Role == db.Worker {
            subnet = updateNetwork(conn, dk, subnet)
            runWorker(conn, dk, minion.PrivateIP, subnet)
        } else if minion.Role == db.Master {
            runMaster(conn)
        }
        loopLog.LogEnd()
    }
}
Example 2: watchLeader
func watchLeader(conn db.Conn, store Store) {
    tickRate := electionTTL
    if tickRate > 30 {
        tickRate = 30
    }
    watch := store.Watch(leaderKey, 1*time.Second)
    trigg := conn.TriggerTick(tickRate, db.EtcdTable)
    for {
        leader, _ := store.Get(leaderKey)
        conn.Transact(func(view db.Database) error {
            etcdRows := view.SelectFromEtcd(nil)
            if len(etcdRows) == 1 {
                etcdRows[0].LeaderIP = leader
                view.Commit(etcdRows[0])
            }
            return nil
        })
        select {
        case <-watch:
        case <-trigg.C:
        }
    }
}
Example 3: syncAuthorizedKeys
func syncAuthorizedKeys(conn db.Conn) {
    waitForMinion(conn)
    for range conn.TriggerTick(30, db.MinionTable).C {
        if err := runOnce(conn); err != nil {
            log.WithError(err).Error("Failed to sync keys")
        }
    }
}
Example 4: Run
// Run continually checks 'conn' for cluster changes and recreates the cluster as
// needed.
func Run(conn db.Conn) {
    var clst *cluster
    for range conn.TriggerTick(30, db.ClusterTable, db.MachineTable, db.ACLTable).C {
        clst = updateCluster(conn, clst)
        // Somewhat of a crude rate-limit of once every five seconds to avoid
        // stressing out the cloud providers with too many API calls.
        sleep(5 * time.Second)
    }
}
Example 5: Run
// Run blocks implementing the network services.
func Run(conn db.Conn, dk docker.Client) {
    loopLog := util.NewEventTimer("Network")
    for range conn.TriggerTick(30, db.MinionTable, db.ContainerTable,
        db.ConnectionTable, db.LabelTable, db.EtcdTable).C {
        loopLog.LogStart()
        runWorker(conn, dk)
        runMaster(conn)
        loopLog.LogEnd()
    }
}
Example 6: campaign
func campaign(conn db.Conn, store Store) {
    watch := store.Watch(leaderKey, 1*time.Second)
    trigg := conn.TriggerTick(electionTTL/2, db.EtcdTable)
    oldMaster := false
    for {
        select {
        case <-watch:
        case <-trigg.C:
        }
        etcdRows := conn.SelectFromEtcd(nil)
        minion, err := conn.MinionSelf()
        master := err == nil && minion.Role == db.Master && len(etcdRows) == 1
        if !master {
            if oldMaster {
                commitLeader(conn, false, "")
            }
            continue
        }
        IP := minion.PrivateIP
        if IP == "" {
            continue
        }
        ttl := electionTTL * time.Second
        if etcdRows[0].Leader {
            err = store.Update(leaderKey, IP, ttl)
        } else {
            err = store.Create(leaderKey, IP, ttl)
        }
        if err == nil {
            commitLeader(conn, true, IP)
        } else {
            clientErr, ok := err.(client.Error)
            if !ok || clientErr.Code != client.ErrorCodeNodeExist {
                log.WithError(err).Warn("Error setting leader key")
                commitLeader(conn, false, "")
                // Give things a chance to settle down.
                time.Sleep(electionTTL * time.Second)
            } else {
                commitLeader(conn, false)
            }
        }
    }
}
Example 7: Run
// Run blocks implementing the network services.
func Run(conn db.Conn, dk docker.Client) {
    for {
        odb, err := ovsdb.Open()
        if err == nil {
            odb.Close()
            break
        }
        log.WithError(err).Debug("Could not connect to ovsdb-server.")
        time.Sleep(5 * time.Second)
    }
    for range conn.TriggerTick(30, db.MinionTable, db.ContainerTable,
        db.ConnectionTable, db.LabelTable, db.EtcdTable).C {
        runWorker(conn, dk)
        runMaster(conn)
    }
}
Example 8: Run
// Run blocks implementing the scheduler module.
func Run(conn db.Conn) {
    var sched scheduler
    for range conn.TriggerTick(30, db.MinionTable, db.EtcdTable, db.ContainerTable,
        db.PlacementTable).C {
        minion, err := conn.MinionSelf()
        if err != nil || !conn.EtcdLeader() || minion.Role != db.Master ||
            minion.PrivateIP == "" {
            sched = nil
            continue
        }
        if sched == nil {
            ip := minion.PrivateIP
            sched = newSwarm(docker.New(fmt.Sprintf("tcp://%s:2377", ip)))
            time.Sleep(60 * time.Second)
        }
        placements := conn.SelectFromPlacement(nil)
        connections := conn.SelectFromConnection(nil)
        // Each time we run through this loop, we may boot or terminate
        // containers. These modifications should, in turn, be reflected in the
        // database. For this reason, we attempt to sync until no database
        // modifications happen (up to an arbitrary limit of three tries).
        for i := 0; i < 3; i++ {
            dkc, err := sched.list()
            if err != nil {
                log.WithError(err).Warning("Failed to get containers.")
                break
            }
            var boot []db.Container
            var term []string
            conn.Transact(func(view db.Database) error {
                term, boot = syncDB(view, dkc)
                return nil
            })
            if len(term) == 0 && len(boot) == 0 {
                break
            }
            sched.terminate(term)
            sched.boot(boot, placements, connections)
        }
    }
}
Example 9: newCluster
func newCluster(conn db.Conn, namespace string) *cluster {
    clst := &cluster{
        conn:      conn,
        trigger:   conn.TriggerTick(30, db.ClusterTable, db.MachineTable),
        fm:        createForeman(conn),
        namespace: namespace,
        providers: make(map[db.Provider]provider.Provider),
    }
    for _, p := range allProviders {
        inst := provider.New(p)
        if err := inst.Connect(namespace); err == nil {
            clst.providers[p] = inst
        } else {
            log.Debugf("Failed to connect to provider %s: %s", p, err)
        }
    }
    return clst
}
Example 10: Run
// Run continually checks 'conn' for cluster changes and recreates the cluster as
// needed.
func Run(conn db.Conn) {
    var clst *cluster
    for range conn.TriggerTick(60, db.ClusterTable).C {
        var dbCluster db.Cluster
        err := conn.Transact(func(db db.Database) error {
            var err error
            dbCluster, err = db.GetCluster()
            return err
        })
        // Guard against a nil clst on the first iteration before comparing
        // namespaces.
        if err == nil && (clst == nil || clst.namespace != dbCluster.Namespace) {
            if clst != nil {
                clst.fm.stop()
                clst.trigger.Stop()
            }
            clst = newCluster(conn, dbCluster.Namespace)
            go clst.listen()
        }
    }
}
Example 11: wakeChan
// wakeChan collapses the various channels these functions wait on into a single
// channel. Multiple redundant pings will be coalesced into a single message.
func wakeChan(conn db.Conn, store Store) chan struct{} {
    minionWatch := store.Watch(minionDir, 1*time.Second)
    trigg := conn.TriggerTick(30, db.MinionTable, db.ContainerTable, db.LabelTable,
        db.EtcdTable).C
    c := make(chan struct{}, 1)
    go func() {
        for {
            select {
            case <-minionWatch:
            case <-trigg:
            }
            select {
            case c <- struct{}{}:
            default: // There's a notification in queue, no need for another.
            }
        }
    }()
    return c
}
Example 12: Run
// Run updates the database in response to stitch changes in the cluster table.
func Run(conn db.Conn) {
    for range conn.TriggerTick(30, db.ClusterTable, db.MachineTable, db.ACLTable).C {
        conn.Transact(updateTxn)
    }
}