This article collects typical usage examples of the Watch function from github.com/docker/swarmkit/manager/state in Golang. If you have been wondering how the Watch function is actually used in practice, the hand-picked code examples here may help.
Below are 15 code examples of the Watch function, sorted by popularity by default.
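Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: register the watch on the store's WatchQueue() before making the change you want to observe, receive the event from the returned channel, and release the watch with the returned cancel function. The import paths and the bare-bones task in this sketch are assumptions for illustration only and are not taken from any single example below.

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state"
	"github.com/docker/swarmkit/manager/state/store"
)

func main() {
	// In-memory store; every example below watches a store's watch queue.
	s := store.NewMemoryStore(nil)
	defer s.Close()

	// Register the watch before mutating the store so no event is missed.
	watch, cancel := state.Watch(s.WatchQueue(), state.EventCreateTask{})
	defer cancel()

	// Create a task; this emits an EventCreateTask on the watch channel.
	// (A task with only an ID is an illustrative assumption.)
	if err := s.Update(func(tx store.Tx) error {
		return store.CreateTask(tx, &api.Task{ID: "example-task"})
	}); err != nil {
		panic(err)
	}

	// Receive the event and recover the task with a type assertion,
	// exactly as the examples below do.
	e := <-watch
	fmt.Println("observed task:", e.(state.EventCreateTask).Task.ID)
}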
Example 1: WaitForCluster
// WaitForCluster waits until the node observes that the cluster-wide config is
// committed to raft. This ensures that we can see and serve information
// related to the cluster.
func WaitForCluster(ctx context.Context, n *Node) (cluster *api.Cluster, err error) {
	watch, cancel := state.Watch(n.MemoryStore().WatchQueue(), state.EventCreateCluster{})
	defer cancel()

	var clusters []*api.Cluster
	n.MemoryStore().View(func(readTx store.ReadTx) {
		clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName))
	})
	if err != nil {
		return nil, err
	}

	if len(clusters) == 1 {
		cluster = clusters[0]
	} else {
		select {
		case e := <-watch:
			cluster = e.(state.EventCreateCluster).Cluster
		case <-ctx.Done():
			return nil, ctx.Err()
		}
	}

	return cluster, nil
}
Example 2: Run
// Run is the ConstraintEnforcer's main loop.
func (ce *ConstraintEnforcer) Run() {
	defer close(ce.doneChan)

	watcher, cancelWatch := state.Watch(ce.store.WatchQueue(), state.EventUpdateNode{})
	defer cancelWatch()

	var (
		nodes []*api.Node
		err   error
	)
	ce.store.View(func(readTx store.ReadTx) {
		nodes, err = store.FindNodes(readTx, store.All)
	})
	if err != nil {
		log.L.WithError(err).Error("failed to check nodes for noncompliant tasks")
	} else {
		for _, node := range nodes {
			ce.shutdownNoncompliantTasks(node)
		}
	}

	for {
		select {
		case event := <-watcher:
			node := event.(state.EventUpdateNode).Node
			ce.shutdownNoncompliantTasks(node)
		case <-ce.stopChan:
			return
		}
	}
}
Example 3: TestSchedulerNoReadyNodes
func TestSchedulerNoReadyNodes(t *testing.T) {
	ctx := context.Background()
	initialTask := &api.Task{
		ID:           "id1",
		DesiredState: api.TaskStateRunning,
		ServiceAnnotations: api.Annotations{
			Name: "name1",
		},
		Status: api.TaskStatus{
			State: api.TaskStatePending,
		},
	}

	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	err := s.Update(func(tx store.Tx) error {
		// Add initial task
		assert.NoError(t, store.CreateTask(tx, initialTask))
		return nil
	})
	assert.NoError(t, err)

	scheduler := New(s)

	watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
	defer cancel()

	go func() {
		assert.NoError(t, scheduler.Run(ctx))
	}()
	defer scheduler.Stop()

	failure := watchAssignmentFailure(t, watch)
	assert.Equal(t, "no suitable node", failure.Status.Message)

	err = s.Update(func(tx store.Tx) error {
		// Create a ready node. The task should get assigned to this
		// node.
		node := &api.Node{
			ID: "newnode",
			Spec: api.NodeSpec{
				Annotations: api.Annotations{
					Name: "newnode",
				},
			},
			Status: api.NodeStatus{
				State: api.NodeStatus_READY,
			},
		}
		assert.NoError(t, store.CreateNode(tx, node))
		return nil
	})
	assert.NoError(t, err)

	assignment := watchAssignment(t, watch)
	assert.Equal(t, "newnode", assignment.NodeID)
}
Example 4: updateTask
func (u *Updater) updateTask(ctx context.Context, slot slot, updated *api.Task) error {
	// Kick off the watch before even creating the updated task. This is in order to avoid missing any event.
	taskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{
		Task:   &api.Task{ID: updated.ID},
		Checks: []state.TaskCheckFunc{state.TaskCheckID},
	})
	defer cancel()

	var delayStartCh <-chan struct{}
	// Atomically create the updated task and bring down the old one.
	_, err := u.store.Batch(func(batch *store.Batch) error {
		err := batch.Update(func(tx store.Tx) error {
			if err := store.CreateTask(tx, updated); err != nil {
				return err
			}
			return nil
		})
		if err != nil {
			return err
		}

		u.removeOldTasks(ctx, batch, slot)

		for _, t := range slot {
			if t.DesiredState == api.TaskStateRunning {
				// Wait for the old task to stop or time out, and then set the new one
				// to RUNNING.
				delayStartCh = u.restarts.DelayStart(ctx, nil, t, updated.ID, 0, true)
				break
			}
		}

		return nil
	})
	if err != nil {
		return err
	}

	if delayStartCh != nil {
		<-delayStartCh
	}

	// Wait for the new task to come up.
	// TODO(aluzzardi): Consider adding a timeout here.
	for {
		select {
		case e := <-taskUpdates:
			updated = e.(state.EventUpdateTask).Task
			if updated.Status.State >= api.TaskStateRunning {
				return nil
			}
		case <-u.stopChan:
			return nil
		}
	}
}
Example 5: updateTask
func (u *Updater) updateTask(ctx context.Context, original, updated *api.Task) error {
	log.G(ctx).Debugf("replacing %s with %s", original.ID, updated.ID)

	// Kick off the watch before even creating the updated task. This is in order to avoid missing any event.
	taskUpdates, cancel := state.Watch(u.watchQueue, state.EventUpdateTask{
		Task:   &api.Task{ID: updated.ID},
		Checks: []state.TaskCheckFunc{state.TaskCheckID},
	})
	defer cancel()

	var delayStartCh <-chan struct{}
	// Atomically create the updated task and bring down the old one.
	err := u.store.Update(func(tx store.Tx) error {
		t := store.GetTask(tx, original.ID)
		if t == nil {
			return fmt.Errorf("task %s not found while trying to update it", original.ID)
		}
		if t.DesiredState > api.TaskStateRunning {
			return fmt.Errorf("task %s was already shut down when reached by updater", original.ID)
		}
		t.DesiredState = api.TaskStateShutdown
		if err := store.UpdateTask(tx, t); err != nil {
			return err
		}
		if err := store.CreateTask(tx, updated); err != nil {
			return err
		}
		// Wait for the old task to stop or time out, and then set the new one
		// to RUNNING.
		delayStartCh = u.restarts.DelayStart(ctx, tx, original, updated.ID, 0, true)
		return nil
	})
	if err != nil {
		return err
	}

	<-delayStartCh

	// Wait for the new task to come up.
	// TODO(aluzzardi): Consider adding a timeout here.
	for {
		select {
		case e := <-taskUpdates:
			updated = e.(state.EventUpdateTask).Task
			if updated.Status.State >= api.TaskStateRunning {
				return nil
			}
		case <-u.stopChan:
			return nil
		}
	}
}
Example 6: NewTaskReaper
// NewTaskReaper creates a new TaskReaper.
func NewTaskReaper(store *store.MemoryStore) *TaskReaper {
	watcher, cancel := state.Watch(store.WatchQueue(), state.EventCreateTask{}, state.EventUpdateCluster{})

	return &TaskReaper{
		store:       store,
		watcher:     watcher,
		cancelWatch: cancel,
		dirty:       make(map[instanceTuple]struct{}),
		stopChan:    make(chan struct{}),
		doneChan:    make(chan struct{}),
	}
}
Example 7: Run
func (s *subscription) Run(ctx context.Context) {
	s.ctx, s.cancel = context.WithCancel(ctx)

	if s.follow() {
		wq := s.store.WatchQueue()
		ch, cancel := state.Watch(wq, state.EventCreateTask{}, state.EventUpdateTask{})
		go func() {
			defer cancel()
			s.watch(ch)
		}()
	}

	s.match()
}
Example 8: TestSetup
func TestSetup(t *testing.T) {
	store := store.NewMemoryStore(nil)
	assert.NotNil(t, store)
	defer store.Close()

	watch, cancel := state.Watch(store.WatchQueue() /*state.EventCreateTask{}, state.EventUpdateTask{}*/)
	defer cancel()

	observedTask1 := SetupCluster(t, store, watch)

	assert.Equal(t, observedTask1.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask1.ServiceAnnotations.Name, "name1")
	assert.Equal(t, observedTask1.NodeID, "id1")
}
Example 9: TestAddNode
func TestAddNode(t *testing.T) {
	store := store.NewMemoryStore(nil)
	assert.NotNil(t, store)
	defer store.Close()

	watch, cancel := state.Watch(store.WatchQueue())
	defer cancel()

	SetupCluster(t, store, watch)

	addNode(t, store, node2)
	observedTask2 := testutils.WatchTaskCreate(t, watch)
	assert.Equal(t, observedTask2.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask2.ServiceAnnotations.Name, "name1")
	assert.Equal(t, observedTask2.NodeID, "id2")
}
Example 10: TestDeleteService
func TestDeleteService(t *testing.T) {
	store := store.NewMemoryStore(nil)
	assert.NotNil(t, store)
	defer store.Close()

	watch, cancel := state.Watch(store.WatchQueue())
	defer cancel()

	SetupCluster(t, store, watch)

	deleteService(t, store, service1)
	// task should be deleted
	observedTask := testutils.WatchTaskDelete(t, watch)
	assert.Equal(t, observedTask.ServiceAnnotations.Name, "name1")
	assert.Equal(t, observedTask.NodeID, "id1")
}
Example 11: TestDeleteNode
func TestDeleteNode(t *testing.T) {
	store := store.NewMemoryStore(nil)
	assert.NotNil(t, store)

	SetupCluster(t, store)

	watch, cancel := state.Watch(store.WatchQueue())
	defer cancel()

	skipEvents(t, watch)

	deleteNode(t, store, node1)
	// task should be set to dead
	observedTask := watchShutdownTask(t, watch)
	assert.Equal(t, observedTask.ServiceAnnotations.Name, "name1")
	assert.Equal(t, observedTask.NodeID, "id1")
}
Example 12: TestAddService
func TestAddService(t *testing.T) {
	store := store.NewMemoryStore(nil)
	assert.NotNil(t, store)

	SetupCluster(t, store)

	watch, cancel := state.Watch(store.WatchQueue())
	defer cancel()

	skipEvents(t, watch)

	addService(t, store, service2)
	observedTask := watchTaskCreate(t, watch)
	assert.Equal(t, observedTask.Status.State, api.TaskStateNew)
	assert.Equal(t, observedTask.ServiceAnnotations.Name, "name2")
	assert.True(t, observedTask.NodeID == "id1")
}
Example 13: ViewAndWatch
// ViewAndWatch calls a callback which can observe the state of this
// MemoryStore. It also returns a channel that will return further events from
// this point so the snapshot can be kept up to date. The watch channel must be
// released with watch.StopWatch when it is no longer needed. The channel is
// guaranteed to get all events after the moment of the snapshot, and only
// those events.
func ViewAndWatch(store *MemoryStore, cb func(ReadTx) error, specifiers ...state.Event) (watch chan events.Event, cancel func(), err error) {
	// Using Update to lock the store and guarantee consistency between
	// the watcher and the state seen by the callback. snapshotReadTx
	// exposes this Tx as a ReadTx so the callback can't modify it.
	err = store.Update(func(tx Tx) error {
		if err := cb(tx); err != nil {
			return err
		}
		watch, cancel = state.Watch(store.WatchQueue(), specifiers...)
		return nil
	})
	if watch != nil && err != nil {
		cancel()
		cancel = nil
		watch = nil
	}
	return
}
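For illustration, here is a hedged sketch of how ViewAndWatch might be called from outside the store package to take a consistent snapshot of tasks and then follow later task updates. The wrapper function name is invented, and the store.FindTasks call is an assumption patterned on the FindNodes/FindClusters helpers used in the examples above; the usual swarmkit imports (api, state, store, go-events) are assumed.

// snapshotAndFollowTasks is a hypothetical helper: it snapshots all tasks and,
// atomically with that snapshot, starts a watch for subsequent task updates.
func snapshotAndFollowTasks(s *store.MemoryStore) ([]*api.Task, chan events.Event, func(), error) {
	var tasks []*api.Task
	watch, cancel, err := store.ViewAndWatch(s,
		func(readTx store.ReadTx) error {
			var err error
			// Assumed finder, analogous to FindNodes/FindClusters above.
			tasks, err = store.FindTasks(readTx, store.All)
			return err
		},
		state.EventUpdateTask{},
	)
	if err != nil {
		return nil, nil, nil, err
	}
	// The caller must release the watch with cancel() when done.
	return tasks, watch, cancel, nil
}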
Example 14: TestSchedulerFaultyNode
func TestSchedulerFaultyNode(t *testing.T) {
	ctx := context.Background()

	taskTemplate := &api.Task{
		ServiceID:    "service1",
		DesiredState: api.TaskStateRunning,
		ServiceAnnotations: api.Annotations{
			Name: "name1",
		},
		Status: api.TaskStatus{
			State: api.TaskStatePending,
		},
	}

	node1 := &api.Node{
		ID: "id1",
		Spec: api.NodeSpec{
			Annotations: api.Annotations{
				Name: "id1",
			},
		},
		Status: api.NodeStatus{
			State: api.NodeStatus_READY,
		},
	}
	node2 := &api.Node{
		ID: "id2",
		Spec: api.NodeSpec{
			Annotations: api.Annotations{
				Name: "id2",
			},
		},
		Status: api.NodeStatus{
			State: api.NodeStatus_READY,
		},
	}

	s := store.NewMemoryStore(nil)
	assert.NotNil(t, s)
	defer s.Close()

	err := s.Update(func(tx store.Tx) error {
		// Add initial nodes, and one task assigned to node id1
		assert.NoError(t, store.CreateNode(tx, node1))
		assert.NoError(t, store.CreateNode(tx, node2))

		task1 := taskTemplate.Copy()
		task1.ID = "id1"
		task1.NodeID = "id1"
		task1.Status.State = api.TaskStateRunning
		assert.NoError(t, store.CreateTask(tx, task1))
		return nil
	})
	assert.NoError(t, err)

	scheduler := New(s)

	watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
	defer cancel()

	go func() {
		assert.NoError(t, scheduler.Run(ctx))
	}()
	defer scheduler.Stop()

	for i := 0; i != 8; i++ {
		// Simulate a task failure cycle
		newTask := taskTemplate.Copy()
		newTask.ID = identity.NewID()

		err = s.Update(func(tx store.Tx) error {
			assert.NoError(t, store.CreateTask(tx, newTask))
			return nil
		})
		assert.NoError(t, err)

		assignment := watchAssignment(t, watch)
		assert.Equal(t, newTask.ID, assignment.ID)

		if i < 5 {
			// The first 5 attempts should be assigned to node id2 because
			// it has no replicas of the service.
			assert.Equal(t, "id2", assignment.NodeID)
		} else {
			// The next ones should be assigned to id1, since we'll
			// flag id2 as potentially faulty.
			assert.Equal(t, "id1", assignment.NodeID)
		}

		err = s.Update(func(tx store.Tx) error {
			newTask := store.GetTask(tx, newTask.ID)
			require.NotNil(t, newTask)
			newTask.Status.State = api.TaskStateFailed
			assert.NoError(t, store.UpdateTask(tx, newTask))
			return nil
		})
		assert.NoError(t, err)
	}
}
Example 15: benchScheduler
func benchScheduler(b *testing.B, nodes, tasks int, networkConstraints bool) {
	ctx := context.Background()

	for iters := 0; iters < b.N; iters++ {
		b.StopTimer()
		s := store.NewMemoryStore(nil)
		scheduler := New(s)

		watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})

		go func() {
			_ = scheduler.Run(ctx)
		}()

		// Let the scheduler get started
		runtime.Gosched()

		_ = s.Update(func(tx store.Tx) error {
			// Create initial nodes and tasks
			for i := 0; i < nodes; i++ {
				n := &api.Node{
					ID: identity.NewID(),
					Spec: api.NodeSpec{
						Annotations: api.Annotations{
							Name:   "name" + strconv.Itoa(i),
							Labels: make(map[string]string),
						},
					},
					Status: api.NodeStatus{
						State: api.NodeStatus_READY,
					},
					Description: &api.NodeDescription{
						Engine: &api.EngineDescription{},
					},
				}
				// Give every third node a special network
				if i%3 == 0 {
					n.Description.Engine.Plugins = []api.PluginDescription{
						{
							Name: "network",
							Type: "Network",
						},
					}
				}
				err := store.CreateNode(tx, n)
				if err != nil {
					panic(err)
				}
			}
			for i := 0; i < tasks; i++ {
				id := "task" + strconv.Itoa(i)
				t := &api.Task{
					ID:           id,
					DesiredState: api.TaskStateRunning,
					ServiceAnnotations: api.Annotations{
						Name: id,
					},
					Status: api.TaskStatus{
						State: api.TaskStatePending,
					},
				}
				if networkConstraints {
					t.Networks = []*api.NetworkAttachment{
						{
							Network: &api.Network{
								DriverState: &api.Driver{
									Name: "network",
								},
							},
						},
					}
				}
				err := store.CreateTask(tx, t)
				if err != nil {
					panic(err)
				}
			}
			b.StartTimer()
			return nil
		})

		for i := 0; i != tasks; i++ {
			<-watch
		}

		scheduler.Stop()
		cancel()
		s.Close()
	}
}
}