This page collects typical usage examples of the Golang function store.ByNodeID from github.com/docker/swarmkit/manager/state/store. If you are unsure what ByNodeID does, how to call it, or what real-world usage looks like, the curated examples below may help.
The following shows 14 code examples of the ByNodeID function, sorted by popularity by default.
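Before the full examples, here is a minimal, self-contained sketch of the pattern that nearly all of them share: store.ByNodeID builds a by-node selector, and store.FindTasks evaluates it inside a read transaction opened with View. The surrounding setup (the in-memory store, the node ID value "node-1", the tasksOnNode helper name, and the api import path) is assumed for illustration and is not taken from any single example below.

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/state/store"
)

// tasksOnNode lists every task currently assigned to the given node.
func tasksOnNode(s *store.MemoryStore, nodeID string) ([]*api.Task, error) {
	var (
		tasks []*api.Task
		err   error
	)
	// ByNodeID returns a selector; FindTasks applies it within the read transaction.
	s.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID))
	})
	return tasks, err
}

func main() {
	s := store.NewMemoryStore(nil) // empty in-memory store, as in the test examples below
	defer s.Close()

	tasks, err := tasksOnNode(s, "node-1")
	fmt.Printf("tasks on node-1: %d (err: %v)\n", len(tasks), err)
}

The write-oriented examples below follow the same read with a store.Batch of updates; a condensed sketch of that half appears at the end of the page.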
Example 1: removeTasksFromNode
func (g *GlobalOrchestrator) removeTasksFromNode(ctx context.Context, node *api.Node) {
var (
tasks []*api.Task
err error
)
g.store.View(func(tx store.ReadTx) {
tasks, err = store.FindTasks(tx, store.ByNodeID(node.ID))
})
if err != nil {
log.G(ctx).WithError(err).Errorf("global orchestrator: removeTasksFromNode failed finding tasks")
return
}
_, err = g.store.Batch(func(batch *store.Batch) error {
for _, t := range tasks {
// GlobalOrchestrator only removes tasks from globalServices
if _, exists := g.globalServices[t.ServiceID]; exists {
g.removeTask(ctx, batch, t)
}
}
return nil
})
if err != nil {
log.G(ctx).WithError(err).Errorf("global orchestrator: removeTasksFromNode failed")
}
}
Example 2: reconcileServiceOneNode
// reconcileServiceOneNode checks one service on one node
func (g *GlobalOrchestrator) reconcileServiceOneNode(ctx context.Context, serviceID string, nodeID string) {
_, exists := g.nodes[nodeID]
if !exists {
return
}
service, exists := g.globalServices[serviceID]
if !exists {
return
}
// the node has completed this service
completed := false
// tasks for this node and service
var (
tasks []*api.Task
err error
)
g.store.View(func(tx store.ReadTx) {
var tasksOnNode []*api.Task
tasksOnNode, err = store.FindTasks(tx, store.ByNodeID(nodeID))
if err != nil {
return
}
for _, t := range tasksOnNode {
// only interested in one service
if t.ServiceID != serviceID {
continue
}
if isTaskRunning(t) {
tasks = append(tasks, t)
} else {
if isTaskCompleted(t, restartCondition(t)) {
completed = true
}
}
}
})
if err != nil {
log.G(ctx).WithError(err).Errorf("global orchestrator: reconcile failed finding tasks")
return
}
_, err = g.store.Batch(func(batch *store.Batch) error {
// if restart policy considers this node has finished its task
// it should remove all running tasks
if completed {
g.removeTasks(ctx, batch, service, tasks)
return nil
}
// this node needs to run 1 copy of the task
if len(tasks) == 0 {
g.addTask(ctx, batch, service, nodeID)
} else {
g.removeTasks(ctx, batch, service, tasks[1:])
}
return nil
})
if err != nil {
log.G(ctx).WithError(err).Errorf("global orchestrator: reconcileServiceOneNode batch failed")
}
}
Example 3: restartTasksByNodeID
func (r *Orchestrator) restartTasksByNodeID(ctx context.Context, nodeID string) {
var err error
r.store.View(func(tx store.ReadTx) {
var tasks []*api.Task
tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID))
if err != nil {
return
}
for _, t := range tasks {
if t.DesiredState > api.TaskStateRunning {
continue
}
service := store.GetService(tx, t.ServiceID)
if orchestrator.IsReplicatedService(service) {
r.restartTasks[t.ID] = struct{}{}
}
}
})
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to list tasks to remove")
}
}
Example 4: moveTasksToOrphaned
func (d *Dispatcher) moveTasksToOrphaned(nodeID string) error {
_, err := d.store.Batch(func(batch *store.Batch) error {
var (
tasks []*api.Task
err error
)
d.store.View(func(tx store.ReadTx) {
tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID))
})
if err != nil {
return err
}
for _, task := range tasks {
if task.Status.State < api.TaskStateOrphaned {
task.Status.State = api.TaskStateOrphaned
}
if err := batch.Update(func(tx store.Tx) error {
err := store.UpdateTask(tx, task)
if err != nil {
return err
}
return nil
}); err != nil {
return err
}
}
return nil
})
return err
}
Example 5: shutdownNoncompliantTasks
func (ce *ConstraintEnforcer) shutdownNoncompliantTasks(node *api.Node) {
// If the availability is "drain", the orchestrator will
// shut down all tasks.
// If the availability is "pause", we shouldn't touch
// the tasks on this node.
if node.Spec.Availability != api.NodeAvailabilityActive {
return
}
var (
tasks []*api.Task
err error
)
ce.store.View(func(tx store.ReadTx) {
tasks, err = store.FindTasks(tx, store.ByNodeID(node.ID))
})
if err != nil {
log.L.WithError(err).Errorf("failed to list tasks for node ID %s", node.ID)
}
var availableMemoryBytes, availableNanoCPUs int64
if node.Description != nil && node.Description.Resources != nil {
availableMemoryBytes = node.Description.Resources.MemoryBytes
availableNanoCPUs = node.Description.Resources.NanoCPUs
}
removeTasks := make(map[string]*api.Task)
// TODO(aaronl): The set of tasks removed will be
// nondeterministic because it depends on the order of
// the slice returned from FindTasks. We could do
// a separate pass over the tasks for each type of
// resource, and sort by the size of the reservation
// to remove the most resource-intensive tasks.
for _, t := range tasks {
if t.DesiredState < api.TaskStateAssigned || t.DesiredState > api.TaskStateRunning {
continue
}
// Ensure that the task still meets scheduling
// constraints.
if t.Spec.Placement != nil && len(t.Spec.Placement.Constraints) != 0 {
constraints, _ := constraint.Parse(t.Spec.Placement.Constraints)
if !constraint.NodeMatches(constraints, node) {
removeTasks[t.ID] = t
continue
}
}
// Ensure that the task assigned to the node
// still satisfies the resource limits.
if t.Spec.Resources != nil && t.Spec.Resources.Reservations != nil {
if t.Spec.Resources.Reservations.MemoryBytes > availableMemoryBytes {
removeTasks[t.ID] = t
continue
}
if t.Spec.Resources.Reservations.NanoCPUs > availableNanoCPUs {
removeTasks[t.ID] = t
continue
}
availableMemoryBytes -= t.Spec.Resources.Reservations.MemoryBytes
availableNanoCPUs -= t.Spec.Resources.Reservations.NanoCPUs
}
}
if len(removeTasks) != 0 {
_, err := ce.store.Batch(func(batch *store.Batch) error {
for _, t := range removeTasks {
err := batch.Update(func(tx store.Tx) error {
t = store.GetTask(tx, t.ID)
if t == nil || t.DesiredState > api.TaskStateRunning {
return nil
}
t.DesiredState = api.TaskStateShutdown
return store.UpdateTask(tx, t)
})
if err != nil {
log.L.WithError(err).Errorf("failed to shut down task %s", t.ID)
}
}
return nil
})
if err != nil {
log.L.WithError(err).Errorf("failed to shut down tasks")
}
}
}
Example 6: Tasks
// Tasks is a stream of task states for a node. Each message contains the full
// list of tasks that should be run on the node; if a task is not present in
// that list, it should be terminated.
func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServer) error {
nodeInfo, err := ca.RemoteNode(stream.Context())
if err != nil {
return err
}
nodeID := nodeInfo.NodeID
if err := d.isRunningLocked(); err != nil {
return err
}
fields := logrus.Fields{
"node.id": nodeID,
"node.session": r.SessionID,
"method": "(*Dispatcher).Tasks",
}
if nodeInfo.ForwardedBy != nil {
fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
}
log.G(stream.Context()).WithFields(fields).Debugf("")
if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
tasksMap := make(map[string]*api.Task)
nodeTasks, cancel, err := store.ViewAndWatch(
d.store,
func(readTx store.ReadTx) error {
tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
if err != nil {
return err
}
for _, t := range tasks {
tasksMap[t.ID] = t
}
return nil
},
state.EventCreateTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
)
if err != nil {
return err
}
defer cancel()
for {
if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
var tasks []*api.Task
for _, t := range tasksMap {
// dispatcher only sends tasks that have been assigned to a node
if t != nil && t.Status.State >= api.TaskStateAssigned {
tasks = append(tasks, t)
}
}
if err := stream.Send(&api.TasksMessage{Tasks: tasks}); err != nil {
return err
}
select {
case event := <-nodeTasks:
switch v := event.(type) {
case state.EventCreateTask:
tasksMap[v.Task.ID] = v.Task
case state.EventUpdateTask:
tasksMap[v.Task.ID] = v.Task
case state.EventDeleteTask:
delete(tasksMap, v.Task.ID)
}
case <-stream.Context().Done():
return stream.Context().Err()
case <-d.ctx.Done():
return d.ctx.Err()
}
}
}
Example 7: Assignments
// Assignments is a stream of assignments for a node. Each message contains
// either the full list of tasks and secrets for the node, or an incremental update.
func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error {
nodeInfo, err := ca.RemoteNode(stream.Context())
if err != nil {
return err
}
nodeID := nodeInfo.NodeID
dctx, err := d.isRunningLocked()
if err != nil {
return err
}
fields := logrus.Fields{
"node.id": nodeID,
"node.session": r.SessionID,
"method": "(*Dispatcher).Assignments",
}
if nodeInfo.ForwardedBy != nil {
fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
}
log := log.G(stream.Context()).WithFields(fields)
log.Debugf("")
if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
var (
sequence int64
appliesTo string
initial api.AssignmentsMessage
)
tasksMap := make(map[string]*api.Task)
tasksUsingSecret := make(map[string]map[string]struct{})
sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error {
sequence++
msg.AppliesTo = appliesTo
msg.ResultsIn = strconv.FormatInt(sequence, 10)
appliesTo = msg.ResultsIn
msg.Type = assignmentType
if err := stream.Send(&msg); err != nil {
return err
}
return nil
}
// returns a slice of new secrets to send down
addSecretsForTask := func(readTx store.ReadTx, t *api.Task) []*api.Secret {
container := t.Spec.GetContainer()
if container == nil {
return nil
}
var newSecrets []*api.Secret
for _, secretRef := range container.Secrets {
// Empty ID prefix will return all secrets. Bail if there is no SecretID
if secretRef.SecretID == "" {
log.Debugf("invalid secret reference")
continue
}
secretID := secretRef.SecretID
log := log.WithFields(logrus.Fields{
"secret.id": secretID,
"secret.name": secretRef.SecretName,
})
if len(tasksUsingSecret[secretID]) == 0 {
tasksUsingSecret[secretID] = make(map[string]struct{})
secrets, err := store.FindSecrets(readTx, store.ByIDPrefix(secretID))
if err != nil {
log.WithError(err).Errorf("error retrieving secret")
continue
}
if len(secrets) != 1 {
log.Debugf("secret not found")
continue
}
// If the secret was found and there was one result
// (there should never be more than one because of the
// uniqueness constraint), add this secret to our
// initial set that we send down.
newSecrets = append(newSecrets, secrets[0])
}
tasksUsingSecret[secretID][t.ID] = struct{}{}
}
return newSecrets
}
// TODO(aaronl): Also send node secrets that should be exposed to
// this node.
nodeTasks, cancel, err := store.ViewAndWatch(
d.store,
func(readTx store.ReadTx) error {
tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
//......... part of the code omitted here .........
Example 8: Tasks
// Tasks is a stream of task states for a node. Each message contains the full
// list of tasks that should be run on the node; if a task is not present in
// that list, it should be terminated.
func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServer) error {
nodeInfo, err := ca.RemoteNode(stream.Context())
if err != nil {
return err
}
nodeID := nodeInfo.NodeID
dctx, err := d.isRunningLocked()
if err != nil {
return err
}
fields := logrus.Fields{
"node.id": nodeID,
"node.session": r.SessionID,
"method": "(*Dispatcher).Tasks",
}
if nodeInfo.ForwardedBy != nil {
fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
}
log.G(stream.Context()).WithFields(fields).Debugf("")
if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
tasksMap := make(map[string]*api.Task)
nodeTasks, cancel, err := store.ViewAndWatch(
d.store,
func(readTx store.ReadTx) error {
tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
if err != nil {
return err
}
for _, t := range tasks {
tasksMap[t.ID] = t
}
return nil
},
state.EventCreateTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
)
if err != nil {
return err
}
defer cancel()
for {
if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
var tasks []*api.Task
for _, t := range tasksMap {
// dispatcher only sends tasks that have been assigned to a node
if t != nil && t.Status.State >= api.TaskStateAssigned {
tasks = append(tasks, t)
}
}
if err := stream.Send(&api.TasksMessage{Tasks: tasks}); err != nil {
return err
}
// bursty events should be processed in batches and sent out as a snapshot
var (
modificationCnt int
batchingTimer *time.Timer
batchingTimeout <-chan time.Time
)
batchingLoop:
for modificationCnt < modificationBatchLimit {
select {
case event := <-nodeTasks:
switch v := event.(type) {
case state.EventCreateTask:
tasksMap[v.Task.ID] = v.Task
modificationCnt++
case state.EventUpdateTask:
if oldTask, exists := tasksMap[v.Task.ID]; exists {
// States ASSIGNED and below are set by the orchestrator/scheduler,
// not the agent, so tasks in these states need to be sent to the
// agent even if nothing else has changed.
if equality.TasksEqualStable(oldTask, v.Task) && v.Task.Status.State > api.TaskStateAssigned {
// this update should not trigger action at agent
tasksMap[v.Task.ID] = v.Task
continue
}
}
tasksMap[v.Task.ID] = v.Task
modificationCnt++
case state.EventDeleteTask:
//......... part of the code omitted here .........
Example 9: TestHA
//......... part of the code omitted here .........
if t2Assignments[assignment.NodeID] != 0 {
if sharedNodes[0] == "" {
sharedNodes[0] = assignment.NodeID
} else if sharedNodes[1] == "" {
sharedNodes[1] = assignment.NodeID
} else {
t.Fatal("all three assignments went to nodes with service2 tasks")
}
}
}
assert.NotEmpty(t, sharedNodes[0])
assert.NotEmpty(t, sharedNodes[1])
assert.NotEqual(t, sharedNodes[0], sharedNodes[1])
nodesWith4T1Tasks = 0
nodesWith5T1Tasks := 0
for nodeID, taskCount := range t1Assignments {
if taskCount == 4 {
nodesWith4T1Tasks++
} else if taskCount == 5 {
nodesWith5T1Tasks++
} else {
t.Fatalf("unexpected number of tasks %d on node %s", taskCount, nodeID)
}
}
assert.Equal(t, 4, nodesWith4T1Tasks)
assert.Equal(t, 1, nodesWith5T1Tasks)
// Add another task from service2. It must not land on the node that
// has 5 service1 tasks.
err = s.Update(func(tx store.Tx) error {
taskTemplate2.ID = "t2id4"
assert.NoError(t, store.CreateTask(tx, taskTemplate2))
return nil
})
assert.NoError(t, err)
assignment := watchAssignment(t, watch)
if assignment.ID != "t2id4" {
t.Fatal("got assignment for different task")
}
if t2Assignments[assignment.NodeID] != 0 {
t.Fatal("was scheduled on a node that already has a service2 task")
}
if t1Assignments[assignment.NodeID] == 5 {
t.Fatal("was scheduled on the node that has the most service1 tasks")
}
t2Assignments[assignment.NodeID]++
// Remove all tasks on node id1.
err = s.Update(func(tx store.Tx) error {
tasks, err := store.FindTasks(tx, store.ByNodeID("id1"))
assert.NoError(t, err)
for _, task := range tasks {
assert.NoError(t, store.DeleteTask(tx, task.ID))
}
return nil
})
assert.NoError(t, err)
t1Assignments["id1"] = 0
t2Assignments["id1"] = 0
// Add four instances of service1 and two instances of service2.
// All instances of service1 should land on node "id1", and one
// of the two service2 instances should as well.
// Put these in a map to randomize the order in which they are
// created.
err = s.Update(func(tx store.Tx) error {
tasksMap := make(map[string]*api.Task)
for i := 22; i <= 25; i++ {
taskTemplate1.ID = fmt.Sprintf("t1id%d", i)
tasksMap[taskTemplate1.ID] = taskTemplate1.Copy()
}
for i := 5; i <= 6; i++ {
taskTemplate2.ID = fmt.Sprintf("t2id%d", i)
tasksMap[taskTemplate2.ID] = taskTemplate2.Copy()
}
for _, task := range tasksMap {
assert.NoError(t, store.CreateTask(tx, task))
}
return nil
})
assert.NoError(t, err)
for i := 0; i != 4+2; i++ {
assignment := watchAssignment(t, watch)
if strings.HasPrefix(assignment.ID, "t1") {
t1Assignments[assignment.NodeID]++
} else if strings.HasPrefix(assignment.ID, "t2") {
t2Assignments[assignment.NodeID]++
}
}
assert.Equal(t, 4, t1Assignments["id1"])
assert.Equal(t, 1, t2Assignments["id1"])
}
Example 10: TestSchedulerResourceConstraintDeadTask
func TestSchedulerResourceConstraintDeadTask(t *testing.T) {
ctx := context.Background()
// Create a ready node without enough memory to run the task.
node := &api.Node{
ID: "id1",
Spec: api.NodeSpec{
Annotations: api.Annotations{
Name: "node",
},
},
Status: api.NodeStatus{
State: api.NodeStatus_READY,
},
Description: &api.NodeDescription{
Resources: &api.Resources{
NanoCPUs: 1e9,
MemoryBytes: 1e9,
},
},
}
bigTask1 := &api.Task{
DesiredState: api.TaskStateRunning,
ID: "id1",
Spec: api.TaskSpec{
Resources: &api.ResourceRequirements{
Reservations: &api.Resources{
MemoryBytes: 8e8,
},
},
},
ServiceAnnotations: api.Annotations{
Name: "big",
},
Status: api.TaskStatus{
State: api.TaskStatePending,
},
}
bigTask2 := bigTask1.Copy()
bigTask2.ID = "id2"
s := store.NewMemoryStore(nil)
assert.NotNil(t, s)
defer s.Close()
err := s.Update(func(tx store.Tx) error {
// Add initial node and task
assert.NoError(t, store.CreateNode(tx, node))
assert.NoError(t, store.CreateTask(tx, bigTask1))
return nil
})
assert.NoError(t, err)
scheduler := New(s)
watch, cancel := state.Watch(s.WatchQueue(), state.EventUpdateTask{})
defer cancel()
go func() {
assert.NoError(t, scheduler.Run(ctx))
}()
defer scheduler.Stop()
// The task fits, so it should get assigned
assignment := watchAssignment(t, watch)
assert.Equal(t, "id1", assignment.ID)
assert.Equal(t, "id1", assignment.NodeID)
err = s.Update(func(tx store.Tx) error {
// Add a second task. It shouldn't get assigned because of
// resource constraints.
return store.CreateTask(tx, bigTask2)
})
assert.NoError(t, err)
time.Sleep(100 * time.Millisecond)
s.View(func(tx store.ReadTx) {
tasks, err := store.FindTasks(tx, store.ByNodeID(node.ID))
assert.NoError(t, err)
assert.Len(t, tasks, 1)
})
err = s.Update(func(tx store.Tx) error {
// The task becomes dead
updatedTask := store.GetTask(tx, bigTask1.ID)
updatedTask.Status.State = api.TaskStateShutdown
return store.UpdateTask(tx, updatedTask)
})
assert.NoError(t, err)
// With the first task no longer consuming resources, the second
// one can be scheduled.
assignment = watchAssignment(t, watch)
assert.Equal(t, "id2", assignment.ID)
assert.Equal(t, "id1", assignment.NodeID)
}
Example 11: reconcileServicesOneNode
// reconcileServicesOneNode checks the specified services on one node
func (g *Orchestrator) reconcileServicesOneNode(ctx context.Context, serviceIDs []string, nodeID string) {
node, exists := g.nodes[nodeID]
if !exists {
return
}
// whether each service has completed on the node
completed := make(map[string]bool)
// tasks by service
tasks := make(map[string][]*api.Task)
var (
tasksOnNode []*api.Task
err error
)
g.store.View(func(tx store.ReadTx) {
tasksOnNode, err = store.FindTasks(tx, store.ByNodeID(nodeID))
})
if err != nil {
log.G(ctx).WithError(err).Errorf("global orchestrator: reconcile failed finding tasks on node %s", nodeID)
return
}
for _, serviceID := range serviceIDs {
for _, t := range tasksOnNode {
if t.ServiceID != serviceID {
continue
}
if isTaskRunning(t) {
tasks[serviceID] = append(tasks[serviceID], t)
} else {
if isTaskCompleted(t, orchestrator.RestartCondition(t)) {
completed[serviceID] = true
}
}
}
}
_, err = g.store.Batch(func(batch *store.Batch) error {
for _, serviceID := range serviceIDs {
service, exists := g.globalServices[serviceID]
if !exists {
continue
}
if !constraint.NodeMatches(service.constraints, node) {
continue
}
// if restart policy considers this node has finished its task
// it should remove all running tasks
if completed[serviceID] {
g.removeTasks(ctx, batch, tasks[serviceID])
continue
}
if node.Spec.Availability == api.NodeAvailabilityPause {
// the node is paused, so we won't add or update tasks
continue
}
if len(tasks) == 0 {
g.addTask(ctx, batch, service.Service, nodeID)
} else {
// If task is out of date, update it. This can happen
// on node reconciliation if, for example, we pause a
// node, update the service, and then activate the node
// later.
// We don't use g.updater here for two reasons:
// - This is not a rolling update. Since it was not
// triggered directly by updating the service, it
// should not observe the rolling update parameters
// or show status in UpdateStatus.
// - Calling Update cancels any current rolling updates
// for the service, such as one triggered by service
// reconciliation.
var (
dirtyTasks []*api.Task
cleanTasks []*api.Task
)
for _, t := range tasks[serviceID] {
if orchestrator.IsTaskDirty(service.Service, t) {
dirtyTasks = append(dirtyTasks, t)
} else {
cleanTasks = append(cleanTasks, t)
}
}
if len(cleanTasks) == 0 {
g.addTask(ctx, batch, service.Service, nodeID)
} else {
dirtyTasks = append(dirtyTasks, cleanTasks[1:]...)
}
g.removeTasks(ctx, batch, dirtyTasks)
}
//......... part of the code omitted here .........
Example 12: tick
func (tr *TaskReaper) tick() {
if len(tr.dirty) == 0 {
return
}
defer func() {
tr.dirty = make(map[instanceTuple]struct{})
}()
var deleteTasks []string
tr.store.View(func(tx store.ReadTx) {
for dirty := range tr.dirty {
service := store.GetService(tx, dirty.serviceID)
if service == nil {
continue
}
taskHistory := tr.taskHistory
if taskHistory < 0 {
continue
}
var historicTasks []*api.Task
switch service.Spec.GetMode().(type) {
case *api.ServiceSpec_Replicated:
var err error
historicTasks, err = store.FindTasks(tx, store.BySlot(dirty.serviceID, dirty.instance))
if err != nil {
continue
}
case *api.ServiceSpec_Global:
tasksByNode, err := store.FindTasks(tx, store.ByNodeID(dirty.nodeID))
if err != nil {
continue
}
for _, t := range tasksByNode {
if t.ServiceID == dirty.serviceID {
historicTasks = append(historicTasks, t)
}
}
}
if int64(len(historicTasks)) <= taskHistory {
continue
}
// TODO(aaronl): This could filter for non-running tasks and use quickselect
// instead of sorting the whole slice.
sort.Sort(tasksByTimestamp(historicTasks))
for _, t := range historicTasks {
if t.DesiredState <= api.TaskStateRunning {
// Don't delete running tasks
continue
}
deleteTasks = append(deleteTasks, t.ID)
taskHistory++
if int64(len(historicTasks)) <= taskHistory {
break
}
}
}
})
if len(deleteTasks) > 0 {
tr.store.Batch(func(batch *store.Batch) error {
for _, taskID := range deleteTasks {
batch.Update(func(tx store.Tx) error {
return store.DeleteTask(tx, taskID)
})
}
return nil
})
}
}
Example 13: Assignments
// Assignments is a stream of assignments for a node. Each message contains
// either the full list of tasks and secrets for the node, or an incremental update.
func (d *Dispatcher) Assignments(r *api.AssignmentsRequest, stream api.Dispatcher_AssignmentsServer) error {
nodeInfo, err := ca.RemoteNode(stream.Context())
if err != nil {
return err
}
nodeID := nodeInfo.NodeID
if err := d.isRunningLocked(); err != nil {
return err
}
fields := logrus.Fields{
"node.id": nodeID,
"node.session": r.SessionID,
"method": "(*Dispatcher).Assignments",
}
if nodeInfo.ForwardedBy != nil {
fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
}
log := log.G(stream.Context()).WithFields(fields)
log.Debugf("")
if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
var (
sequence int64
appliesTo string
initial api.AssignmentsMessage
)
tasksMap := make(map[string]*api.Task)
sendMessage := func(msg api.AssignmentsMessage, assignmentType api.AssignmentsMessage_Type) error {
sequence++
msg.AppliesTo = appliesTo
msg.ResultsIn = strconv.FormatInt(sequence, 10)
appliesTo = msg.ResultsIn
msg.Type = assignmentType
if err := stream.Send(&msg); err != nil {
return err
}
return nil
}
// TODO(aaronl): Also send node secrets that should be exposed to
// this node.
nodeTasks, cancel, err := store.ViewAndWatch(
d.store,
func(readTx store.ReadTx) error {
tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
if err != nil {
return err
}
for _, t := range tasks {
// We only care about tasks that are ASSIGNED or
// higher. If the state is below ASSIGNED, the
// task may not meet the constraints for this
// node, so we have to be careful about sending
// secrets associated with it.
if t.Status.State < api.TaskStateAssigned {
continue
}
tasksMap[t.ID] = t
initial.UpdateTasks = append(initial.UpdateTasks, t)
}
return nil
},
state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
)
if err != nil {
return err
}
defer cancel()
if err := sendMessage(initial, api.AssignmentsMessage_COMPLETE); err != nil {
return err
}
for {
// Check for session expiration
if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
// bursty events should be processed in batches and sent out together
var (
update api.AssignmentsMessage
modificationCnt int
batchingTimer *time.Timer
batchingTimeout <-chan time.Time
updateTasks = make(map[string]*api.Task)
//......... part of the code omitted here .........
Example 14: Tasks
// Tasks is a stream of task states for a node. Each message contains the full
// list of tasks that should be run on the node; if a task is not present in
// that list, it should be terminated.
func (d *Dispatcher) Tasks(r *api.TasksRequest, stream api.Dispatcher_TasksServer) error {
nodeInfo, err := ca.RemoteNode(stream.Context())
if err != nil {
return err
}
nodeID := nodeInfo.NodeID
if err := d.isRunningLocked(); err != nil {
return err
}
fields := logrus.Fields{
"node.id": nodeID,
"node.session": r.SessionID,
"method": "(*Dispatcher).Tasks",
}
if nodeInfo.ForwardedBy != nil {
fields["forwarder.id"] = nodeInfo.ForwardedBy.NodeID
}
log.G(stream.Context()).WithFields(fields).Debugf("")
if _, err = d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
tasksMap := make(map[string]*api.Task)
nodeTasks, cancel, err := store.ViewAndWatch(
d.store,
func(readTx store.ReadTx) error {
tasks, err := store.FindTasks(readTx, store.ByNodeID(nodeID))
if err != nil {
return err
}
for _, t := range tasks {
tasksMap[t.ID] = t
}
return nil
},
state.EventCreateTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
state.EventUpdateTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
state.EventDeleteTask{Task: &api.Task{NodeID: nodeID},
Checks: []state.TaskCheckFunc{state.TaskCheckNodeID}},
)
if err != nil {
return err
}
defer cancel()
for {
if _, err := d.nodes.GetWithSession(nodeID, r.SessionID); err != nil {
return err
}
var tasks []*api.Task
for _, t := range tasksMap {
// dispatcher only sends tasks that have been assigned to a node
if t != nil && t.Status.State >= api.TaskStateAssigned {
tasks = append(tasks, t)
}
}
if err := stream.Send(&api.TasksMessage{Tasks: tasks}); err != nil {
return err
}
// bursty events should be processed in batches and sent out as a snapshot
const modificationBatchLimit = 200
const eventPausedGap = 50 * time.Millisecond
var modificationCnt int
// eventPaused is true when there have been modifications
// but next event has not arrived within eventPausedGap
eventPaused := false
for modificationCnt < modificationBatchLimit && !eventPaused {
select {
case event := <-nodeTasks:
switch v := event.(type) {
case state.EventCreateTask:
tasksMap[v.Task.ID] = v.Task
modificationCnt++
case state.EventUpdateTask:
if oldTask, exists := tasksMap[v.Task.ID]; exists {
if equality.TasksEqualStable(oldTask, v.Task) {
// this update should not trigger action at agent
tasksMap[v.Task.ID] = v.Task
continue
}
}
tasksMap[v.Task.ID] = v.Task
modificationCnt++
case state.EventDeleteTask:
delete(tasksMap, v.Task.ID)
modificationCnt++
}
case <-time.After(eventPausedGap):
//......... part of the code omitted here .........
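To close, here is a condensed sketch of the write-side pattern shared by Examples 1, 4, and 5 above: tasks found through ByNodeID inside a View are then updated inside store.Batch, re-reading each task in its own write transaction before mutating it. The helper name shutdownTasksOnNode and the shutdown policy are illustrative assumptions, not swarmkit code; the snippet assumes the same imports as the sketch after the introduction.

// shutdownTasksOnNode is a hypothetical helper: it marks every task still
// desired to run on the given node for shutdown.
func shutdownTasksOnNode(s *store.MemoryStore, nodeID string) error {
	var (
		tasks []*api.Task
		err   error
	)
	s.View(func(tx store.ReadTx) {
		tasks, err = store.FindTasks(tx, store.ByNodeID(nodeID))
	})
	if err != nil {
		return err
	}
	_, err = s.Batch(func(batch *store.Batch) error {
		for _, t := range tasks {
			if err := batch.Update(func(tx store.Tx) error {
				// Re-read inside the write transaction; the task may have
				// changed or been deleted since the View above.
				task := store.GetTask(tx, t.ID)
				if task == nil || task.DesiredState > api.TaskStateRunning {
					return nil
				}
				task.DesiredState = api.TaskStateShutdown
				return store.UpdateTask(tx, task)
			}); err != nil {
				return err
			}
		}
		return nil
	})
	return err
}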